mirror of
https://github.com/OffchainLabs/prysm.git
synced 2026-04-19 03:01:06 -04:00
Compare commits
149 Commits
payloadCom
...
precompute
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9eff96348a | ||
|
|
f7d3f0929d | ||
|
|
e6c4bfefae | ||
|
|
03e01b7227 | ||
|
|
9ba6f1e774 | ||
|
|
fcbe8cf75a | ||
|
|
54619663c6 | ||
|
|
6e4d7fd781 | ||
|
|
14f5e6f414 | ||
|
|
9d084bceb3 | ||
|
|
f79d2efc6e | ||
|
|
9dba7c5319 | ||
|
|
c02c057b7d | ||
|
|
b6ec6a8eec | ||
|
|
3ca8c3ba35 | ||
|
|
1092c7135f | ||
|
|
73033a9d67 | ||
|
|
a7b83c358a | ||
|
|
29a0fd6760 | ||
|
|
209e46bab7 | ||
|
|
108e2806cb | ||
|
|
68c4c36e65 | ||
|
|
67cc68c3bb | ||
|
|
c33f0d04b7 | ||
|
|
f05972a181 | ||
|
|
7352ae03c6 | ||
|
|
4f34624a54 | ||
|
|
4e44fdf55e | ||
|
|
139773aa3a | ||
|
|
6558e947ca | ||
|
|
d8150ac20c | ||
|
|
543746d95d | ||
|
|
8e8c990a04 | ||
|
|
3c9eae6064 | ||
|
|
c0ee666996 | ||
|
|
3e61778d38 | ||
|
|
0bfe736730 | ||
|
|
277797f6f8 | ||
|
|
2898a5e8a2 | ||
|
|
ca8cc65d72 | ||
|
|
445487b4a7 | ||
|
|
86edeef90f | ||
|
|
bfbca75862 | ||
|
|
1f72a1428c | ||
|
|
101dd55710 | ||
|
|
7d797ee4f9 | ||
|
|
45e38d430f | ||
|
|
0a643b177d | ||
|
|
9ea9e1f07c | ||
|
|
8fb4d85bbd | ||
|
|
259f526c8d | ||
|
|
77b5a7a5b3 | ||
|
|
fb9d9d93de | ||
|
|
7781e40abf | ||
|
|
e77724401d | ||
|
|
f43ba7851c | ||
|
|
dfc5bbef7f | ||
|
|
6430e27257 | ||
|
|
b141b5ccd2 | ||
|
|
696a08f3b9 | ||
|
|
65d428db58 | ||
|
|
a6e669d8bc | ||
|
|
605ab1c7ac | ||
|
|
93f7214b32 | ||
|
|
1bffcc84f4 | ||
|
|
7728ad4aa2 | ||
|
|
1362654669 | ||
|
|
3cec3997f8 | ||
|
|
1236519810 | ||
|
|
36052ed1bb | ||
|
|
458d4ebe54 | ||
|
|
8680f3f8bb | ||
|
|
416c49e6d5 | ||
|
|
6605dfbd50 | ||
|
|
84993fdd68 | ||
|
|
1e916418f2 | ||
|
|
8d5d584cf8 | ||
|
|
598509ffa8 | ||
|
|
0fbd643c02 | ||
|
|
0e4f3231d2 | ||
|
|
b17f2752ab | ||
|
|
932e5eb7d8 | ||
|
|
de233438f1 | ||
|
|
e35f6c351a | ||
|
|
4da5ed072c | ||
|
|
dec4b43b3e | ||
|
|
17ea45a011 | ||
|
|
1934afac73 | ||
|
|
d0c9a31657 | ||
|
|
66c70200ee | ||
|
|
928a874e4a | ||
|
|
ed8a3351aa | ||
|
|
5b95d11c5e | ||
|
|
7a4bea0e44 | ||
|
|
e899003973 | ||
|
|
e751a74c64 | ||
|
|
6826e77539 | ||
|
|
15c178ef0c | ||
|
|
e115137591 | ||
|
|
dc62271ebb | ||
|
|
3ec505bc22 | ||
|
|
6be77c0194 | ||
|
|
03fa7042cb | ||
|
|
c620f29aab | ||
|
|
7a652d7ec6 | ||
|
|
b59a830dce | ||
|
|
6999943c3d | ||
|
|
90064edd54 | ||
|
|
393eb1e83c | ||
|
|
d2bcf75c50 | ||
|
|
89d3a6c66f | ||
|
|
bf2485eb71 | ||
|
|
a88f60f1fa | ||
|
|
33f899506f | ||
|
|
f7ead02e6e | ||
|
|
0abf17f6cd | ||
|
|
8307ee1098 | ||
|
|
7e4a039a7f | ||
|
|
d46621eea3 | ||
|
|
8b25ffaa45 | ||
|
|
17f1b78494 | ||
|
|
5874226067 | ||
|
|
544bc3eb45 | ||
|
|
de6d9947d7 | ||
|
|
e5382d95dd | ||
|
|
6d8445d440 | ||
|
|
7b16c34af3 | ||
|
|
91577760b6 | ||
|
|
9ced048510 | ||
|
|
0439151373 | ||
|
|
5f050566f4 | ||
|
|
8a43513f5a | ||
|
|
6c8504ef71 | ||
|
|
e99f86ecc5 | ||
|
|
0c36416677 | ||
|
|
f2db0faea8 | ||
|
|
e31f59e04e | ||
|
|
83a5210549 | ||
|
|
0aa4a08b7b | ||
|
|
9c5d4a1767 | ||
|
|
dd1ede572d | ||
|
|
ec7eb97d41 | ||
|
|
0c893bd1a6 | ||
|
|
9a7cae9e5c | ||
|
|
c114bc57d9 | ||
|
|
a9aea0ba25 | ||
|
|
9f2ade53ff | ||
|
|
5b19916067 | ||
|
|
e4cbb34c2f |
@@ -1,4 +1,4 @@
|
||||
version: v1.7.0-alpha.2
|
||||
version: v1.7.0-alpha.4
|
||||
style: full
|
||||
|
||||
specrefs:
|
||||
@@ -23,6 +23,8 @@ exceptions:
|
||||
- PTC_SIZE#gloas
|
||||
|
||||
constants:
|
||||
# heze
|
||||
- DOMAIN_INCLUSION_LIST_COMMITTEE#heze
|
||||
# phase0
|
||||
- BASIS_POINTS#phase0
|
||||
- ENDIANNESS#phase0
|
||||
@@ -61,7 +63,6 @@ exceptions:
|
||||
- ATTESTATION_TIMELINESS_INDEX#gloas
|
||||
- BUILDER_INDEX_FLAG#gloas
|
||||
- BUILDER_INDEX_SELF_BUILD#gloas
|
||||
- DOMAIN_PROPOSER_PREFERENCES#gloas
|
||||
- NUM_BLOCK_TIMELINESS_DEADLINES#gloas
|
||||
- PTC_TIMELINESS_INDEX#gloas
|
||||
|
||||
@@ -72,14 +73,31 @@ exceptions:
|
||||
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
|
||||
- GLOAS_FORK_EPOCH#gloas
|
||||
- GLOAS_FORK_VERSION#gloas
|
||||
- MAX_REQUEST_PAYLOADS#gloas
|
||||
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
|
||||
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
|
||||
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas
|
||||
# heze
|
||||
- HEZE_FORK_EPOCH#heze
|
||||
- HEZE_FORK_VERSION#heze
|
||||
- INCLUSION_LIST_SUBMISSION_DUE_BPS#heze
|
||||
- MAX_BYTES_PER_INCLUSION_LIST#heze
|
||||
- MAX_REQUEST_INCLUSION_LIST#heze
|
||||
- PROPOSER_INCLUSION_LIST_CUTOFF_BPS#heze
|
||||
- VIEW_FREEZE_CUTOFF_BPS#heze
|
||||
|
||||
ssz_objects:
|
||||
# phase0
|
||||
- Eth1Block#phase0
|
||||
# fulu
|
||||
- PartialDataColumnHeader#fulu
|
||||
- PartialDataColumnPartsMetadata#fulu
|
||||
- PartialDataColumnSidecar#fulu
|
||||
# gloas
|
||||
- PartialDataColumnHeader#gloas
|
||||
# heze
|
||||
- BeaconState#heze
|
||||
- ExecutionPayloadBid#heze
|
||||
- InclusionList#heze
|
||||
- SignedExecutionPayloadBid#heze
|
||||
- SignedInclusionList#heze
|
||||
# capella
|
||||
- LightClientBootstrap#capella
|
||||
- LightClientFinalityUpdate#capella
|
||||
@@ -109,6 +127,7 @@ exceptions:
|
||||
dataclasses:
|
||||
# phase0
|
||||
- LatestMessage#phase0
|
||||
- Seen#phase0
|
||||
- Store#phase0
|
||||
# altair
|
||||
- LightClientStore#altair
|
||||
@@ -125,6 +144,11 @@ exceptions:
|
||||
- ExpectedWithdrawals#gloas
|
||||
- LatestMessage#gloas
|
||||
- Store#gloas
|
||||
# heze
|
||||
- GetInclusionListResponse#heze
|
||||
- InclusionListStore#heze
|
||||
- PayloadAttributes#heze
|
||||
- Store#heze
|
||||
|
||||
functions:
|
||||
# Functions implemented by KZG library for EIP-4844
|
||||
@@ -181,11 +205,22 @@ exceptions:
|
||||
- verify_cell_kzg_proof_batch_impl#fulu
|
||||
|
||||
# phase0
|
||||
- compute_attestation_subnet_prefix_bits#phase0
|
||||
- compute_min_epochs_for_block_requests#phase0
|
||||
- compute_time_at_slot_ms#phase0
|
||||
- is_not_from_future_slot#phase0
|
||||
- is_within_slot_range#phase0
|
||||
- update_proposer_boost_root#phase0
|
||||
- is_proposer_equivocation#phase0
|
||||
- record_block_timeliness#phase0
|
||||
- compute_proposer_score#phase0
|
||||
- get_attestation_score#phase0
|
||||
- validate_attester_slashing_gossip#phase0
|
||||
- validate_beacon_aggregate_and_proof_gossip#phase0
|
||||
- validate_beacon_attestation_gossip#phase0
|
||||
- validate_beacon_block_gossip#phase0
|
||||
- validate_proposer_slashing_gossip#phase0
|
||||
- validate_voluntary_exit_gossip#phase0
|
||||
- calculate_committee_fraction#phase0
|
||||
- compute_fork_version#phase0
|
||||
- compute_pulled_up_tip#phase0
|
||||
@@ -276,6 +311,7 @@ exceptions:
|
||||
- upgrade_lc_store_to_capella#capella
|
||||
- upgrade_lc_update_to_capella#capella
|
||||
# deneb
|
||||
- compute_max_request_blob_sidecars#deneb
|
||||
- get_lc_execution_root#deneb
|
||||
- is_valid_light_client_header#deneb
|
||||
- prepare_execution_payload#deneb
|
||||
@@ -286,6 +322,7 @@ exceptions:
|
||||
- upgrade_lc_store_to_deneb#deneb
|
||||
- upgrade_lc_update_to_deneb#deneb
|
||||
# electra
|
||||
- compute_max_request_blob_sidecars#electra
|
||||
- compute_weak_subjectivity_period#electra
|
||||
- current_sync_committee_gindex_at_slot#electra
|
||||
- finalized_root_gindex_at_slot#electra
|
||||
@@ -307,12 +344,20 @@ exceptions:
|
||||
- upgrade_lc_store_to_electra#electra
|
||||
- upgrade_lc_update_to_electra#electra
|
||||
# fulu
|
||||
- compute_max_request_data_column_sidecars#fulu
|
||||
- compute_matrix#fulu
|
||||
- verify_partial_data_column_header_inclusion_proof#fulu
|
||||
- verify_partial_data_column_sidecar_kzg_proofs#fulu
|
||||
- get_blob_parameters#fulu
|
||||
- get_data_column_sidecars_from_block#fulu
|
||||
- get_data_column_sidecars_from_column_sidecar#fulu
|
||||
- recover_matrix#fulu
|
||||
# gloas
|
||||
- compute_ptc#gloas
|
||||
- initialize_ptc_window#gloas
|
||||
- is_payload_data_available#gloas
|
||||
- is_pending_validator#gloas
|
||||
- process_ptc_window#gloas
|
||||
- compute_balance_weighted_acceptance#gloas
|
||||
- compute_balance_weighted_selection#gloas
|
||||
- compute_fork_version#gloas
|
||||
@@ -390,10 +435,8 @@ exceptions:
|
||||
- get_builder_withdrawals#gloas
|
||||
- get_builders_sweep_withdrawals#gloas
|
||||
- get_index_for_new_builder#gloas
|
||||
- get_pending_balance_to_withdraw_for_builder#gloas
|
||||
- get_proposer_preferences_signature#gloas
|
||||
- get_upcoming_proposal_slots#gloas
|
||||
- initiate_builder_exit#gloas
|
||||
- is_active_builder#gloas
|
||||
- is_builder_index#gloas
|
||||
- is_data_available#gloas
|
||||
@@ -404,7 +447,6 @@ exceptions:
|
||||
- is_valid_proposal_slot#gloas
|
||||
- onboard_builders_from_pending_deposits#gloas
|
||||
- process_deposit_request#gloas
|
||||
- process_voluntary_exit#gloas
|
||||
- record_block_timeliness#gloas
|
||||
- record_block_timeliness#phase0
|
||||
- verify_data_column_sidecar_kzg_proofs#gloas
|
||||
@@ -413,6 +455,28 @@ exceptions:
|
||||
- update_next_withdrawal_builder_index#gloas
|
||||
- update_payload_expected_withdrawals#gloas
|
||||
- update_proposer_boost_root#gloas
|
||||
# heze
|
||||
- compute_fork_version#heze
|
||||
- get_forkchoice_store#heze
|
||||
- get_inclusion_list_bits#heze
|
||||
- get_inclusion_list_committee_assignment#heze
|
||||
- get_inclusion_list_committee#heze
|
||||
- get_inclusion_list_signature#heze
|
||||
- get_inclusion_list_store#heze
|
||||
- get_inclusion_list_submission_due_ms#heze
|
||||
- get_inclusion_list_transactions#heze
|
||||
- get_proposer_inclusion_list_cutoff_ms#heze
|
||||
- get_view_freeze_cutoff_ms#heze
|
||||
- is_inclusion_list_bits_inclusive#heze
|
||||
- is_payload_inclusion_list_satisfied#heze
|
||||
- is_valid_inclusion_list_signature#heze
|
||||
- on_execution_payload#heze
|
||||
- on_inclusion_list#heze
|
||||
- prepare_execution_payload#heze
|
||||
- process_inclusion_list#heze
|
||||
- record_payload_inclusion_list_satisfaction#heze
|
||||
- should_extend_payload#heze
|
||||
- upgrade_to_heze#heze
|
||||
|
||||
presets:
|
||||
# gloas
|
||||
@@ -421,3 +485,5 @@ exceptions:
|
||||
- MAX_BUILDERS_PER_WITHDRAWALS_SWEEP#gloas
|
||||
- MAX_PAYLOAD_ATTESTATIONS#gloas
|
||||
- PTC_SIZE#gloas
|
||||
# heze
|
||||
- INCLUSION_LIST_COMMITTEE_SIZE#heze
|
||||
|
||||
16
.github/workflows/clang-format.yml
vendored
16
.github/workflows/clang-format.yml
vendored
@@ -1,6 +1,6 @@
|
||||
name: Protobuf Format
|
||||
|
||||
on:
|
||||
on:
|
||||
push:
|
||||
branches: [ '*' ]
|
||||
pull_request:
|
||||
@@ -12,10 +12,14 @@ jobs:
|
||||
clang-format-checking:
|
||||
runs-on: ubuntu-4
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
# Is this step failing for you?
|
||||
- uses: actions/checkout@v6
|
||||
# Is this step failing for you?
|
||||
# Run: clang-format -i proto/**/*.proto
|
||||
# See: https://clang.llvm.org/docs/ClangFormat.html
|
||||
- uses: RafikFarhad/clang-format-github-action@v3
|
||||
with:
|
||||
sources: "proto/**/*.proto"
|
||||
- name: Install clang-format
|
||||
run: |
|
||||
sudo apt-get update -qq
|
||||
sudo apt-get install -y clang-format
|
||||
- name: Check protobuf formatting
|
||||
run: |
|
||||
clang-format --style=LLVM --dry-run --Werror proto/**/*.proto
|
||||
|
||||
67
.github/workflows/sbom-export.yaml
vendored
Normal file
67
.github/workflows/sbom-export.yaml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
name: SBOM Export & Centralize
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "develop" ]
|
||||
schedule:
|
||||
- cron: '50 21 * * 2'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
generate-and-upload:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Source Code
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Check for recent changes
|
||||
id: check
|
||||
run: |
|
||||
if [ -z "$(git log --since='7 days ago' --oneline | head -1)" ]; then
|
||||
echo "No commits in the last 7 days, skipping SBOM generation."
|
||||
echo "skip=true" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Generate CycloneDX SBOM via cdxgen
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
docker run --rm \
|
||||
--user "$(id -u):$(id -g)" \
|
||||
-v /tmp:/tmp \
|
||||
-v "${{ github.workspace }}:/app:rw" \
|
||||
-e FETCH_LICENSE=true \
|
||||
-e GITHUB_TOKEN \
|
||||
ghcr.io/cdxgen/cdxgen:v12.1.1 \
|
||||
-r /app \
|
||||
-o /app/sbom.cdx.json \
|
||||
--no-install-deps \
|
||||
--spec-version 1.6
|
||||
|
||||
if [ ! -s sbom.cdx.json ]; then
|
||||
echo "::error::cdxgen SBOM generation failed or returned empty."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "SBOM generated successfully:"
|
||||
ls -lh sbom.cdx.json
|
||||
|
||||
- name: Upload SBOM to Dependency Track
|
||||
if: steps.check.outputs.skip != 'true'
|
||||
env:
|
||||
DT_API_KEY: ${{ secrets.DEPENDENCY_TRACK_API_KEY }}
|
||||
DT_URL: ${{ secrets.DEPENDENCY_TRACK_URL }}
|
||||
run: |
|
||||
REPO_NAME=${GITHUB_REPOSITORY##*/}
|
||||
|
||||
curl -sf -X POST "${DT_URL}/api/v1/bom" \
|
||||
-H "X-Api-Key: ${DT_API_KEY}" \
|
||||
-F "autoCreate=true" \
|
||||
-F "projectName=${REPO_NAME}" \
|
||||
-F "projectVersion=${{ github.ref_name }}" \
|
||||
-F "bom=@sbom.cdx.json"
|
||||
|
||||
echo "SBOM uploaded to Dependency Track for ${REPO_NAME}@${{ github.ref_name }}"
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -44,6 +44,3 @@ tmp
|
||||
|
||||
# spectest coverage reports
|
||||
report.txt
|
||||
|
||||
# execution client data
|
||||
execution/
|
||||
|
||||
112
CHANGELOG.md
112
CHANGELOG.md
@@ -4,6 +4,116 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
|
||||
|
||||
## [v7.1.3](https://github.com/prysmaticlabs/prysm/compare/v7.1.2...v7.1.3) - 2026-03-18
|
||||
|
||||
This release brings extensive Gloas (next fork) groundwork, a major logging infrastructure overhaul, and numerous performance optimizations across the beacon chain. A security update to go-ethereum v1.16.8 is also included.
|
||||
|
||||
Release highlights:
|
||||
|
||||
- **Gloas fork preparation**: Builder registry, bid processing, payload attestation, proposer slashing, slot processing, block API endpoints, and duty timing intervals are all wired up.
|
||||
- **Logging revamp**: New ephemeral debug logfile (24h retention, enabled by default), per-package loggers with CI enforcement, per-hook verbosity control (`--log.vmodule`), and a version banner at startup.
|
||||
- **Performance**: Forkchoice updates moved to background, post-Electra attestation data cached per slot, parallel data column cache warmup, reduced heap allocations in SSZ marshaling and `MixInLength`, and proposer preprocessing behind a feature flag.
|
||||
- **Validator client**: gRPC fallback now matches the REST API implementation — both connect only to fully synced nodes. The gRPC health endpoint returns an error on syncing/optimistic status.
|
||||
- **Security**: go-ethereum updated to v1.16.8; fixed an authentication bypass on `/v2/validator/*` endpoints.
|
||||
- **State storage**: Initial support for the `hdiff` state-diff feature — migration-to-cold and DB initialization are now available behind feature flags.
|
||||
|
||||
There are no known security issues in this release. Operators can update at their convenience.
|
||||
|
||||
### Added
|
||||
|
||||
- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16109)
|
||||
- Added separate logrus hooks for handling the formatting and output of terminal logs vs log-file logs, instead of the. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16102)
|
||||
- Batch publish data columns for faster data propogation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16183)
|
||||
- `--disable-get-blobs-v2` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16155)
|
||||
- Update spectests to v1.7.0-alpha.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16219)
|
||||
- Added basic Gloas builder support (`Builder` message and `BeaconStateGloas` `builders`/`next_withdrawal_builder_index` fields). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16164)
|
||||
- Added an ephemeral debug logfile that for beacon and validator nodes that captures debug-level logs for 24 hours. It. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16108)
|
||||
- Add a feature flag to pass spectests with low validator count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16231)
|
||||
- Add feature flag `--enable-proposer-preprocessing` to process the block and verify signatures before proposing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15920)
|
||||
- Add `ProofByFieldIndex` to generalize merkle proof generation for `BeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15443)
|
||||
- Update spectests to v1.7.0-alpha-1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16246)
|
||||
- Add feature flag to use hashtree instead of gohashtre. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16216)
|
||||
- Migrate to cold with the hdiff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16049)
|
||||
- Adding basic fulu fork transition support for mainnet and minimal e2e tests (multi scenario is not included). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
|
||||
- `commitment_count_in_gossip_processed_blocks` gauge metric to track the number of blob KZG commitments in processed beacon blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16254)
|
||||
- Add Gloas latest execution bid processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15638)
|
||||
- Added shell completion support for `beacon-chain` and `validator` CLI tools. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16245)
|
||||
- add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15655)
|
||||
- Add slot processing with execution payload availability updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15730)
|
||||
- Implement modified proposer slashing for gloas. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16212)
|
||||
- Added missing beacon config in fulu so that the presets don't go missing in /eth/v1/config/spec beacon api. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16170)
|
||||
- Close opened file in data_column.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16274)
|
||||
- Flag `--log.vmodule` to set per-package verbosity levels for logging. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16272)
|
||||
- Added a version log at startup to display the version of the build. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16283)
|
||||
- gloas block return support for /eth/v2/beacon/blocks/{block_id} and /eth/v1/beacon/blocks/{block_id}/root endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16278)
|
||||
- Add Gloas process payload attestation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15650)
|
||||
- Initialize db with state-diff feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16203)
|
||||
- Gloas-specific timing intervals for validator attestation, aggregation, and sync duties. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16291)
|
||||
- Added new proofCollector type to ssz-query. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16177)
|
||||
- Added README for maintaining specrefs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16302)
|
||||
- The ability to download the nightly reference tests from a specific day. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16298)
|
||||
- Set beacon node options after reading the config file. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16320)
|
||||
- Implement finalization-based eviction for `CheckpointStateCache`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16458)
|
||||
|
||||
### Changed
|
||||
|
||||
- Performance improvement in ProcessConsolidationRequests: Use more performance HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw as no need to calculate full total pending balance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16189)
|
||||
- Extend `httperror` analyzer to more functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16186)
|
||||
- Do not check block signature on state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14820)
|
||||
- Notify the engine about forkchoice updates in the background. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16149)
|
||||
- Use a separate context when updating the slot cache. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16209)
|
||||
- Data column sidecars cache warmup: Process in parallel all sidecars for a given epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16207)
|
||||
- Use lookahead to validate data column sidecar proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16202)
|
||||
- Summarize DEBUG log corresponding to incoming via gossip data column sidecar. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16210)
|
||||
- Added a log.go file for every important package with a logger variable containing a `package` field set to the package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
|
||||
- Added a CI check to ensure every important package has a log.go file with the correct `package` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
|
||||
- Changed the log formatter to use this `package` field instead of the previous `prefix` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
|
||||
- Replaced `time.Sleep` with `require.Eventually` polling in tests to fix flaky behavior caused by race conditions between goroutines and assertions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16217)
|
||||
- changed IsHealthy check to IsReady for validator client's interpretation from /eth/v1/node/health, 206 will now return false as the node is syncing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16167)
|
||||
- Performance improvement in state (MarshalSSZTo): use copy() instead of byte-by-byte loop which isn't required. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16222)
|
||||
- Moved verbosity settings to be configurable per hook, rather than just globally. This allows us to control the. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16106)
|
||||
- updated go ethereum to 1.16.7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
|
||||
- Use dependent root and target root to verify data column proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16250)
|
||||
- post electra we now call attestation data once per slot and use a cache for subsequent requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16236)
|
||||
- Avoid unnessary heap allocation while calling MixInLength. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16251)
|
||||
- Log commitments instead of indices in missingCommitError. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16258)
|
||||
- Added some defensive checks to prevent overflows in block batch requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16227)
|
||||
- gRPC health endpoint will now return an error on syncing or optimistic status showing that it's unavailable. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16294)
|
||||
- Sample PTC per committee to reduce allocations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16293)
|
||||
- gRPC fallback now matches rest api implementation and will also check and connect to only synced nodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
|
||||
- Improved node fallback logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
|
||||
- Improved integrations with ethspecify so specrefs can be used throughout the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16304)
|
||||
- Fixed the logging issue described in #16314. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16322)
|
||||
|
||||
### Removed
|
||||
|
||||
- removed github.com/MariusVanDerWijden/FuzzyVM and github.com/MariusVanDerWijden/tx-fuzz due to lack of support post 1.16.7, only used in e2e for transaction fuzzing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
|
||||
- Remove unused `delay` parameter from `fetchOriginDataColumnSidecars` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16262)
|
||||
- Batching of KZG verification for incoming via gossip data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16240)
|
||||
- `--disable-get-blobs-v2` flag from help. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16265)
|
||||
- gRPC resolver for load balancing, the new implementation matches rest api's so we should remove the resolver so it's handled the same way for consistency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
|
||||
|
||||
### Fixed
|
||||
|
||||
- avoid panic when fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16175)
|
||||
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16173)
|
||||
- Don't call trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize) twice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16211)
|
||||
- When adding the `--[semi-]supernode` flag, update the ealiest available slot accordingly. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16230)
|
||||
- fixed broken and old links to actual. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15856)
|
||||
- stop SlotIntervalTicker goroutine leaks [#16241](https://github.com/OffchainLabs/prysm/pull/16241). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16241)
|
||||
- Fix `prysmctl testnet generate-genesis` to use the timestamp from `--geth-genesis-json-in` when `--genesis-time` is not explicitly provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16239)
|
||||
- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16226)
|
||||
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16194)
|
||||
- Fixed a bug in `hack/check-logs.sh` where untracked files were ignored. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16287)
|
||||
- Fix hashtree release builds. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16288)
|
||||
- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16281)
|
||||
- a potential race condition when switching hosts quickly and reconnecting to same host on an old connection. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
|
||||
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16328)
|
||||
|
||||
### Security
|
||||
|
||||
- Update go-ethereum to v1.16.8. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16252)
|
||||
|
||||
## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07
|
||||
|
||||
Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).
|
||||
@@ -4046,4 +4156,4 @@ There are no security updates in this release.
|
||||
|
||||
# Older than v2.0.0
|
||||
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
10
WORKSPACE
10
WORKSPACE
@@ -273,16 +273,16 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.7.0-alpha.2"
|
||||
consensus_spec_version = "v1.7.0-alpha.4"
|
||||
|
||||
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
|
||||
|
||||
consensus_spec_tests(
|
||||
name = "consensus_spec_tests",
|
||||
flavors = {
|
||||
"general": "sha256-iGQsGZ1cHah+2CSod9jC3kN8Ku4n6KO0hIwfINrn/po=",
|
||||
"minimal": "sha256-TgcYt8N8sXSttdHTGvOa+exUZ1zn1UzlAMz0V7i37xc=",
|
||||
"mainnet": "sha256-LnXyiLoJtrvEvbqLDSAAqpLMdN/lXv92SAgYG8fNjCs=",
|
||||
"general": "sha256-kNJxuhCtW4RbuS9nb4U6JXHlPgTSg6G3hWeHFVB9gZ4=",
|
||||
"minimal": "sha256-U1tCkXxtdI6mkEdk80i8z9LU2hAyf7Ztz5SBYo5oMzo=",
|
||||
"mainnet": "sha256-Ga8VDOcNhTTdXDj8tSyBVYrwya9f1HO94ehJ5vv91r4=",
|
||||
},
|
||||
version = consensus_spec_version,
|
||||
)
|
||||
@@ -298,7 +298,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-Y/67Dg393PksZj5rTFNLntiJ6hNdB7Rxbu5gZE2gebY=",
|
||||
integrity = "sha256-XHu5K/65mue+5po63L9yGTFjGfU1RGj4S56dmcHc2Rs=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -46,7 +46,7 @@ func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecke
|
||||
"previous": startingHost,
|
||||
"current": provider.CurrentHost(),
|
||||
"tried": attemptedHosts,
|
||||
}).Info("Switched to responsive beacon node")
|
||||
}).Warn("Switched to responsive beacon node")
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -92,7 +92,11 @@ func AcceptEncodingHeaderHandler() Middleware {
|
||||
return
|
||||
}
|
||||
|
||||
gz := gzip.NewWriter(w)
|
||||
gz, err := gzip.NewWriterLevel(w, gzip.BestSpeed)
|
||||
if err != nil {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
gzipRW := &gzipResponseWriter{gz: gz, ResponseWriter: w}
|
||||
defer func() {
|
||||
if !gzipRW.zip {
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
"conversions_blob.go",
|
||||
"conversions_block.go",
|
||||
"conversions_block_execution.go",
|
||||
"conversions_gloas.go",
|
||||
"conversions_lightclient.go",
|
||||
"conversions_state.go",
|
||||
"endpoints_beacon.go",
|
||||
@@ -57,10 +58,12 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
],
|
||||
|
||||
@@ -540,6 +540,12 @@ type PayloadAttestation struct {
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type PayloadAttestationMessage struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
Data *PayloadAttestationData `json:"data"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyGloas struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
@@ -577,3 +583,17 @@ func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
|
||||
func (s *SignedBeaconBlockGloas) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type ExecutionPayloadEnvelope struct {
|
||||
Payload *ExecutionPayloadDeneb `json:"payload"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
BeaconBlockRoot string `json:"beacon_block_root"`
|
||||
Slot string `json:"slot"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}
|
||||
|
||||
type SignedExecutionPayloadEnvelope struct {
|
||||
Message *ExecutionPayloadEnvelope `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
@@ -2966,6 +2966,14 @@ func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttesta
|
||||
}
|
||||
}
|
||||
|
||||
func PayloadAttestationMessageFromConsensus(m *eth.PayloadAttestationMessage) *PayloadAttestationMessage {
|
||||
return &PayloadAttestationMessage{
|
||||
ValidatorIndex: fmt.Sprintf("%d", m.ValidatorIndex),
|
||||
Data: PayloadAttestationDataFromConsensus(m.Data),
|
||||
Signature: hexutil.Encode(m.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
|
||||
return &PayloadAttestationData{
|
||||
BeaconBlockRoot: hexutil.Encode(d.BeaconBlockRoot),
|
||||
@@ -3275,3 +3283,26 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
|
||||
BlobDataAvailable: d.BlobDataAvailable,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
|
||||
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
|
||||
payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var requests *ExecutionRequests
|
||||
if e.Message.ExecutionRequests != nil {
|
||||
requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
|
||||
}
|
||||
return &SignedExecutionPayloadEnvelope{
|
||||
Message: &ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: requests,
|
||||
BuilderIndex: fmt.Sprintf("%d", e.Message.BuilderIndex),
|
||||
BeaconBlockRoot: hexutil.Encode(e.Message.BeaconBlockRoot),
|
||||
Slot: fmt.Sprintf("%d", e.Message.Slot),
|
||||
StateRoot: hexutil.Encode(e.Message.StateRoot),
|
||||
},
|
||||
Signature: hexutil.Encode(e.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
89
api/server/structs/conversions_gloas.go
Normal file
89
api/server/structs/conversions_gloas.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pbh := b.ParentBlockHash()
|
||||
pbr := b.ParentBlockRoot()
|
||||
bh := b.BlockHash()
|
||||
pr := b.PrevRandao()
|
||||
fr := b.FeeRecipient()
|
||||
commitments := b.BlobKzgCommitments()
|
||||
blobKzgCommitments := make([]string, 0, len(commitments))
|
||||
for _, commitment := range commitments {
|
||||
blobKzgCommitments = append(blobKzgCommitments, hexutil.Encode(commitment))
|
||||
}
|
||||
return &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(pbh[:]),
|
||||
ParentBlockRoot: hexutil.Encode(pbr[:]),
|
||||
BlockHash: hexutil.Encode(bh[:]),
|
||||
PrevRandao: hexutil.Encode(pr[:]),
|
||||
FeeRecipient: hexutil.Encode(fr[:]),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit()),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex()),
|
||||
Slot: fmt.Sprintf("%d", b.Slot()),
|
||||
Value: fmt.Sprintf("%d", b.Value()),
|
||||
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment()),
|
||||
BlobKzgCommitments: blobKzgCommitments,
|
||||
}
|
||||
}
|
||||
|
||||
func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
|
||||
newBuilders := make([]*Builder, len(builders))
|
||||
for i, b := range builders {
|
||||
newBuilders[i] = BuilderFromConsensus(b)
|
||||
}
|
||||
return newBuilders
|
||||
}
|
||||
|
||||
func BuilderFromConsensus(b *ethpb.Builder) *Builder {
|
||||
return &Builder{
|
||||
Pubkey: hexutil.Encode(b.Pubkey),
|
||||
Version: hexutil.Encode(b.Version),
|
||||
ExecutionAddress: hexutil.Encode(b.ExecutionAddress),
|
||||
Balance: fmt.Sprintf("%d", b.Balance),
|
||||
DepositEpoch: fmt.Sprintf("%d", b.DepositEpoch),
|
||||
WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
|
||||
}
|
||||
}
|
||||
|
||||
func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
|
||||
newPayments := make([]*BuilderPendingPayment, len(payments))
|
||||
for i, p := range payments {
|
||||
newPayments[i] = BuilderPendingPaymentFromConsensus(p)
|
||||
}
|
||||
return newPayments
|
||||
}
|
||||
|
||||
func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
|
||||
return &BuilderPendingPayment{
|
||||
Weight: fmt.Sprintf("%d", p.Weight),
|
||||
Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
|
||||
}
|
||||
}
|
||||
|
||||
func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
|
||||
newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
|
||||
for i, w := range withdrawals {
|
||||
newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
|
||||
}
|
||||
return newWithdrawals
|
||||
}
|
||||
|
||||
func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
|
||||
return &BuilderPendingWithdrawal{
|
||||
FeeRecipient: hexutil.Encode(w.FeeRecipient),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
|
||||
}
|
||||
}
|
||||
@@ -972,3 +972,223 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
ProposerLookahead: lookahead,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Gloas
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
|
||||
srcBr := st.BlockRoots()
|
||||
br := make([]string, len(srcBr))
|
||||
for i, r := range srcBr {
|
||||
br[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcSr := st.StateRoots()
|
||||
sr := make([]string, len(srcSr))
|
||||
for i, r := range srcSr {
|
||||
sr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcHr := st.HistoricalRoots()
|
||||
hr := make([]string, len(srcHr))
|
||||
for i, r := range srcHr {
|
||||
hr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcVotes := st.Eth1DataVotes()
|
||||
votes := make([]*Eth1Data, len(srcVotes))
|
||||
for i, e := range srcVotes {
|
||||
votes[i] = Eth1DataFromConsensus(e)
|
||||
}
|
||||
srcVals := st.Validators()
|
||||
vals := make([]*Validator, len(srcVals))
|
||||
for i, v := range srcVals {
|
||||
vals[i] = ValidatorFromConsensus(v)
|
||||
}
|
||||
srcBals := st.Balances()
|
||||
bals := make([]string, len(srcBals))
|
||||
for i, b := range srcBals {
|
||||
bals[i] = fmt.Sprintf("%d", b)
|
||||
}
|
||||
srcRm := st.RandaoMixes()
|
||||
rm := make([]string, len(srcRm))
|
||||
for i, m := range srcRm {
|
||||
rm[i] = hexutil.Encode(m)
|
||||
}
|
||||
srcSlashings := st.Slashings()
|
||||
slashings := make([]string, len(srcSlashings))
|
||||
for i, s := range srcSlashings {
|
||||
slashings[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
srcPrevPart, err := st.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevPart := make([]string, len(srcPrevPart))
|
||||
for i, p := range srcPrevPart {
|
||||
prevPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcCurrPart, err := st.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currPart := make([]string, len(srcCurrPart))
|
||||
for i, p := range srcCurrPart {
|
||||
currPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcIs, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is := make([]string, len(srcIs))
|
||||
for i, s := range srcIs {
|
||||
is[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
currSc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcHs, err := st.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hs := make([]*HistoricalSummary, len(srcHs))
|
||||
for i, s := range srcHs {
|
||||
hs[i] = HistoricalSummaryFromConsensus(s)
|
||||
}
|
||||
nwi, err := st.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwvi, err := st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drsi, err := st.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbtc, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ebtc, err := st.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eee, err := st.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cbtc, err := st.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ece, err := st.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pbd, err := st.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ppw, err := st.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcLookahead, err := st.ProposerLookahead()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookahead := make([]string, len(srcLookahead))
|
||||
for i, v := range srcLookahead {
|
||||
lookahead[i] = fmt.Sprintf("%d", uint64(v))
|
||||
}
|
||||
// Gloas-specific fields
|
||||
lepb, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builders, err := st.Builders()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwbi, err := st.NextWithdrawalBuilderIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epa, err := st.ExecutionPayloadAvailabilityVector()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpp, err := st.BuilderPendingPayments()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpw, err := st.BuilderPendingWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lbh, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pew, err := st.PayloadExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BeaconStateGloas{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: hr,
|
||||
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
|
||||
Eth1DataVotes: votes,
|
||||
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: slashings,
|
||||
PreviousEpochParticipation: prevPart,
|
||||
CurrentEpochParticipation: currPart,
|
||||
JustificationBits: hexutil.Encode(st.JustificationBits()),
|
||||
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
|
||||
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
|
||||
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
|
||||
InactivityScores: is,
|
||||
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
|
||||
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
|
||||
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
|
||||
HistoricalSummaries: hs,
|
||||
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
|
||||
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
|
||||
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
|
||||
EarliestExitEpoch: fmt.Sprintf("%d", eee),
|
||||
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
|
||||
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
ProposerLookahead: lookahead,
|
||||
LatestExecutionPayloadBid: ROExecutionPayloadBidFromConsensus(lepb),
|
||||
Builders: BuildersFromConsensus(builders),
|
||||
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
|
||||
ExecutionPayloadAvailability: hexutil.Encode(epa),
|
||||
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
|
||||
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
|
||||
LatestBlockHash: hexutil.Encode(lbh[:]),
|
||||
PayloadExpectedWithdrawals: WithdrawalsFromConsensus(pew),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
@@ -355,3 +359,214 @@ func TestIndexedAttestation_ToConsensus(t *testing.T) {
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
|
||||
t.Run("empty blobkzg commitments", func(t *testing.T) {
|
||||
bid := ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 100,
|
||||
BuilderIndex: 7,
|
||||
Slot: 9,
|
||||
Value: 11,
|
||||
ExecutionPayment: 22,
|
||||
BlobKzgCommitments: [][]byte{},
|
||||
}
|
||||
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
|
||||
require.NoError(t, err)
|
||||
|
||||
got := ROExecutionPayloadBidFromConsensus(roBid)
|
||||
want := &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(bid.BlockHash),
|
||||
PrevRandao: hexutil.Encode(bid.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
|
||||
GasLimit: "100",
|
||||
BuilderIndex: "7",
|
||||
Slot: "9",
|
||||
Value: "11",
|
||||
ExecutionPayment: "22",
|
||||
BlobKzgCommitments: []string{},
|
||||
}
|
||||
assert.DeepEqual(t, want, got)
|
||||
})
|
||||
|
||||
t.Run("default", func(t *testing.T) {
|
||||
bid := ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 100,
|
||||
BuilderIndex: 7,
|
||||
Slot: 9,
|
||||
Value: 11,
|
||||
ExecutionPayment: 22,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x06}, 48)},
|
||||
}
|
||||
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
|
||||
require.NoError(t, err)
|
||||
|
||||
var bkcs []string
|
||||
for _, commitment := range roBid.BlobKzgCommitments() {
|
||||
bkcs = append(bkcs, hexutil.Encode(commitment))
|
||||
}
|
||||
|
||||
got := ROExecutionPayloadBidFromConsensus(roBid)
|
||||
want := &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(bid.BlockHash),
|
||||
PrevRandao: hexutil.Encode(bid.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
|
||||
GasLimit: "100",
|
||||
BuilderIndex: "7",
|
||||
Slot: "9",
|
||||
Value: "11",
|
||||
ExecutionPayment: "22",
|
||||
BlobKzgCommitments: bkcs,
|
||||
}
|
||||
assert.DeepEqual(t, want, got)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderConversionsFromConsensus(t *testing.T) {
|
||||
builder := ð.Builder{
|
||||
Pubkey: bytes.Repeat([]byte{0xAA}, 48),
|
||||
Version: bytes.Repeat([]byte{0x01}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0xBB}, 20),
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
}
|
||||
wantBuilder := &Builder{
|
||||
Pubkey: hexutil.Encode(builder.Pubkey),
|
||||
Version: hexutil.Encode(builder.Version),
|
||||
ExecutionAddress: hexutil.Encode(builder.ExecutionAddress),
|
||||
Balance: "42",
|
||||
DepositEpoch: "3",
|
||||
WithdrawableEpoch: "4",
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
|
||||
assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
|
||||
}
|
||||
|
||||
func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
|
||||
withdrawal := ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
|
||||
Amount: 15,
|
||||
BuilderIndex: 2,
|
||||
}
|
||||
payment := ð.BuilderPendingPayment{
|
||||
Weight: 5,
|
||||
Withdrawal: withdrawal,
|
||||
}
|
||||
wantWithdrawal := &BuilderPendingWithdrawal{
|
||||
FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
|
||||
Amount: "15",
|
||||
BuilderIndex: "2",
|
||||
}
|
||||
wantPayment := &BuilderPendingPayment{
|
||||
Weight: "5",
|
||||
Withdrawal: wantWithdrawal,
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
|
||||
assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
|
||||
assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
|
||||
assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
|
||||
}
|
||||
|
||||
func TestBeaconStateGloasFromConsensus(t *testing.T) {
|
||||
st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
|
||||
state.GenesisTime = 123
|
||||
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
|
||||
state.Slot = 5
|
||||
state.ProposerLookahead = []uint64{1, 2}
|
||||
state.LatestExecutionPayloadBid = ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x13}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x14}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x15}, 20),
|
||||
GasLimit: 64,
|
||||
BuilderIndex: 3,
|
||||
Slot: 5,
|
||||
Value: 99,
|
||||
ExecutionPayment: 7,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x16}, 48)},
|
||||
}
|
||||
state.Builders = []*eth.Builder{
|
||||
{
|
||||
Pubkey: bytes.Repeat([]byte{0x20}, 48),
|
||||
Version: bytes.Repeat([]byte{0x21}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x22}, 20),
|
||||
Balance: 88,
|
||||
DepositEpoch: 1,
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
}
|
||||
state.NextWithdrawalBuilderIndex = 9
|
||||
state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
|
||||
state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
|
||||
{
|
||||
Weight: 3,
|
||||
Withdrawal: ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
|
||||
Amount: 4,
|
||||
BuilderIndex: 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
|
||||
{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
|
||||
Amount: 6,
|
||||
BuilderIndex: 7,
|
||||
},
|
||||
}
|
||||
state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
|
||||
state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
|
||||
{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := BeaconStateGloasFromConsensus(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "123", got.GenesisTime)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
|
||||
require.Equal(t, "5", got.Slot)
|
||||
require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
|
||||
require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
|
||||
require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)
|
||||
|
||||
require.NotNil(t, got.LatestExecutionPayloadBid)
|
||||
require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)
|
||||
|
||||
require.NotNil(t, got.Builders)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
|
||||
require.Equal(t, "88", got.Builders[0].Balance)
|
||||
|
||||
require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
|
||||
require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)
|
||||
|
||||
require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)
|
||||
|
||||
require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
|
||||
require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
|
||||
require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
|
||||
}
|
||||
|
||||
@@ -285,6 +285,13 @@ type GetBlobsResponse struct {
|
||||
Data []string `json:"data"` //blobs
|
||||
}
|
||||
|
||||
type GetExecutionPayloadEnvelopeResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data *SignedExecutionPayloadEnvelope `json:"data"`
|
||||
}
|
||||
|
||||
type SSZQueryRequest struct {
|
||||
Query string `json:"query"`
|
||||
IncludeProof bool `json:"include_proof,omitempty"`
|
||||
|
||||
@@ -112,3 +112,8 @@ type LightClientOptimisticUpdateEvent struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientOptimisticUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type PayloadEvent struct {
|
||||
Slot string `json:"slot"`
|
||||
BlockRoot string `json:"block_root"`
|
||||
}
|
||||
|
||||
@@ -262,3 +262,23 @@ type PendingConsolidation struct {
|
||||
SourceIndex string `json:"source_index"`
|
||||
TargetIndex string `json:"target_index"`
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
Version string `json:"version"`
|
||||
ExecutionAddress string `json:"execution_address"`
|
||||
Balance string `json:"balance"`
|
||||
DepositEpoch string `json:"deposit_epoch"`
|
||||
WithdrawableEpoch string `json:"withdrawable_epoch"`
|
||||
}
|
||||
|
||||
type BuilderPendingPayment struct {
|
||||
Weight string `json:"weight"`
|
||||
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
|
||||
}
|
||||
|
||||
type BuilderPendingWithdrawal struct {
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
Amount string `json:"amount"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
}
|
||||
|
||||
@@ -221,3 +221,51 @@ type BeaconStateFulu struct {
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
}
|
||||
|
||||
type BeaconStateGloas struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
|
||||
Builders []*Builder `json:"builders"`
|
||||
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
|
||||
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
|
||||
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
|
||||
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
|
||||
LatestBlockHash string `json:"latest_block_hash"`
|
||||
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
|
||||
}
|
||||
|
||||
@@ -136,6 +136,7 @@ go_test(
|
||||
"process_block_test.go",
|
||||
"receive_attestation_test.go",
|
||||
"receive_block_test.go",
|
||||
"receive_payload_attestation_message_test.go",
|
||||
"service_norace_test.go",
|
||||
"service_test.go",
|
||||
"setup_forkchoice_test.go",
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -39,11 +40,15 @@ type ChainInfoFetcher interface {
|
||||
// of locking forkchoice
|
||||
type ForkchoiceFetcher interface {
|
||||
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
|
||||
BlockHash(root [32]byte) ([32]byte, error)
|
||||
CachedHeadRoot() [32]byte
|
||||
GetProposerHead() [32]byte
|
||||
SetForkChoiceGenesisTime(time.Time)
|
||||
UpdateHead(context.Context, primitives.Slot)
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HasFullNode([32]byte) bool
|
||||
PayloadContentLookup([32]byte) ([32]byte, bool)
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
|
||||
InsertPayload(interfaces.ROExecutionPayloadEnvelope) error
|
||||
@@ -53,6 +58,8 @@ type ForkchoiceFetcher interface {
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
|
||||
DependentRoot(primitives.Epoch) ([32]byte, error)
|
||||
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
|
||||
ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool
|
||||
}
|
||||
|
||||
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
|
||||
@@ -114,6 +121,7 @@ type FinalizationFetcher interface {
|
||||
FinalizedBlockHash() [32]byte
|
||||
InForkchoice([32]byte) bool
|
||||
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
|
||||
ParentPayloadReady(interfaces.ReadOnlyBeaconBlock) bool
|
||||
}
|
||||
|
||||
// OptimisticModeFetcher retrieves information about optimistic status of the node.
|
||||
@@ -403,6 +411,32 @@ func (s *Service) InForkchoice(root [32]byte) bool {
|
||||
return s.cfg.ForkChoiceStore.HasNode(root)
|
||||
}
|
||||
|
||||
// ParentPayloadReady returns true if the block's parent payload is available
|
||||
// in forkchoice. For pre-Gloas blocks or blocks building on empty, this always
|
||||
// returns true. For blocks building on full, it checks that the full node
|
||||
// exists.
|
||||
func (s *Service) ParentPayloadReady(blk interfaces.ReadOnlyBeaconBlock) bool {
|
||||
if blk.Version() < version.Gloas {
|
||||
return true
|
||||
}
|
||||
parentRoot := blk.ParentRoot()
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
bid, err := blk.Body().SignedExecutionPayloadBid()
|
||||
if err != nil || bid == nil || bid.Message == nil {
|
||||
return false
|
||||
}
|
||||
parentBlockHash := [32]byte(bid.Message.ParentBlockHash)
|
||||
if parentBlockHash != blockHash {
|
||||
return true // builds on empty, no full node needed
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.HasFullNode(parentRoot)
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot takes the root as argument instead of the current head
|
||||
// and returns true if it is optimistic.
|
||||
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
|
||||
@@ -565,3 +599,26 @@ func (s *Service) inRegularSync() bool {
|
||||
func (s *Service) validating() bool {
|
||||
return s.cfg.TrackedValidatorsCache.Validating()
|
||||
}
|
||||
|
||||
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
|
||||
func (s *Service) ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool {
|
||||
currentEpoch := slots.ToEpoch(s.CurrentSlot())
|
||||
if slots.ToEpoch(dataSlot) < currentEpoch {
|
||||
return false
|
||||
}
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
|
||||
if err != nil {
|
||||
// This should not happen. The caller should have already checked the parent is in forkchoice.
|
||||
return false
|
||||
}
|
||||
j := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
if j == nil {
|
||||
return false
|
||||
}
|
||||
if slots.ToEpoch(parentSlot) >= j.Epoch {
|
||||
return false
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.IsCanonical(parentRoot)
|
||||
}
|
||||
|
||||
@@ -42,6 +42,34 @@ func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockRoot returns the corresponding value from forkchoice
|
||||
func (s *Service) HighestReceivedBlockRoot() [32]byte {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
}
|
||||
|
||||
// BlockHash returns the execution payload block hash for the given beacon block root from forkchoice.
|
||||
func (s *Service) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.BlockHash(root)
|
||||
}
|
||||
|
||||
// HasFullNode returns the corresponding value from forkchoice
|
||||
func (s *Service) HasFullNode(root [32]byte) bool {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.HasFullNode(root)
|
||||
}
|
||||
|
||||
// PayloadContentLookup returns the preferred payload-content lookup key from forkchoice.
|
||||
func (s *Service) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.PayloadContentLookup(root)
|
||||
}
|
||||
|
||||
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
|
||||
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
@@ -129,6 +157,13 @@ func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byt
|
||||
if st.Version() < version.Bellatrix {
|
||||
return nil, nil
|
||||
}
|
||||
if st.Version() >= version.Gloas {
|
||||
h, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest block hash")
|
||||
}
|
||||
return bytesutil.SafeCopyBytes(h[:]), nil
|
||||
}
|
||||
header, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest execution payload header")
|
||||
@@ -136,6 +171,13 @@ func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byt
|
||||
return bytesutil.SafeCopyBytes(header.BlockHash()), nil
|
||||
}
|
||||
|
||||
// CanonicalNodeAtSlot wraps the corresponding method in forkchoice
|
||||
func (s *Service) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.CanonicalNodeAtSlot(slot)
|
||||
}
|
||||
|
||||
// DependentRoot wraps the corresponding method in forkchoice
|
||||
func (s *Service) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -620,6 +621,188 @@ func TestService_IsFinalized(t *testing.T) {
|
||||
require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
|
||||
}
|
||||
|
||||
func TestParentPayloadReady(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 0
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
fcs := tr.fcs
|
||||
|
||||
parentRoot := [32]byte{1}
|
||||
parentBlockHash := [32]byte{10}
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
|
||||
// Insert parent node into forkchoice.
|
||||
st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, zeroHash, parentBlockHash, zeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, parentROBlock))
|
||||
|
||||
t.Run("pre-Gloas always true", func(t *testing.T) {
|
||||
blk := util.HydrateSignedBeaconBlockDeneb(ðpb.SignedBeaconBlockDeneb{
|
||||
Block: ðpb.BeaconBlockDeneb{ParentRoot: parentRoot[:]},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
|
||||
})
|
||||
|
||||
t.Run("parent not in forkchoice", func(t *testing.T) {
|
||||
unknownParent := [32]byte{99}
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
},
|
||||
})
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
ParentRoot: unknownParent[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
|
||||
},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
|
||||
})
|
||||
|
||||
t.Run("builds on empty", func(t *testing.T) {
|
||||
differentHash := [32]byte{99}
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
ParentBlockHash: differentHash[:],
|
||||
},
|
||||
})
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
|
||||
},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
|
||||
})
|
||||
|
||||
t.Run("builds on full without payload", func(t *testing.T) {
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
},
|
||||
})
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
|
||||
},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
|
||||
})
|
||||
|
||||
t.Run("builds on full with payload", func(t *testing.T) {
|
||||
pe, err := blocks.WrappedROExecutionPayloadEnvelope(ðpb.ExecutionPayloadEnvelope{
|
||||
BeaconBlockRoot: parentRoot[:],
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertPayload(pe))
|
||||
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
|
||||
ParentBlockHash: parentBlockHash[:],
|
||||
},
|
||||
})
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
|
||||
},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestService_ShouldIgnoreData(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
fcs := tr.fcs
|
||||
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
currentSlot := service.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Build a chain in forkchoice:
|
||||
// genesis (slot 0) -> nodeA (slot 1, epoch 0) -> nodeB (slot slotsPerEpoch, epoch 1) -> nodeC (slot 2*slotsPerEpoch, epoch 2)
|
||||
nodeARoot := [32]byte{1}
|
||||
nodeBRoot := [32]byte{2}
|
||||
nodeCRoot := [32]byte{3}
|
||||
nodeASlot := primitives.Slot(1)
|
||||
nodeBSlot := primitives.Slot(slotsPerEpoch) // epoch 1
|
||||
nodeCSlot := primitives.Slot(2 * slotsPerEpoch) // epoch 2
|
||||
|
||||
stA, robA, err := prepareForkchoiceState(ctx, nodeASlot, nodeARoot, zeroHash, [32]byte{10}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, stA, robA))
|
||||
|
||||
stB, robB, err := prepareForkchoiceState(ctx, nodeBSlot, nodeBRoot, nodeARoot, [32]byte{11}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, stB, robB))
|
||||
|
||||
stC, robC, err := prepareForkchoiceState(ctx, nodeCSlot, nodeCRoot, nodeBRoot, [32]byte{12}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, stC, robC))
|
||||
|
||||
// Set justified checkpoint to nodeB (epoch 1).
|
||||
fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 1, Root: nodeBRoot}))
|
||||
|
||||
t.Run("past epoch data is not ignored", func(t *testing.T) {
|
||||
pastSlot := primitives.Slot((currentEpoch - 1) * primitives.Epoch(slotsPerEpoch))
|
||||
require.Equal(t, false, service.ShouldIgnoreData(nodeARoot, pastSlot))
|
||||
})
|
||||
|
||||
t.Run("parent not in forkchoice", func(t *testing.T) {
|
||||
unknownRoot := [32]byte{99}
|
||||
require.Equal(t, false, service.ShouldIgnoreData(unknownRoot, currentSlot))
|
||||
})
|
||||
|
||||
t.Run("parent epoch at or after justified", func(t *testing.T) {
|
||||
// nodeB is at epoch 1, justified is epoch 1 => parentEpoch >= justified => false
|
||||
require.Equal(t, false, service.ShouldIgnoreData(nodeBRoot, currentSlot))
|
||||
})
|
||||
|
||||
t.Run("canonical parent before justified is ignored", func(t *testing.T) {
|
||||
// nodeA is at epoch 0 < justified epoch 1, and is canonical => true
|
||||
require.Equal(t, true, service.ShouldIgnoreData(nodeARoot, currentSlot))
|
||||
})
|
||||
|
||||
t.Run("non-canonical parent before justified is not ignored", func(t *testing.T) {
|
||||
// Insert a fork: nodeD at slot 2 (epoch 0) branching from nodeA, not on the canonical chain.
|
||||
nodeDRoot := [32]byte{4}
|
||||
stD, robD, err := prepareForkchoiceState(ctx, 2, nodeDRoot, nodeARoot, [32]byte{13}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, ðpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, stD, robD))
|
||||
|
||||
// nodeD is at epoch 0 < justified epoch 1, but not canonical => false
|
||||
require.Equal(t, false, service.ShouldIgnoreData(nodeDRoot, currentSlot))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_hashForGenesisRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
@@ -637,3 +820,23 @@ func Test_hashForGenesisRoot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, [32]byte(genRoot))
|
||||
}
|
||||
|
||||
func Test_hashForGenesisRoot_Gloas(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
ctx := t.Context()
|
||||
c := setupBeaconChain(t, beaconDB)
|
||||
|
||||
expectedHash := [32]byte{1, 2, 3, 4, 5}
|
||||
st, err := state_native.InitializeFromProtoGloas(ðpb.BeaconStateGloas{
|
||||
LatestBlockHash: expectedHash[:],
|
||||
})
|
||||
require.NoError(t, err)
|
||||
genesis.StoreDuringTest(t, genesis.GenesisData{State: st})
|
||||
|
||||
genesisRoot := [32]byte{0xaa}
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
|
||||
genHash, err := c.hashForGenesisBlock(ctx, genesisRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedHash, [32]byte(genHash))
|
||||
}
|
||||
|
||||
@@ -321,7 +321,7 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, paren
|
||||
|
||||
// getPayloadAttributes returns the payload attributes for the given state and slot.
|
||||
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
|
||||
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
|
||||
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
|
||||
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||
|
||||
// If it is an epoch boundary then process slots to get the right
|
||||
@@ -343,7 +343,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
// right proposer index pre-Fulu, either way we need to copy the state to process it.
|
||||
st = st.Copy()
|
||||
var err error
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not process slots to get payload attribute")
|
||||
return emptyAttri
|
||||
@@ -371,66 +371,91 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
}
|
||||
|
||||
v := st.Version()
|
||||
|
||||
if v >= version.Deneb {
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: val.FeeRecipient[:],
|
||||
Withdrawals: withdrawals,
|
||||
ParentBeaconBlockRoot: headRoot,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
return attr
|
||||
switch {
|
||||
case v >= version.Gloas:
|
||||
return payloadAttributesGloas(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
|
||||
case v >= version.Deneb:
|
||||
return payloadAttributesDeneb(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
|
||||
case v >= version.Capella:
|
||||
return payloadAttributesCapella(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:])
|
||||
case v >= version.Bellatrix:
|
||||
return payloadAttributesBellatrix(uint64(t.Unix()), prevRando, val.FeeRecipient[:])
|
||||
default:
|
||||
log.WithField("version", version.String(v)).Error("Could not get payload attribute due to unknown state version")
|
||||
return payloadattribute.EmptyWithVersion(v)
|
||||
}
|
||||
}
|
||||
|
||||
if v >= version.Capella {
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: val.FeeRecipient[:],
|
||||
Withdrawals: withdrawals,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
return attr
|
||||
func payloadAttributesGloas(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
|
||||
withdrawals, err := st.WithdrawalsForPayload()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
|
||||
if v >= version.Bellatrix {
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: val.FeeRecipient[:],
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
return attr
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRandao,
|
||||
SuggestedFeeRecipient: feeRecipient,
|
||||
Withdrawals: withdrawals,
|
||||
ParentBeaconBlockRoot: parentBeaconBlockRoot,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
|
||||
return emptyAttri
|
||||
func payloadAttributesDeneb(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRandao,
|
||||
SuggestedFeeRecipient: feeRecipient,
|
||||
Withdrawals: withdrawals,
|
||||
ParentBeaconBlockRoot: parentBeaconBlockRoot,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func payloadAttributesCapella(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
|
||||
withdrawals, _, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRandao,
|
||||
SuggestedFeeRecipient: feeRecipient,
|
||||
Withdrawals: withdrawals,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(st.Version())
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
func payloadAttributesBellatrix(timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
|
||||
Timestamp: timestamp,
|
||||
PrevRandao: prevRandao,
|
||||
SuggestedFeeRecipient: feeRecipient,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return payloadattribute.EmptyWithVersion(version.Bellatrix)
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
|
||||
|
||||
@@ -717,14 +717,14 @@ func Test_GetPayloadAttribute(t *testing.T) {
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
|
||||
@@ -732,7 +732,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
}
|
||||
@@ -747,7 +747,7 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
}
|
||||
@@ -757,14 +757,14 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateCapella(t, 1)
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
|
||||
attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
@@ -775,7 +775,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
@@ -809,14 +809,14 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
|
||||
attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{}, []byte{})
|
||||
require.Equal(t, true, attr.IsEmpty())
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
|
||||
a, err := attr.Withdrawals()
|
||||
@@ -827,7 +827,7 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
|
||||
attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
|
||||
require.Equal(t, false, attr.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
|
||||
a, err = attr.Withdrawals()
|
||||
|
||||
@@ -18,19 +18,21 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func (s *Service) isNewHead(r [32]byte) bool {
|
||||
func (s *Service) isNewHead(r [32]byte, full bool) bool {
|
||||
s.headLock.RLock()
|
||||
defer s.headLock.RUnlock()
|
||||
|
||||
currentHeadRoot := s.originBlockRoot
|
||||
currentFull := false
|
||||
if s.head != nil {
|
||||
currentHeadRoot = s.headRoot()
|
||||
currentFull = s.head.full
|
||||
}
|
||||
|
||||
return r != currentHeadRoot || r == [32]byte{}
|
||||
return r != currentHeadRoot || full != currentFull || r == [32]byte{}
|
||||
}
|
||||
|
||||
func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
func (s *Service) getStateAndBlock(ctx context.Context, r, h [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
|
||||
if !s.hasBlockInInitSyncOrDB(ctx, r) {
|
||||
return nil, nil, errors.New("block does not exist")
|
||||
}
|
||||
@@ -38,7 +40,7 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
|
||||
headState, err := s.cfg.StateGen.StateByRoot(ctx, h)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -70,7 +72,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
|
||||
return
|
||||
}
|
||||
// If head has not been updated and attributes are nil, we can skip the FCU.
|
||||
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
|
||||
if !s.isNewHead(cfg.headRoot, false) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
|
||||
return
|
||||
}
|
||||
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
|
||||
@@ -81,7 +83,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
|
||||
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
|
||||
if s.isNewHead(fcuArgs.headRoot) {
|
||||
if s.isNewHead(fcuArgs.headRoot, false) {
|
||||
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
|
||||
@@ -19,23 +19,42 @@ import (
|
||||
func TestService_isNewHead(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
require.Equal(t, true, service.isNewHead([32]byte{}))
|
||||
|
||||
// Zero root is always a new head
|
||||
require.Equal(t, true, service.isNewHead([32]byte{}, false))
|
||||
require.Equal(t, true, service.isNewHead([32]byte{}, true))
|
||||
|
||||
// Different root is a new head
|
||||
service.head = &head{root: [32]byte{1}}
|
||||
require.Equal(t, true, service.isNewHead([32]byte{2}))
|
||||
require.Equal(t, false, service.isNewHead([32]byte{1}))
|
||||
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
|
||||
|
||||
// Same root and same full status is not a new head
|
||||
require.Equal(t, false, service.isNewHead([32]byte{1}, false))
|
||||
|
||||
// Same root but different full status is a new head
|
||||
require.Equal(t, true, service.isNewHead([32]byte{1}, true))
|
||||
|
||||
// Same root and both full is not a new head
|
||||
service.head = &head{root: [32]byte{1}, full: true}
|
||||
require.Equal(t, false, service.isNewHead([32]byte{1}, true))
|
||||
|
||||
// Same root, head is full but incoming is not full, is a new head
|
||||
require.Equal(t, true, service.isNewHead([32]byte{1}, false))
|
||||
|
||||
// Nil head should use origin root
|
||||
service.head = nil
|
||||
service.originBlockRoot = [32]byte{3}
|
||||
require.Equal(t, true, service.isNewHead([32]byte{2}))
|
||||
require.Equal(t, false, service.isNewHead([32]byte{3}))
|
||||
require.Equal(t, true, service.isNewHead([32]byte{2}, false))
|
||||
require.Equal(t, false, service.isNewHead([32]byte{3}, false))
|
||||
|
||||
// Nil head with full=true is always a new head (originBlockRoot has full=false)
|
||||
require.Equal(t, true, service.isNewHead([32]byte{3}, true))
|
||||
}
|
||||
|
||||
func TestService_getHeadStateAndBlock(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
service := setupBeaconChain(t, beaconDB)
|
||||
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
|
||||
_, _, err := service.getStateAndBlock(t.Context(), [32]byte{}, [32]byte{})
|
||||
require.ErrorContains(t, "block does not exist", err)
|
||||
|
||||
blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(ðpb.SignedBeaconBlock{Signature: []byte{1}}))
|
||||
|
||||
@@ -1,11 +1,42 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *Service) waitUntilEpoch(target primitives.Epoch, secondsPerSlot uint64) error {
|
||||
if slots.ToEpoch(s.CurrentSlot()) >= target {
|
||||
return nil
|
||||
}
|
||||
ticker := slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
|
||||
defer ticker.Done()
|
||||
for {
|
||||
select {
|
||||
case slot := <-ticker.C():
|
||||
if slots.ToEpoch(slot) >= target {
|
||||
return nil
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
return s.ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getLookupParentRoot returns the root that serves as key to generate the parent state for the passed beacon block.
|
||||
// if it is based on empty or it is pre-Gloas, it is the parent root of the block, otherwise if it is based on full it is
|
||||
// the parent hash.
|
||||
@@ -16,6 +47,14 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
|
||||
if b.Version() < version.Gloas {
|
||||
return parentRoot, nil
|
||||
}
|
||||
parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "failed to get slot for parent root")
|
||||
}
|
||||
|
||||
if slots.ToEpoch(parentSlot) < params.BeaconConfig().GloasForkEpoch {
|
||||
return parentRoot, nil
|
||||
}
|
||||
blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "failed to get block hash for parent root")
|
||||
@@ -33,3 +72,142 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
|
||||
}
|
||||
return parentRoot, nil
|
||||
}
|
||||
|
||||
func (s *Service) runLatePayloadTasks() {
|
||||
if err := s.waitForSync(); err != nil {
|
||||
log.WithError(err).Error("Failed to wait for initial sync")
|
||||
return
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
if cfg.GloasForkEpoch == math.MaxUint64 {
|
||||
return
|
||||
}
|
||||
if err := s.waitUntilEpoch(cfg.GloasForkEpoch, cfg.SecondsPerSlot); err != nil {
|
||||
return
|
||||
}
|
||||
offset := cfg.SlotComponentDuration(cfg.PayloadAttestationDueBPS)
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, cfg.SecondsPerSlot)
|
||||
defer ticker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C():
|
||||
s.latePayloadTasks(s.ctx)
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting late payload tasks routine")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) checkIfProposing(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(st.Slot())
|
||||
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
|
||||
if e == stateEpoch || fuluAndNextEpoch {
|
||||
return s.trackedProposer(st, slot)
|
||||
}
|
||||
return cache.TrackedValidator{}, false
|
||||
}
|
||||
|
||||
// This is a Gloas version of getPayloadAttribute that avoids all the clutter that was originally due to the proposer Index.
|
||||
// It is guaranteed to be called for the current slot + 1 and the head state to have been advanced to at least the current epoch.
|
||||
func (s *Service) getPayloadAttributeGloas(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
|
||||
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||
val, proposing := s.checkIfProposing(st, slot)
|
||||
if !proposing {
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
st, err := transition.ProcessSlotsIfNeeded(ctx, st, accessRoot, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not process slots to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
// Get previous randao.
|
||||
prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get randao mix to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
// Get timestamp.
|
||||
t, err := slots.StartTime(s.genesisTime, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get timestamp to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
withdrawals, err := st.WithdrawalsForPayload()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
|
||||
attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
|
||||
Timestamp: uint64(t.Unix()),
|
||||
PrevRandao: prevRando,
|
||||
SuggestedFeeRecipient: val.FeeRecipient[:],
|
||||
Withdrawals: withdrawals,
|
||||
ParentBeaconBlockRoot: headRoot,
|
||||
})
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get payload attribute")
|
||||
return emptyAttri
|
||||
}
|
||||
return attr
|
||||
}
|
||||
|
||||
// latePayloadTasks updates the NSC and epoch boundary caches when there is no payload in the current slot (and there is a block)
|
||||
// The case where the block was also missing would have been dealt by lateBlockTasks already.
|
||||
// We call FCU only if we are proposing next slot, as the execution head is assumed to not have changed.
|
||||
func (s *Service) latePayloadTasks(ctx context.Context) {
|
||||
currentSlot := s.CurrentSlot()
|
||||
if currentSlot != s.HeadSlot() {
|
||||
// We must've already sent a FCU and updated the caches in lateBlockTaks.
|
||||
return
|
||||
}
|
||||
r, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get head root")
|
||||
return
|
||||
}
|
||||
hr := [32]byte(r)
|
||||
if s.payloadBeingSynced.isSyncing(hr) {
|
||||
return
|
||||
}
|
||||
if s.HasFullNode(hr) {
|
||||
return
|
||||
}
|
||||
st, err := s.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get head state")
|
||||
return
|
||||
}
|
||||
if !s.inRegularSync() {
|
||||
return
|
||||
}
|
||||
attr := s.getPayloadAttributeGloas(ctx, st, currentSlot+1, r, r)
|
||||
if attr == nil || attr.IsEmpty() {
|
||||
return
|
||||
}
|
||||
beaconLatePayloadTaskTriggeredTotal.Inc()
|
||||
// Head is the empty block.
|
||||
bh, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get latest block hash to notify engine")
|
||||
return
|
||||
}
|
||||
pid, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not notify forkchoice update")
|
||||
return
|
||||
}
|
||||
if pid == nil {
|
||||
log.Warn("Received nil payload ID from forkchoice update.")
|
||||
return
|
||||
}
|
||||
var pId [8]byte
|
||||
copy(pId[:], pid[:])
|
||||
s.cfg.PayloadIDCache.Set(currentSlot+1, hr, pId)
|
||||
}
|
||||
|
||||
@@ -3,12 +3,14 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
|
||||
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
@@ -19,6 +21,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func prepareGloasForkchoiceState(
|
||||
@@ -442,6 +446,37 @@ func TestPostPayloadHeadUpdate_NotHead(t *testing.T) {
|
||||
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, headRoot[:]))
|
||||
}
|
||||
|
||||
func TestPostPayloadHeadUpdate_SetsHeadFull(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
root := bytesutil.ToBytes32([]byte("root1"))
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.head = &head{root: root, block: signed, state: st, slot: 1}
|
||||
require.Equal(t, false, s.head.full)
|
||||
|
||||
env := ðpb.ExecutionPayloadEnvelope{
|
||||
BeaconBlockRoot: root[:],
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:], ParentHash: make([]byte, 32)},
|
||||
Slot: 1,
|
||||
}
|
||||
envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, root[:]))
|
||||
|
||||
s.headLock.RLock()
|
||||
require.Equal(t, true, s.head.full)
|
||||
s.headLock.RUnlock()
|
||||
}
|
||||
|
||||
func TestGetLookupParentRoot_PreGloas(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
|
||||
@@ -462,6 +497,12 @@ func TestGetLookupParentRoot_PreGloas(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 0
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, req := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -503,6 +544,12 @@ func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 0
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, req := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -542,3 +589,369 @@ func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
|
||||
// parentBlockHash == parentNodeBlockHash, so it builds on full => returns parentBlockHash
|
||||
require.Equal(t, parentNodeBlockHash, got)
|
||||
}
|
||||
|
||||
func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 2
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, req := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
|
||||
parentRoot := [32]byte{1}
|
||||
parentNodeBlockHash := [32]byte{10}
|
||||
parentSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
|
||||
require.NoError(t, err)
|
||||
parentSlot = parentSlot - 1
|
||||
|
||||
st, parentROBlock, err := prepareGloasForkchoiceState(
|
||||
ctx,
|
||||
parentSlot,
|
||||
parentRoot,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
parentNodeBlockHash,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
0,
|
||||
0,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))
|
||||
|
||||
blockHash := [32]byte{20}
|
||||
bid := util.HydrateSignedExecutionPayloadBid(ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
BlockHash: blockHash[:],
|
||||
ParentBlockHash: parentNodeBlockHash[:],
|
||||
},
|
||||
})
|
||||
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: parentSlot + 1,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
SignedExecutionPayloadBid: bid,
|
||||
},
|
||||
},
|
||||
})
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := service.getLookupParentRoot(roblock)
|
||||
require.NoError(t, err)
|
||||
// Parent slot is pre-fork, so always return parentRoot.
|
||||
require.Equal(t, parentRoot, got)
|
||||
}
|
||||
|
||||
func TestLatePayloadTasks_ReturnsEarlyWhenBlockLate(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
service, tr := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
headRoot := bytesutil.ToBytes32([]byte("headroot"))
|
||||
service.head = &head{
|
||||
root: headRoot,
|
||||
state: st,
|
||||
slot: 1,
|
||||
}
|
||||
// Set genesis time so CurrentSlot > HeadSlot.
|
||||
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||
|
||||
service.latePayloadTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
|
||||
// No payload ID should have been cached.
|
||||
_, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
|
||||
require.Equal(t, false, has)
|
||||
}
|
||||
|
||||
func TestLatePayloadTasks_SendsFCU(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PrepareAllPayloads: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
|
||||
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
headRoot := bytesutil.ToBytes32([]byte("headroot"))
|
||||
service.head = &head{
|
||||
root: headRoot,
|
||||
block: signed,
|
||||
state: st,
|
||||
slot: 1,
|
||||
}
|
||||
// CurrentSlot == HeadSlot == 1: place genesis 1.5 slots ago so we're solidly in slot 1.
|
||||
service.SetGenesisTime(time.Now().Add(-3 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second / 2))
|
||||
service.SetForkChoiceGenesisTime(service.genesisTime)
|
||||
|
||||
service.latePayloadTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
|
||||
require.LogsDoNotContain(t, logHook, "Could not get")
|
||||
// Payload ID should have been cached.
|
||||
cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
|
||||
require.Equal(t, true, has)
|
||||
require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
|
||||
}
|
||||
|
||||
func TestLateBlockTasks_GloasFCU(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PrepareAllPayloads: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
|
||||
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
headRoot := bytesutil.ToBytes32([]byte("headroot"))
|
||||
service.head = &head{
|
||||
root: headRoot,
|
||||
state: st,
|
||||
slot: 1,
|
||||
}
|
||||
|
||||
// Set genesis time so CurrentSlot > HeadSlot, triggering late block logic.
|
||||
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||
service.SetForkChoiceGenesisTime(service.genesisTime)
|
||||
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
|
||||
// Payload ID should have been cached by the Gloas FCU path.
|
||||
cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
|
||||
require.Equal(t, true, has)
|
||||
require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
|
||||
}
|
||||
|
||||
// TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead verifies that saveHead does not
|
||||
// treat the head as "full" when the latest execution payload bid was issued in a pre-fork epoch.
|
||||
// This guards against the Fulu->Gloas upgrade-seeded bid (bid.BlockHash == latestBlockHash,
|
||||
// bid.Slot == 0) causing a spurious full=true head before any real Gloas bid has been processed.
|
||||
func TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 1
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
// Create a Gloas state where IsParentBlockFull()==true (bid.BlockHash == LatestBlockHash)
|
||||
// but bid.Slot is 0 (epoch 0, pre-fork). This mimics the upgrade-seeded state.
|
||||
base, blk := testGloasState(t, 1, parentRoot, blockHash)
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
// bid.Slot defaults to 0, which is before GloasForkEpoch=1.
|
||||
|
||||
// Set a valid initial head so saveHead's headBlock() call does not panic.
|
||||
// We do NOT insert the old block into forkchoice because insertGloasBlock
|
||||
// would claim the tree root slot; the target block (parentRoot=ZeroHash) must
|
||||
// be the first node inserted so it can become the tree root.
|
||||
oldBlk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{})
|
||||
oldSigned, err2 := blocks.NewSignedBeaconBlock(oldBlk)
|
||||
require.NoError(t, err2)
|
||||
oldSt, err2 := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
Slot: 0,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
|
||||
Eth1Data: ðpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
|
||||
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
|
||||
pp := make([]*ethpb.BuilderPendingPayment, 64)
|
||||
for i := range pp {
|
||||
pp[i] = ðpb.BuilderPendingPayment{Withdrawal: ðpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
|
||||
}
|
||||
return pp
|
||||
}(),
|
||||
ExecutionPayloadAvailability: make([]byte, 1024),
|
||||
LatestBlockHash: make([]byte, 32),
|
||||
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
ProposerLookahead: make([]uint64, 64),
|
||||
})
|
||||
require.NoError(t, err2)
|
||||
oldRoot := bytesutil.ToBytes32([]byte("oldroot1"))
|
||||
service.head = &head{root: oldRoot, block: oldSigned, state: oldSt, slot: 0}
|
||||
|
||||
insertGloasBlock(t, service, base, blk, blockRoot)
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify precondition: IsParentBlockFull() is true.
|
||||
full, err := st.IsParentBlockFull()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
|
||||
|
||||
// Verify guard precondition: bid.Slot is pre-fork.
|
||||
bid, err := st.LatestExecutionPayloadBid()
|
||||
require.NoError(t, err)
|
||||
isPrefork := slots.ToEpoch(bid.Slot()) < params.BeaconConfig().GloasForkEpoch
|
||||
require.Equal(t, true, isPrefork, "precondition: bid.Slot must be pre-fork")
|
||||
|
||||
ssigned, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// saveHead should NOT mark the head as full because bid.Slot < GloasForkEpoch.
|
||||
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
|
||||
|
||||
service.headLock.RLock()
|
||||
headFull := service.head.full
|
||||
service.headLock.RUnlock()
|
||||
require.Equal(t, false, headFull, "head must not be full for upgrade-seeded bid")
|
||||
}
|
||||
|
||||
// TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead verifies that saveHead correctly
|
||||
// marks the head as full when the latest bid is from a post-fork epoch.
|
||||
func TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 1
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
service, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
forkSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
// Set a valid initial head so saveHead's headBlock() call does not panic.
|
||||
// Do NOT use insertGloasBlock for the old block — the target block must be
|
||||
// the first node inserted so it can claim the tree root (parentRoot=ZeroHash).
|
||||
oldBlk2 := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{})
|
||||
oldSigned2, err2 := blocks.NewSignedBeaconBlock(oldBlk2)
|
||||
require.NoError(t, err2)
|
||||
oldSt2, err2 := state_native.InitializeFromProtoUnsafeGloas(ðpb.BeaconStateGloas{
|
||||
Slot: 0,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
LatestBlockHeader: ðpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
|
||||
Eth1Data: ðpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
|
||||
BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
|
||||
pp := make([]*ethpb.BuilderPendingPayment, 64)
|
||||
for i := range pp {
|
||||
pp[i] = ðpb.BuilderPendingPayment{Withdrawal: ðpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
|
||||
}
|
||||
return pp
|
||||
}(),
|
||||
ExecutionPayloadAvailability: make([]byte, 1024),
|
||||
LatestBlockHash: make([]byte, 32),
|
||||
PayloadExpectedWithdrawals: make([]*enginev1.Withdrawal, 0),
|
||||
ProposerLookahead: make([]uint64, 64),
|
||||
})
|
||||
require.NoError(t, err2)
|
||||
oldRoot2 := bytesutil.ToBytes32([]byte("oldroot2"))
|
||||
service.head = &head{root: oldRoot2, block: oldSigned2, state: oldSt2, slot: 0}
|
||||
|
||||
base, blk := testGloasState(t, forkSlot+1, parentRoot, blockHash)
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
// Set bid.Slot to a post-fork epoch slot.
|
||||
base.LatestExecutionPayloadBid.Slot = forkSlot + 1
|
||||
|
||||
insertGloasBlock(t, service, base, blk, blockRoot)
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify preconditions.
|
||||
full, err := st.IsParentBlockFull()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")
|
||||
|
||||
bid, err := st.LatestExecutionPayloadBid()
|
||||
require.NoError(t, err)
|
||||
isPostfork := slots.ToEpoch(bid.Slot()) >= params.BeaconConfig().GloasForkEpoch
|
||||
require.Equal(t, true, isPostfork, "precondition: bid.Slot must be post-fork")
|
||||
|
||||
ssigned, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
// saveHead SHOULD mark the head as full because bid.Slot >= GloasForkEpoch.
|
||||
require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))
|
||||
|
||||
service.headLock.RLock()
|
||||
headFull := service.head.full
|
||||
service.headLock.RUnlock()
|
||||
require.Equal(t, true, headFull, "head must be full for real post-fork bid")
|
||||
}
|
||||
|
||||
// TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot verifies that lateBlockTasks
|
||||
// uses headRoot (not LatestBlockHash) as the accessRoot when the bid is pre-fork epoch.
|
||||
// Without this guard, the upgrade-seeded bid would cause lateBlockTasks to use the wrong
|
||||
// access root for the next-slot cache.
|
||||
func TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
PrepareAllPayloads: true,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.GloasForkEpoch = 1
|
||||
cfg.InitializeForkSchedule()
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
|
||||
service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})
|
||||
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
|
||||
// Make IsParentBlockFull() true: bid.BlockHash == LatestBlockHash.
|
||||
base.LatestBlockHash = blockHash[:]
|
||||
// bid.Slot is 0 (pre-fork epoch): the epoch guard should prevent using LatestBlockHash as accessRoot.
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
headRoot := bytesutil.ToBytes32([]byte("headroot"))
|
||||
service.head = &head{
|
||||
root: headRoot,
|
||||
state: st,
|
||||
slot: 1,
|
||||
}
|
||||
|
||||
// Trigger late block logic: CurrentSlot > HeadSlot.
|
||||
service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
|
||||
service.SetForkChoiceGenesisTime(service.genesisTime)
|
||||
|
||||
service.lateBlockTasks(tr.ctx)
|
||||
require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
|
||||
}
|
||||
|
||||
@@ -50,6 +50,7 @@ type head struct {
|
||||
block interfaces.ReadOnlySignedBeaconBlock // current head block.
|
||||
state state.BeaconState // current head state.
|
||||
slot primitives.Slot // the head block slot number
|
||||
full bool // whether the head is post-CL or post-EL after Gloas
|
||||
optimistic bool // optimistic status when saved head
|
||||
}
|
||||
|
||||
@@ -60,8 +61,24 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
|
||||
defer span.End()
|
||||
|
||||
// Pre-Gloas we use empty for head because we still key states by blockroot
|
||||
var full bool
|
||||
var err error
|
||||
if headState.Version() >= version.Gloas {
|
||||
gloasFirstSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute gloas first slot")
|
||||
}
|
||||
if headState.Slot() > gloasFirstSlot {
|
||||
full, err = headState.IsParentBlockFull()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if head is full or not")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Do nothing if head hasn't changed.
|
||||
if !s.isNewHead(newHeadRoot) {
|
||||
if !s.isNewHead(newHeadRoot, full) {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -157,6 +174,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
|
||||
state: headState,
|
||||
optimistic: isOptimistic,
|
||||
slot: headBlock.Block().Slot(),
|
||||
full: full,
|
||||
}
|
||||
if err := s.setHead(newHead); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
@@ -217,6 +235,7 @@ func (s *Service) setHead(newHead *head) error {
|
||||
root: newHead.root,
|
||||
block: bCp,
|
||||
state: newHead.state.Copy(),
|
||||
full: newHead.full,
|
||||
optimistic: newHead.optimistic,
|
||||
slot: newHead.slot,
|
||||
}
|
||||
@@ -333,13 +352,16 @@ func (s *Service) notifyNewHeadEvent(
|
||||
if currentDutyDependentRoot == [32]byte{} {
|
||||
currentDutyDependentRoot = s.originBlockRoot
|
||||
}
|
||||
previousDutyDependentRoot := currentDutyDependentRoot
|
||||
var previousDutyDependentRoot [32]byte
|
||||
if currEpoch > 0 {
|
||||
previousDutyDependentRoot, err = s.DependentRoot(currEpoch.Sub(1))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get duty dependent root")
|
||||
}
|
||||
}
|
||||
if previousDutyDependentRoot == [32]byte{} {
|
||||
previousDutyDependentRoot = s.originBlockRoot
|
||||
}
|
||||
|
||||
isOptimistic, err := s.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -213,7 +213,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: true,
|
||||
PreviousDutyDependentRoot: make([]byte, 32),
|
||||
PreviousDutyDependentRoot: srv.originBlockRoot[:],
|
||||
CurrentDutyDependentRoot: srv.originBlockRoot[:],
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
@@ -243,11 +243,35 @@ func Test_notifyNewHeadEvent(t *testing.T) {
|
||||
Block: newHeadRoot[:],
|
||||
State: newHeadStateRoot[:],
|
||||
EpochTransition: true,
|
||||
PreviousDutyDependentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
PreviousDutyDependentRoot: srv.originBlockRoot[:],
|
||||
CurrentDutyDependentRoot: srv.originBlockRoot[:],
|
||||
}
|
||||
require.DeepSSZEqual(t, wanted, eventHead)
|
||||
})
|
||||
t.Run("previous dependent root zero hash falls back to origin", func(t *testing.T) {
|
||||
srv := testServiceWithDB(t)
|
||||
srv.SetGenesisTime(time.Now())
|
||||
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
|
||||
srv.originBlockRoot = [32]byte{0xab}
|
||||
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
newHeadRoot := [32]byte{3}
|
||||
st, blk, err = prepareForkchoiceState(t.Context(), 32, newHeadRoot, [32]byte{}, [32]byte{}, ðpb.Checkpoint{}, ðpb.Checkpoint{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
|
||||
newHeadSlot := params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, []byte{2}, newHeadRoot[:]))
|
||||
events := notifier.ReceivedEvents()
|
||||
require.Equal(t, 1, len(events))
|
||||
|
||||
eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
|
||||
require.Equal(t, true, ok)
|
||||
// DependentRoot(0) returns zero hash since the forkchoice tree is sparse.
|
||||
// The fix ensures it falls back to originBlockRoot instead of sending zeros.
|
||||
assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.PreviousDutyDependentRoot)
|
||||
assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.CurrentDutyDependentRoot)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRetrieveHead_ReadOnly(t *testing.T) {
|
||||
|
||||
@@ -77,10 +77,10 @@ func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]by
|
||||
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
|
||||
}
|
||||
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
|
||||
}
|
||||
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
|
||||
}
|
||||
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
|
||||
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
|
||||
|
||||
@@ -74,6 +74,7 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
consolidationRequestCount.Add(float64(len(eReqs.Consolidations)))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
@@ -132,6 +133,15 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
}
|
||||
if block.Version() < version.Gloas {
|
||||
moreFields["dataAvailabilityWaitedTime"] = daWaitedTime
|
||||
} else {
|
||||
signedBid, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get signed execution payload bid for logging")
|
||||
} else {
|
||||
moreFields["blockHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.BlockHash))
|
||||
moreFields["parentHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.ParentBlockHash))
|
||||
moreFields["builderIndex"] = signedBid.Message.BuilderIndex
|
||||
}
|
||||
}
|
||||
|
||||
level := logs.PackageVerbosity("beacon-chain/blockchain")
|
||||
|
||||
@@ -170,6 +170,10 @@ var (
|
||||
Name: "txs_per_slot_count",
|
||||
Help: "Count the number of txs per slot",
|
||||
})
|
||||
consolidationRequestCount = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "consolidation_request_count",
|
||||
Help: "Count the number of consolidation requests",
|
||||
})
|
||||
onBlockProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
|
||||
Name: "on_block_processing_milliseconds",
|
||||
Help: "Total time in milliseconds to complete a call to postBlockProcess()",
|
||||
@@ -234,6 +238,25 @@ var (
|
||||
Help: "The maximum number of blobs allowed in a block.",
|
||||
},
|
||||
)
|
||||
beaconExecutionPayloadEnvelopeValidTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_execution_payload_envelope_valid_total",
|
||||
Help: "Count the number of execution payload envelopes that were processed successfully.",
|
||||
})
|
||||
beaconExecutionPayloadEnvelopeInvalidTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_execution_payload_envelope_invalid_total",
|
||||
Help: "Count the number of execution payload envelopes that failed processing.",
|
||||
})
|
||||
beaconExecutionPayloadEnvelopeProcessingDurationSeconds = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_execution_payload_envelope_processing_duration_seconds",
|
||||
Help: "Captures end-to-end processing time for execution payload envelopes.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
},
|
||||
)
|
||||
beaconLatePayloadTaskTriggeredTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_late_payload_task_triggered_total",
|
||||
Help: "Count the number of times late payload tasks fired.",
|
||||
})
|
||||
)
|
||||
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
@@ -341,7 +364,7 @@ func reportEpochMetrics(ctx context.Context, postState, headState state.BeaconSt
|
||||
processedDepositsCount.Set(float64(postState.Eth1DepositIndex() + 1))
|
||||
|
||||
var b *precompute.Balance
|
||||
var v []*precompute.Validator
|
||||
var v []precompute.Validator
|
||||
var err error
|
||||
|
||||
if headState.Version() == version.Phase0 {
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
@@ -82,6 +84,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
|
||||
return errors.Wrap(err, "could not handle block's attestations")
|
||||
}
|
||||
if err := s.handleBlockPayloadAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
|
||||
return errors.Wrap(err, "could not handle block's payload attestations")
|
||||
}
|
||||
|
||||
s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
|
||||
if cfg.isValidPayload {
|
||||
@@ -103,6 +108,11 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
}
|
||||
if cfg.roblock.Version() < version.Gloas {
|
||||
s.sendFCU(cfg)
|
||||
} else if s.isNewHead(cfg.headRoot, false) { // We reach this only when the incoming block is head.
|
||||
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(ctx, cfg.postState, cfg.roblock)
|
||||
}
|
||||
|
||||
// Pre-Fulu the caches are updated when computing the payload attributes
|
||||
@@ -125,7 +135,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
var err error
|
||||
preStateVersion := st.Version()
|
||||
switch preStateVersion {
|
||||
case version.Phase0, version.Altair:
|
||||
case version.Phase0, version.Altair, version.Gloas:
|
||||
default:
|
||||
preStateHeader, err = st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
@@ -135,7 +145,112 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
// applyPayloadIfNeeded applies the parent block's execution payload envelope to
|
||||
// preState when the current block's bid indicates it built on a full parent.
|
||||
func (s *Service) applyPayloadIfNeeded(ctx context.Context, b interfaces.ReadOnlyBeaconBlock, parentRoot [32]byte, preState state.BeaconState) error {
|
||||
if b.Version() < version.Gloas || parentRoot == [32]byte{} {
|
||||
return nil
|
||||
}
|
||||
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get parent block with root %#x", parentRoot)
|
||||
}
|
||||
if parentBlock.Version() < version.Gloas {
|
||||
return nil
|
||||
}
|
||||
sb, err := b.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload bid for block")
|
||||
}
|
||||
if sb == nil || sb.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Slot())
|
||||
}
|
||||
parentBid, err := parentBlock.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if parentBid == nil || parentBid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if !bytes.Equal(sb.Message.ParentBlockHash, parentBid.Message.BlockHash) {
|
||||
return nil
|
||||
}
|
||||
signedEnvelope, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload envelope for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if signedEnvelope == nil || signedEnvelope.Message == nil {
|
||||
return nil
|
||||
}
|
||||
envelope, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(signedEnvelope.Message)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not wrap blinded execution payload envelope for parent block with root %#x", parentRoot)
|
||||
}
|
||||
return gloas.ProcessBlindedExecutionPayload(ctx, preState, parentBlock.Block().StateRoot(), envelope)
|
||||
}
|
||||
|
||||
// getBatchPrestate returns the pre-state to apply to the first beacon block in the batch and returns true if it applied the first envelope before
|
||||
func (s *Service) getBatchPrestate(ctx context.Context, b consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope) (state.BeaconState, bool, error) {
|
||||
if len(envelopes) == 0 || b.Version() < version.Gloas {
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
full, err := consensusblocks.BlockBuiltOnEnvelope(envelopes[0], b)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not check if block builds on envelope")
|
||||
}
|
||||
if !full {
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
parentRoot := b.Block().ParentRoot()
|
||||
if s.cfg.BeaconDB.HasExecutionPayloadEnvelope(ctx, parentRoot) {
|
||||
// This path should have been filtered already in init sync.
|
||||
log.Debugf("Ignoring already processed envelope for blockroot %#x", parentRoot)
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, env.BlockHash())
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
// notify the engine of the new envelope
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
if _, err := s.notifyNewEnvelope(ctx, blockPreState, env); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent block")
|
||||
}
|
||||
if err := gloas.ProcessBlindedExecutionPayload(ctx, blockPreState, parentBlock.Block().StateRoot(), env); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return blockPreState, true, nil
|
||||
}
|
||||
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header interfaces.ExecutionData
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -149,16 +264,35 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
|
||||
parentRoot := b.ParentRoot()
|
||||
if err := s.verifyBlkPreState(ctx, parentRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
|
||||
preState, applied, err := s.getBatchPrestate(ctx, blks[0], envelopes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
var eidx int
|
||||
var br [32]byte
|
||||
sigSet := bls.NewSet()
|
||||
if applied {
|
||||
eidx = 1
|
||||
envSigSet, err := gloas.ExecutionPayloadEnvelopeSignatureBatch(preState, envelopes[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigSet.Join(envSigSet)
|
||||
}
|
||||
if eidx < len(envelopes) {
|
||||
env, err := envelopes[eidx].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
br = env.BeaconBlockRoot()
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
@@ -167,11 +301,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
sigSet := bls.NewSet()
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header interfaces.ExecutionData
|
||||
}
|
||||
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
var set *bls.SignatureBatch
|
||||
@@ -193,6 +322,23 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
}
|
||||
if b.Root() == br && eidx < len(envelopes) {
|
||||
envSigSet, err := gloas.ProcessExecutionPayloadWithDeferredSig(ctx, preState, b.Block().StateRoot(), envelopes[eidx])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigSet.Join(envSigSet)
|
||||
eidx++
|
||||
if eidx < len(envelopes) {
|
||||
nextEnv, err := envelopes[eidx].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
br = nextEnv.BeaconBlockRoot()
|
||||
} else {
|
||||
br = [32]byte{}
|
||||
}
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
boundaries[b.Root()] = preState.Copy()
|
||||
@@ -224,56 +370,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return errors.New("batch block signature verification failed")
|
||||
}
|
||||
|
||||
// blocks have been verified, save them and call the engine
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
for i, b := range blks {
|
||||
root := b.Root()
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
// this call does not have the root in forkchoice yet.
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header, b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
pendingNodes[i] = args
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: root[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
|
||||
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
pendingNodes, isValidPayload, err := s.notifyEngineAndSaveData(ctx, blks, envelopes, avs, preVersionAndHeaders, postVersionAndHeaders, jCheckpoints, fCheckpoints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Save boundary states that will be useful for forkchoice
|
||||
for r, st := range boundaries {
|
||||
@@ -288,6 +387,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
// Insert all nodes to forkchoice
|
||||
if applied {
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertPayload(env); err != nil {
|
||||
return errors.Wrap(err, "could not insert first payload in batch to forkchoice")
|
||||
}
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
|
||||
return errors.Wrap(err, "could not insert batch to forkchoice")
|
||||
}
|
||||
@@ -300,13 +408,120 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) notifyEngineAndSaveData(
|
||||
ctx context.Context,
|
||||
blks []consensusblocks.ROBlock,
|
||||
envelopes []interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
avs das.AvailabilityChecker,
|
||||
preVersionAndHeaders []*versionAndHeader,
|
||||
postVersionAndHeaders []*versionAndHeader,
|
||||
jCheckpoints []*ethpb.Checkpoint,
|
||||
fCheckpoints []*ethpb.Checkpoint,
|
||||
) ([]*forkchoicetypes.BlockAndCheckpoints, bool, error) {
|
||||
span := trace.FromContext(ctx)
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
var err error
|
||||
|
||||
envMap := make(map[[32]byte]int, len(envelopes))
|
||||
for i, e := range envelopes {
|
||||
env, err := e.Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
envMap[env.BeaconBlockRoot()] = i
|
||||
}
|
||||
|
||||
for i, b := range blks {
|
||||
root := b.Root()
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
if b.Version() < version.Gloas {
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
return nil, false, s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header, b); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
idx, ok := envMap[root]
|
||||
if ok {
|
||||
env, err := envelopes[idx].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
isValidPayload, err = s.notifyNewEnvelopeFromBlock(ctx, b, env)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not notify new envelope from block")
|
||||
}
|
||||
args.HasPayload = true
|
||||
bh := env.BlockHash()
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: bh[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return nil, false, errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
pendingNodes[i] = args
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: root[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
|
||||
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return pendingNodes, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
slot := block.Slot()
|
||||
|
||||
if blockVersion >= version.Fulu {
|
||||
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), block); err != nil {
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
if err := s.areDataColumnsAvailable(ctx, roBlock.Root(), slot); err != nil {
|
||||
return errors.Wrapf(err, "are data columns available for block %#x with slot %d", roBlock.Root(), slot)
|
||||
}
|
||||
|
||||
@@ -368,9 +583,47 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
return nil
|
||||
}
|
||||
|
||||
// refreshCaches updates the next slot state cache and epoch boundary caches.
|
||||
// Before Fulu this is done synchronously, after Fulu it is deferred to a goroutine.
|
||||
func (s *Service) refreshCaches(ctx context.Context, currentSlot primitives.Slot, headRoot [32]byte, headState state.BeaconState, accessRoot [32]byte) {
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
if lastState.Version() < version.Fulu {
|
||||
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
|
||||
} else {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// updateCachesAndEpochBoundary updates the next slot state cache and handles
|
||||
// epoch boundary processing. If the lastRoot matches accessRoot, the cached
|
||||
// last state is reused; otherwise, the head state is advanced instead.
|
||||
func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot primitives.Slot, headState state.BeaconState, accessRoot [32]byte, lastRoot []byte, lastState state.BeaconState) {
|
||||
if bytes.Equal(lastRoot, accessRoot[:]) {
|
||||
// Happy case, the last advanced state is head, we thus keep it
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
} else {
|
||||
// Last advanced state was not head, we do not advance this but rather use headstate
|
||||
if err := transition.UpdateNextSlotCache(ctx, accessRoot[:], headState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, accessRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}
|
||||
|
||||
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
|
||||
// caches. The caller of this function must not hold a lock in forkchoice store.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.ReadOnlyBeaconState, blockRoot []byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
// return early if we are advancing to a past epoch
|
||||
@@ -419,6 +672,36 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleBlockPayloadAttestations feeds payload attestations included in a Gloas block into forkchoice.
|
||||
func (s *Service) handleBlockPayloadAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
|
||||
if blk.Version() < version.Gloas {
|
||||
return nil
|
||||
}
|
||||
atts, err := blk.Body().PayloadAttestations()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(atts) == 0 {
|
||||
return nil
|
||||
}
|
||||
committee, err := st.PayloadCommitteeReadOnly(blk.Slot() - 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, att := range atts {
|
||||
root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot)
|
||||
if !s.cfg.ForkChoiceStore.HasNode(root) {
|
||||
continue
|
||||
}
|
||||
for i := range committee {
|
||||
if att.AggregationBits.BitAt(uint64(i)) {
|
||||
s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(i), att.Data.PayloadPresent, att.Data.BlobDataAvailable)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertSlashingsToForkChoiceStore inserts attester slashing indices to fork choice store.
|
||||
// To call this function, it's caller's responsibility to ensure the slashing object is valid.
|
||||
// This function requires a write lock on forkchoice.
|
||||
@@ -592,11 +875,22 @@ func (s *Service) runLateBlockTasks() {
|
||||
return
|
||||
}
|
||||
|
||||
attThreshold := params.BeaconConfig().SecondsPerSlot / 3
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
|
||||
cfg := params.BeaconConfig()
|
||||
attDueBPS := cfg.AttestationDueBPS
|
||||
if slots.ToEpoch(s.CurrentSlot()) >= cfg.GloasForkEpoch {
|
||||
attDueBPS = cfg.AttestationDueBPSGloas
|
||||
}
|
||||
attThreshold := cfg.SlotComponentDuration(attDueBPS)
|
||||
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C():
|
||||
case slot := <-ticker.C():
|
||||
if attDueBPS != cfg.AttestationDueBPSGloas && slots.ToEpoch(slot) >= cfg.GloasForkEpoch {
|
||||
ticker.Done()
|
||||
attDueBPS = cfg.AttestationDueBPSGloas
|
||||
attThreshold = cfg.SlotComponentDuration(attDueBPS)
|
||||
ticker = slots.NewSlotTickerWithOffset(s.genesisTime, attThreshold, cfg.SecondsPerSlot)
|
||||
}
|
||||
s.lateBlockTasks(s.ctx)
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
@@ -673,7 +967,18 @@ func (s *Service) isDataAvailable(
|
||||
root := roBlock.Root()
|
||||
blockVersion := block.Version()
|
||||
if blockVersion >= version.Fulu {
|
||||
return s.areDataColumnsAvailable(ctx, root, block)
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
return s.areDataColumnsAvailable(ctx, root, block.Slot())
|
||||
}
|
||||
|
||||
if blockVersion >= version.Deneb {
|
||||
@@ -688,30 +993,15 @@ func (s *Service) isDataAvailable(
|
||||
func (s *Service) areDataColumnsAvailable(
|
||||
ctx context.Context,
|
||||
root [fieldparams.RootLength]byte,
|
||||
block interfaces.ReadOnlyBeaconBlock,
|
||||
slot primitives.Slot,
|
||||
) error {
|
||||
// We are only required to check within MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
|
||||
blockSlot, currentSlot := block.Slot(), s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(blockSlot), slots.ToEpoch(currentSlot)
|
||||
currentSlot := s.CurrentSlot()
|
||||
blockEpoch, currentEpoch := slots.ToEpoch(slot), slots.ToEpoch(currentSlot)
|
||||
if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
|
||||
return nil
|
||||
}
|
||||
|
||||
body := block.Body()
|
||||
if body == nil {
|
||||
return errors.New("invalid nil beacon block body")
|
||||
}
|
||||
|
||||
kzgCommitments, err := body.BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
// If block has not commitments there is nothing to wait for.
|
||||
if len(kzgCommitments) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// All columns to sample need to be available for the block to be considered available.
|
||||
nodeID := s.cfg.P2P.NodeID()
|
||||
|
||||
@@ -765,7 +1055,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
}
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, block.Slot()+1)
|
||||
nextSlot, err := slots.StartTime(s.genesisTime, slot+1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine slot start time: %w", err)
|
||||
}
|
||||
@@ -780,7 +1070,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
|
||||
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
|
||||
@@ -826,7 +1116,7 @@ func (s *Service) areDataColumnsAvailable(
|
||||
missingIndices = helpers.SortedPrettySliceFromMap(missing)
|
||||
}
|
||||
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
|
||||
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", slot, root, missingIndices)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -924,37 +1214,20 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
headRoot := s.headRoot()
|
||||
headState := s.headState(ctx)
|
||||
s.headLock.RUnlock()
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
// Before Fulu we need to process the next slot to find out if we are proposing.
|
||||
if lastState.Version() < version.Fulu {
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
|
||||
var accessRoot [32]byte
|
||||
isFull, err := headState.IsParentBlockFull()
|
||||
gloasFirstSlot, _ := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
|
||||
if err != nil || !isFull || headState.Slot() <= gloasFirstSlot {
|
||||
accessRoot = headRoot
|
||||
} else {
|
||||
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
|
||||
defer func() {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}()
|
||||
}()
|
||||
accessRoot, err = headState.LatestBlockHash()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash, using head root as access root")
|
||||
accessRoot = headRoot
|
||||
}
|
||||
}
|
||||
s.refreshCaches(ctx, currentSlot, headRoot, headState, accessRoot)
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
|
||||
@@ -962,12 +1235,27 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:], accessRoot[:])
|
||||
// return early if we are not proposing next slot
|
||||
if attribute.IsEmpty() {
|
||||
return
|
||||
}
|
||||
|
||||
if headState.Version() >= version.Gloas {
|
||||
bh, err := headState.LatestBlockHash()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
|
||||
return
|
||||
}
|
||||
id, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
if id != nil {
|
||||
s.cfg.PayloadIDCache.Set(s.CurrentSlot()+1, headRoot, [8]byte(*id))
|
||||
}
|
||||
return
|
||||
}
|
||||
s.headLock.RLock()
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
mathutil "github.com/OffchainLabs/prysm/v7/math"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
@@ -44,7 +46,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig) (*fcuConfig, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:], cfg.headRoot[:])
|
||||
return fcuArgs, nil
|
||||
}
|
||||
|
||||
@@ -64,26 +66,32 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig) (*fcuConfig,
|
||||
// block is not the head of the chain. It requires the caller holds a lock on
|
||||
// Forkchoice.
|
||||
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(blockRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
|
||||
headWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(headRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
fields := logrus.Fields{
|
||||
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"receivedWeight": receivedWeight,
|
||||
"headRoot": fmt.Sprintf("%#x", headRoot),
|
||||
"headWeight": headWeight,
|
||||
}).Debug("Head block is not the received block")
|
||||
}
|
||||
headEmpty, headFull, err := s.cfg.ForkChoiceStore.PayloadWeights(headRoot)
|
||||
if err == nil {
|
||||
fields["headEmptyWeight"] = headEmpty
|
||||
fields["headFullWeight"] = headFull
|
||||
}
|
||||
log.WithFields(fields).Debug("Head block is not the received block")
|
||||
}
|
||||
|
||||
// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
|
||||
// incoming block is non-canonical, that is, based on the head root.
|
||||
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
|
||||
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
|
||||
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot, cfg.headRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -193,10 +201,32 @@ func reportProcessingTime(startTime time.Time) {
|
||||
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// GetPrestateToPropose returns the pre-state for a proposer to base its block on.
|
||||
// It is similar to GetBlockPreState but it lacks unnecessary verifications.
|
||||
func (s *Service) GetPrestateToPropose(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.GetPreStateToPropose")
|
||||
defer span.End()
|
||||
|
||||
accessRoot, err := s.getLookupParentRoot(b)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get lookup parent root")
|
||||
}
|
||||
|
||||
bl := b.Block()
|
||||
preState, err := s.cfg.StateGen.StateByRoot(ctx, accessRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", bl.Slot())
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, errors.Wrapf(err, "nil pre state for slot %d", bl.Slot())
|
||||
}
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// GetBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Service) getBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
func (s *Service) GetBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
@@ -359,6 +389,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
return err
|
||||
}
|
||||
root := signed.Block().ParentRoot()
|
||||
child := signed
|
||||
// As long as parent node is not in fork choice store, and parent node is in DB.
|
||||
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
|
||||
b, err := s.getBlock(ctx, root)
|
||||
@@ -372,10 +403,33 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasPayload := false
|
||||
if roblock.Version() >= version.Gloas {
|
||||
sbid, err := child.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", child.Block().Slot())
|
||||
}
|
||||
if sbid == nil || sbid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", child.Block().Slot())
|
||||
}
|
||||
parentBid, err := b.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", b.Block().Slot())
|
||||
}
|
||||
if parentBid == nil || parentBid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Block().Slot())
|
||||
}
|
||||
if bytes.Equal(sbid.Message.ParentBlockHash, parentBid.Message.BlockHash) {
|
||||
hasPayload = true
|
||||
}
|
||||
}
|
||||
root = b.Block().ParentRoot()
|
||||
child = b
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: jCheckpoint,
|
||||
FinalizedCheckpoint: fCheckpoint}
|
||||
FinalizedCheckpoint: fCheckpoint,
|
||||
HasPayload: hasPayload,
|
||||
}
|
||||
pendingNodes = append(pendingNodes, args)
|
||||
}
|
||||
if len(pendingNodes) == 0 {
|
||||
|
||||
@@ -163,7 +163,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
|
||||
err := service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{})
|
||||
require.NoError(t, err)
|
||||
jcp := service.CurrentJustifiedCheckpt()
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
@@ -193,7 +193,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{}))
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
@@ -733,7 +733,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -785,7 +785,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -849,7 +849,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1324,7 +1324,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
@@ -1338,7 +1338,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
@@ -1352,7 +1352,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
@@ -1366,7 +1366,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
@@ -1444,7 +1444,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1466,7 +1466,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1489,7 +1489,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1514,7 +1514,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1551,7 +1551,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -1580,7 +1580,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1649,7 +1649,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1672,7 +1672,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1694,7 +1694,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1725,7 +1725,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1755,7 +1755,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -1795,7 +1795,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1822,7 +1822,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1852,7 +1852,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1914,7 +1914,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1936,7 +1936,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1958,7 +1958,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1991,7 +1991,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2023,7 +2023,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -2073,7 +2073,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
// We use onBlockBatch here because the valid chain is missing in forkchoice
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{}))
|
||||
// Check that the head is now VALID and the node is not optimistic
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
headRoot, err = service.HeadRoot(ctx)
|
||||
@@ -2115,7 +2115,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2183,7 +2183,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2456,7 +2456,7 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2516,7 +2516,7 @@ func TestRollbackBlock_SavePostStateInfo_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2574,7 +2574,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2591,7 +2591,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -3480,3 +3480,219 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandleBlockPayloadAttestations(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.GloasForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
t.Run("pre-Gloas block is no-op", func(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
blk := util.NewBeaconBlockElectra()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
|
||||
})
|
||||
|
||||
t.Run("empty payload attestations", func(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
blk := util.NewBeaconBlockGloas()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
st, err := util.NewBeaconStateGloas()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(t.Context(), wsb.Block(), st))
|
||||
})
|
||||
|
||||
t.Run("unknown root is skipped", func(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
numVals := 2048
|
||||
headState := gloasStateWithValidators(t, 2, numVals)
|
||||
|
||||
unknownRoot := bytesutil.ToBytes32([]byte("unknown"))
|
||||
bits := bitfield.NewBitvector512()
|
||||
bits.SetBitAt(0, true)
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
PayloadAttestations: []*ethpb.PayloadAttestation{
|
||||
{
|
||||
AggregationBits: bits,
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: unknownRoot[:],
|
||||
Slot: 1,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: true,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
|
||||
})
|
||||
|
||||
t.Run("known root sets PTC votes", func(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
numVals := 2048
|
||||
headState := gloasStateWithValidators(t, 2, numVals)
|
||||
|
||||
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
|
||||
insertGloasBlock(t, s, base, insertBlk, blockRoot)
|
||||
|
||||
ptc, err := headState.PayloadCommitteeReadOnly(1)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(ptc))
|
||||
|
||||
bits := bitfield.NewBitvector512()
|
||||
bits.SetBitAt(0, true)
|
||||
bits.SetBitAt(2, true)
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
PayloadAttestations: []*ethpb.PayloadAttestation{
|
||||
{
|
||||
AggregationBits: bits,
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Slot: 1,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: true,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
|
||||
})
|
||||
|
||||
t.Run("multiple attestations", func(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
numVals := 2048
|
||||
headState := gloasStateWithValidators(t, 2, numVals)
|
||||
|
||||
base, insertBlk := testGloasState(t, 1, parentRoot, blockHash)
|
||||
insertGloasBlock(t, s, base, insertBlk, blockRoot)
|
||||
|
||||
bits1 := bitfield.NewBitvector512()
|
||||
bits1.SetBitAt(0, true)
|
||||
bits2 := bitfield.NewBitvector512()
|
||||
bits2.SetBitAt(1, true)
|
||||
blk := util.HydrateSignedBeaconBlockGloas(ðpb.SignedBeaconBlockGloas{
|
||||
Block: ðpb.BeaconBlockGloas{
|
||||
Slot: 2,
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
PayloadAttestations: []*ethpb.PayloadAttestation{
|
||||
{
|
||||
AggregationBits: bits1,
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Slot: 1,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: false,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
{
|
||||
AggregationBits: bits2,
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Slot: 1,
|
||||
PayloadPresent: false,
|
||||
BlobDataAvailable: true,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateCachesAndEpochBoundary_MatchingRoots(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
|
||||
service.updateCachesAndEpochBoundary(t.Context(), 1, st, accessRoot, accessRoot[:], st)
|
||||
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
func TestUpdateCachesAndEpochBoundary_DifferentRoots(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
headState, _ := util.DeterministicGenesisState(t, 1)
|
||||
lastState, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
lastRoot := [32]byte{'b'}
|
||||
|
||||
service.updateCachesAndEpochBoundary(t.Context(), 1, headState, accessRoot, lastRoot[:], lastState)
|
||||
|
||||
// Cache should be keyed by accessRoot, not lastRoot.
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
|
||||
cached = transition.NextSlotState(lastRoot[:], 1)
|
||||
require.Equal(t, true, cached == nil)
|
||||
}
|
||||
|
||||
func TestRefreshCaches_NoCachedState(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
headRoot := [32]byte{'h'}
|
||||
|
||||
service.refreshCaches(t.Context(), 1, headRoot, st, headRoot)
|
||||
|
||||
cached := transition.NextSlotState(headRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
func TestRefreshCaches_CachedStateMatchesAccessRoot(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
headRoot := [32]byte{'h'}
|
||||
|
||||
// Pre-populate the cache with accessRoot.
|
||||
require.NoError(t, transition.UpdateNextSlotCache(t.Context(), accessRoot[:], st))
|
||||
|
||||
service.refreshCaches(t.Context(), 1, headRoot, st, accessRoot)
|
||||
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
@@ -134,38 +134,64 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
|
||||
start = time.Now()
|
||||
// return early if we haven't changed head
|
||||
newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
|
||||
newHeadRoot, newHeadBlockHash, full, err := s.cfg.ForkChoiceStore.FullHead(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute head from new attestations")
|
||||
return
|
||||
}
|
||||
if !s.isNewHead(newHeadRoot) {
|
||||
if !s.isNewHead(newHeadRoot, full) {
|
||||
return
|
||||
}
|
||||
log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
|
||||
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
|
||||
var accessRoot [32]byte
|
||||
postGloas := slots.ToEpoch(proposingSlot) >= params.BeaconConfig().GloasForkEpoch
|
||||
if full && postGloas {
|
||||
accessRoot = newHeadBlockHash
|
||||
} else {
|
||||
accessRoot = newHeadRoot
|
||||
}
|
||||
headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot, accessRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get head block")
|
||||
log.WithError(err).Error("Could not get head block and state")
|
||||
return
|
||||
}
|
||||
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: newHeadRoot,
|
||||
headBlock: headBlock,
|
||||
proposingSlot: proposingSlot,
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
attr := s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:], accessRoot[:])
|
||||
if attr != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
if postGloas {
|
||||
go func() {
|
||||
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, newHeadBlockHash, attr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice with engine")
|
||||
}
|
||||
if pid == nil {
|
||||
if attr != nil {
|
||||
log.Warn("Engine did not return a payload ID for the fork choice update with attributes")
|
||||
}
|
||||
return
|
||||
}
|
||||
var pId [8]byte
|
||||
copy(pId[:], pid[:])
|
||||
s.cfg.PayloadIDCache.Set(proposingSlot, newHeadRoot, pId)
|
||||
}()
|
||||
} else {
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: newHeadRoot,
|
||||
headBlock: headBlock,
|
||||
proposingSlot: proposingSlot,
|
||||
attributes: attr,
|
||||
}
|
||||
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
|
||||
}
|
||||
}
|
||||
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
if err := s.saveHead(s.ctx, newHeadRoot, headBlock, headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
s.pruneAttsFromPool(s.ctx, headState, headBlock)
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -112,7 +112,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -174,7 +174,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -41,10 +41,12 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
GetBlockPreState(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
|
||||
GetPrestateToPropose(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
@@ -100,7 +102,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return errors.Wrap(err, "new ro block with root")
|
||||
}
|
||||
|
||||
preState, err := s.getBlockPreState(ctx, roblock)
|
||||
preState, err := s.GetBlockPreState(ctx, roblock)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get block's prestate")
|
||||
}
|
||||
@@ -151,7 +153,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
|
||||
// Have we been finalizing? Should we start saving hot states to db?
|
||||
if err := s.checkSaveHotStateDB(ctx); err != nil {
|
||||
return errors.Wrap(err, "check save hot state db")
|
||||
log.WithError(err).Error("Could not check save hot state DB")
|
||||
}
|
||||
|
||||
// We apply the same heuristic to some of our more important caches.
|
||||
@@ -364,12 +366,14 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
go s.checkpointStateCache.EvictUpTo(finalized.Epoch)
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -377,7 +381,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Apply state transition on the incoming newly received block batches, one by one.
|
||||
if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
|
||||
if err := s.onBlockBatch(ctx, blocks, envelopes, avs); err != nil {
|
||||
err := errors.Wrap(err, "could not process block in batch")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -417,6 +421,15 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
|
||||
if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, e := range envelopes {
|
||||
protoEnv, ok := e.Proto().(*ethpb.SignedExecutionPayloadEnvelope)
|
||||
if !ok {
|
||||
return errors.New("could not type assert signed envelope to proto")
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveExecutionPayloadEnvelope(ctx, protoEnv); err != nil {
|
||||
return errors.Wrap(err, "could not save execution payload envelope")
|
||||
}
|
||||
}
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
if finalized == nil {
|
||||
return errNilFinalizedInStore
|
||||
|
||||
@@ -281,7 +281,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rwsb, err := blocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
|
||||
err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{})
|
||||
if tt.wantedErr != "" {
|
||||
assert.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
|
||||
@@ -4,11 +4,15 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -29,9 +33,18 @@ type ExecutionPayloadEnvelopeReceiver interface {
|
||||
}
|
||||
|
||||
// ReceiveExecutionPayloadEnvelope processes a signed execution payload envelope for the Gloas fork.
|
||||
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
|
||||
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) (err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveExecutionPayloadEnvelope")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
beaconExecutionPayloadEnvelopeProcessingDurationSeconds.Observe(time.Since(start).Seconds())
|
||||
if err != nil {
|
||||
beaconExecutionPayloadEnvelopeInvalidTotal.Inc()
|
||||
return
|
||||
}
|
||||
beaconExecutionPayloadEnvelopeValidTotal.Inc()
|
||||
}()
|
||||
|
||||
envelope, err := signed.Envelope()
|
||||
if err != nil {
|
||||
@@ -68,6 +81,16 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
|
||||
return err
|
||||
}
|
||||
|
||||
// DA check: verify data columns are available before inserting payload.
|
||||
bid, err := preState.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get latest execution payload bid")
|
||||
}
|
||||
if len(bid.BlobKzgCommitments()) > 0 {
|
||||
if err := s.areDataColumnsAvailable(ctx, root, envelope.Slot()); err != nil {
|
||||
return errors.Wrap(err, "data availability check failed for payload envelope")
|
||||
}
|
||||
}
|
||||
if err := s.savePostPayload(ctx, signed, preState); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -92,9 +115,25 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
|
||||
return err
|
||||
}
|
||||
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.PayloadProcessed,
|
||||
Data: &statefeed.PayloadProcessedData{
|
||||
Slot: envelope.Slot(),
|
||||
BlockRoot: root,
|
||||
},
|
||||
})
|
||||
|
||||
execution, err := envelope.Execution()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get execution payload from envelope for logging")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": envelope.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", root),
|
||||
"slot": envelope.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.ParentHash())),
|
||||
}).Info("Processed execution payload envelope")
|
||||
return nil
|
||||
}
|
||||
@@ -111,13 +150,21 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces
|
||||
|
||||
s.headLock.Lock()
|
||||
s.head.state = st
|
||||
s.head.full = true
|
||||
s.headLock.Unlock()
|
||||
|
||||
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
|
||||
log.WithError(err).Error("Could not update next slot cache")
|
||||
}
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
|
||||
log.WithError(err).Error("Could not update next slot cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, envelope.Slot(), st, blockHash[:]); err != nil {
|
||||
log.WithError(err).Error("Could not handle epoch boundary")
|
||||
}
|
||||
}()
|
||||
|
||||
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
|
||||
attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot, blockHash[:])
|
||||
if s.inRegularSync() {
|
||||
go func() {
|
||||
pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, blockHash, attr)
|
||||
@@ -156,6 +203,50 @@ func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, envelope inter
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
func (s *Service) callNewPayload(
|
||||
ctx context.Context,
|
||||
payload interfaces.ExecutionData,
|
||||
versionedHashes []common.Hash,
|
||||
parentRoot common.Hash,
|
||||
requests *enginev1.ExecutionRequests,
|
||||
slot primitives.Slot,
|
||||
) (bool, error) {
|
||||
_, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic envelope")
|
||||
return false, nil
|
||||
}
|
||||
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
|
||||
return false, invalidBlock{error: ErrInvalidPayload}
|
||||
}
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
|
||||
func (s *Service) notifyNewEnvelopeFromBlock(ctx context.Context, b blocks.ROBlock, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelopeFromBlock")
|
||||
defer span.End()
|
||||
|
||||
payload, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get execution payload from envelope")
|
||||
}
|
||||
sbid, err := b.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get signed execution payload bid from block")
|
||||
}
|
||||
versionedHashes := make([]common.Hash, len(sbid.Message.BlobKzgCommitments))
|
||||
for i, c := range sbid.Message.BlobKzgCommitments {
|
||||
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
|
||||
}
|
||||
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(b.Block().ParentRoot()), envelope.ExecutionRequests(), envelope.Slot())
|
||||
}
|
||||
|
||||
// The returned boolean indicates whether the payload was valid or if it was accepted as syncing (optimistic).
|
||||
func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelope")
|
||||
@@ -165,7 +256,6 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get execution payload from envelope")
|
||||
}
|
||||
|
||||
latestBid, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get latest execution payload bid")
|
||||
@@ -175,25 +265,7 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
|
||||
for i, c := range commitments {
|
||||
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
|
||||
}
|
||||
|
||||
parentRoot := common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot))
|
||||
requests := envelope.ExecutionRequests()
|
||||
|
||||
_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
|
||||
if err == nil {
|
||||
return true, nil
|
||||
}
|
||||
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": envelope.Slot(),
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic envelope")
|
||||
return false, nil
|
||||
}
|
||||
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
|
||||
return false, invalidBlock{error: ErrInvalidPayload}
|
||||
}
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot)), envelope.ExecutionRequests(), envelope.Slot())
|
||||
}
|
||||
|
||||
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
|
||||
|
||||
@@ -3,6 +3,10 @@ package blockchain
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -12,8 +16,24 @@ type PayloadAttestationReceiver interface {
|
||||
ReceivePayloadAttestationMessage(context.Context, *ethpb.PayloadAttestationMessage) error
|
||||
}
|
||||
|
||||
// ReceivePayloadAttestationMessage accepts a payload attestation message.
|
||||
// ReceivePayloadAttestationMessage accepts a payload attestation message and updates the
|
||||
// forkchoice PTC vote bitvectors for the referenced beacon block.
|
||||
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error {
|
||||
// TODO: Handle payload attestation message processing once Gloas is fully wired.
|
||||
if a == nil || a.Data == nil {
|
||||
return errors.New("nil payload attestation message")
|
||||
}
|
||||
root := bytesutil.ToBytes32(a.Data.BeaconBlockRoot)
|
||||
|
||||
st, err := s.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx, err := gloas.PayloadCommitteeIndex(ctx, st, a.Data.Slot, a.ValidatorIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
s.cfg.ForkChoiceStore.SetPTCVote(root, idx, a.Data.PayloadPresent, a.Data.BlobDataAvailable)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -0,0 +1,142 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
mockExecution "github.com/OffchainLabs/prysm/v7/beacon-chain/execution/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
func TestReceivePayloadAttestationMessage_NilMessage(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
err := s.ReceivePayloadAttestationMessage(t.Context(), nil)
|
||||
require.ErrorContains(t, "nil payload attestation message", err)
|
||||
}
|
||||
|
||||
func TestReceivePayloadAttestationMessage_NilData(t *testing.T) {
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
msg := ðpb.PayloadAttestationMessage{}
|
||||
err := s.ReceivePayloadAttestationMessage(t.Context(), msg)
|
||||
require.ErrorContains(t, "nil payload attestation message", err)
|
||||
}
|
||||
|
||||
func TestReceivePayloadAttestationMessage_ValidatorNotInPTC(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.GloasForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
numVals := 2048
|
||||
headState := gloasStateWithValidators(t, 1, numVals)
|
||||
|
||||
base, blk := testGloasState(t, 1, parentRoot, blockHash)
|
||||
insertGloasBlock(t, s, base, blk, blockRoot)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
|
||||
|
||||
ptc, err := headState.PayloadCommitteeReadOnly(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Pick a validator index not in the PTC.
|
||||
inPTC := make(map[primitives.ValidatorIndex]bool)
|
||||
for _, idx := range ptc {
|
||||
inPTC[idx] = true
|
||||
}
|
||||
var notInPTC primitives.ValidatorIndex
|
||||
for i := primitives.ValidatorIndex(0); int(i) < numVals; i++ {
|
||||
if !inPTC[i] {
|
||||
notInPTC = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
msg := ðpb.PayloadAttestationMessage{
|
||||
ValidatorIndex: notInPTC,
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Slot: 1,
|
||||
},
|
||||
}
|
||||
err = s.ReceivePayloadAttestationMessage(ctx, msg)
|
||||
require.ErrorContains(t, "validator not in PTC", err)
|
||||
}
|
||||
|
||||
func TestReceivePayloadAttestationMessage_OK(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.GloasForkEpoch = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
s, _ := setupGloasService(t, &mockExecution.EngineClient{})
|
||||
ctx := t.Context()
|
||||
|
||||
blockRoot := bytesutil.ToBytes32([]byte("root1"))
|
||||
parentRoot := params.BeaconConfig().ZeroHash
|
||||
blockHash := bytesutil.ToBytes32([]byte("hash1"))
|
||||
|
||||
headState := gloasStateWithValidators(t, 1, 2048)
|
||||
|
||||
base, blk := testGloasState(t, 1, parentRoot, blockHash)
|
||||
insertGloasBlock(t, s, base, blk, blockRoot)
|
||||
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
s.head = &head{root: blockRoot, block: wsb, state: headState, slot: 1}
|
||||
|
||||
ptc, err := headState.PayloadCommitteeReadOnly(1)
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, 0, len(ptc))
|
||||
|
||||
msg := ðpb.PayloadAttestationMessage{
|
||||
ValidatorIndex: ptc[0],
|
||||
Data: ðpb.PayloadAttestationData{
|
||||
BeaconBlockRoot: blockRoot[:],
|
||||
Slot: 1,
|
||||
PayloadPresent: true,
|
||||
BlobDataAvailable: true,
|
||||
},
|
||||
}
|
||||
require.NoError(t, s.ReceivePayloadAttestationMessage(ctx, msg))
|
||||
}
|
||||
|
||||
// gloasStateWithValidators returns a Gloas beacon state with active validators
|
||||
// for PTC committee computation.
|
||||
func gloasStateWithValidators(t *testing.T, slot primitives.Slot, numVals int) state.BeaconState {
|
||||
t.Helper()
|
||||
validators := make([]*ethpb.Validator, numVals)
|
||||
balances := make([]uint64, numVals)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra,
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
balances[i] = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
st, err := util.NewBeaconStateGloas(func(s *ethpb.BeaconStateGloas) error {
|
||||
s.Slot = slot
|
||||
s.Validators = validators
|
||||
s.Balances = balances
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}
|
||||
@@ -213,6 +213,7 @@ func (s *Service) Start() {
|
||||
}
|
||||
s.spawnProcessAttestationsRoutine()
|
||||
go s.runLateBlockTasks()
|
||||
go s.runLatePayloadTasks()
|
||||
}
|
||||
|
||||
// Stop the blockchain service's main event loop and associated goroutines.
|
||||
@@ -343,7 +344,7 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
}
|
||||
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
|
||||
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false, false}); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -432,6 +433,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
genesisState,
|
||||
genesisBlk.Block().Slot(),
|
||||
false,
|
||||
false,
|
||||
}); err != nil {
|
||||
log.WithError(err).Fatal("Could not set head")
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -77,8 +78,12 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
|
||||
log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
resolveChainPayloadStatus(chain)
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.markFinalizedRootFull(chain, fRoot); err != nil {
|
||||
log.WithError(err).Error("Could not mark finalized root as full in forkchoice")
|
||||
}
|
||||
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
|
||||
}
|
||||
|
||||
@@ -145,6 +150,68 @@ func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolveChainPayloadStatus determines which blocks in the chain had their
|
||||
// execution payloads delivered by checking if consecutive blocks' bids indicate
|
||||
// payload delivery. For each pair of blocks (chain[i], chain[i+1]), if the next
|
||||
// block's bid parentBlockHash equals the current block's bid blockHash, the
|
||||
// current block's payload was delivered.
|
||||
func resolveChainPayloadStatus(chain []*forkchoicetypes.BlockAndCheckpoints) {
|
||||
for i := 0; i < len(chain)-1; i++ {
|
||||
curr := chain[i].Block.Block()
|
||||
next := chain[i+1].Block.Block()
|
||||
if curr.Version() < version.Gloas || next.Version() < version.Gloas {
|
||||
continue
|
||||
}
|
||||
currBid, err := curr.Body().SignedExecutionPayloadBid()
|
||||
if err != nil || currBid == nil || currBid.Message == nil {
|
||||
continue
|
||||
}
|
||||
nextBid, err := next.Body().SignedExecutionPayloadBid()
|
||||
if err != nil || nextBid == nil || nextBid.Message == nil {
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(nextBid.Message.ParentBlockHash, currBid.Message.BlockHash) {
|
||||
chain[i].HasPayload = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// markFinalizedRootFull checks whether the finalized root block's execution
|
||||
// payload was delivered by inspecting the first block in the chain. If the first
|
||||
// block's bid parentBlockHash equals the finalized block's bid blockHash, the
|
||||
// finalized block's payload was delivered and a full node must be created in
|
||||
// forkchoice. The caller must hold the forkchoice lock.
|
||||
func (s *Service) markFinalizedRootFull(chain []*forkchoicetypes.BlockAndCheckpoints, fRoot [32]byte) error {
|
||||
if len(chain) == 0 {
|
||||
return nil
|
||||
}
|
||||
firstBlock := chain[0].Block.Block()
|
||||
if firstBlock.Version() < version.Gloas {
|
||||
return nil
|
||||
}
|
||||
firstBid, err := firstBlock.Body().SignedExecutionPayloadBid()
|
||||
if err != nil || firstBid == nil || firstBid.Message == nil {
|
||||
return nil
|
||||
}
|
||||
fBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
}
|
||||
if fBlock.Block().Version() < version.Gloas {
|
||||
return nil
|
||||
}
|
||||
fBid, err := fBlock.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil || fBid == nil || fBid.Message == nil {
|
||||
return nil
|
||||
}
|
||||
if !bytes.Equal(firstBid.Message.ParentBlockHash, fBid.Message.BlockHash) {
|
||||
return nil
|
||||
}
|
||||
// The finalized block's payload was delivered. Create the full node.
|
||||
s.cfg.ForkChoiceStore.MarkFullNode(fRoot)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceCheckpoints() error {
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
@@ -166,11 +233,11 @@ func (s *Service) setupForkchoiceCheckpoints() error {
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
log.WithError(err).Error("Could not update forkchoice's justified checkpoint, trying to update finalized checkpoint anyway")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: fRoot}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
log.WithError(err).Error("Could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
|
||||
return nil
|
||||
|
||||
@@ -106,7 +106,7 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -94,6 +94,11 @@ func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []bl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastForEpoch(_ context.Context, _ proto.Message, _ primitives.Epoch) error {
|
||||
mb.broadcastCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
|
||||
}
|
||||
|
||||
|
||||
@@ -77,6 +77,14 @@ type ChainService struct {
|
||||
DataColumns []blocks.VerifiedRODataColumn
|
||||
TargetRoot [32]byte
|
||||
MockHeadSlot *primitives.Slot
|
||||
DependentRootCB func([32]byte, primitives.Epoch) ([32]byte, error)
|
||||
MockCanonicalRoots map[primitives.Slot][32]byte
|
||||
MockCanonicalFull map[primitives.Slot]bool
|
||||
MockPayloadContentLookup map[[32]byte][32]byte
|
||||
MockPayloadContentIsFull map[[32]byte]bool
|
||||
ParentPayloadReadyVal *bool
|
||||
ForkchoiceRoots map[[32]byte]bool
|
||||
ForkchoiceBlockHashes map[[32]byte][32]byte
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -274,7 +282,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ []interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityChecker) error {
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -334,6 +342,16 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBlockPreState mocks the same method in the chain service.
|
||||
func (s *ChainService) GetBlockPreState(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
|
||||
return s.State, nil
|
||||
}
|
||||
|
||||
// GetPrestateToPropose mocks the same method in the chain service.
|
||||
func (s *ChainService) GetPrestateToPropose(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
|
||||
return s.State.Copy(), nil
|
||||
}
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (s *ChainService) HeadSlot() primitives.Slot {
|
||||
if s.MockHeadSlot != nil {
|
||||
@@ -569,10 +587,23 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
|
||||
}
|
||||
|
||||
// InForkchoice mocks the same method in the chain service
|
||||
func (s *ChainService) InForkchoice(_ [32]byte) bool {
|
||||
func (s *ChainService) InForkchoice(root [32]byte) bool {
|
||||
if s.ForkchoiceRoots != nil {
|
||||
return s.ForkchoiceRoots[root]
|
||||
}
|
||||
return !s.NotFinalized
|
||||
}
|
||||
|
||||
// BlockHash mocks the execution payload block hash lookup for a beacon block root.
|
||||
func (s *ChainService) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
if s.ForkchoiceBlockHashes != nil {
|
||||
if blockHash, ok := s.ForkchoiceBlockHashes[root]; ok {
|
||||
return blockHash, nil
|
||||
}
|
||||
}
|
||||
return [32]byte{}, errors.New("block hash not found")
|
||||
}
|
||||
|
||||
// IsOptimisticForRoot mocks the same method in the chain service.
|
||||
func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
|
||||
s.OptimisticCheckRootReceived = root
|
||||
@@ -630,7 +661,7 @@ func prepareForkchoiceState(
|
||||
}
|
||||
|
||||
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
st, err := state_native.InitializeFromProtoUnsafeBellatrix(base)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
@@ -689,7 +720,60 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.HighestReceivedBlockSlot()
|
||||
}
|
||||
return 0
|
||||
if s.Slot != nil {
|
||||
return *s.Slot
|
||||
}
|
||||
return s.BlockSlot
|
||||
}
|
||||
|
||||
// HighestReceivedBlockRoot mocks the same method in the chain service
|
||||
func (s *ChainService) HighestReceivedBlockRoot() [32]byte {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
}
|
||||
if s.Slot != nil && s.MockCanonicalRoots != nil {
|
||||
if root, ok := s.MockCanonicalRoots[*s.Slot]; ok {
|
||||
return root
|
||||
}
|
||||
}
|
||||
if len(s.Root) == 32 {
|
||||
return bytesutil.ToBytes32(s.Root)
|
||||
}
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// HasFullNode mocks the same method in the chain service
|
||||
func (s *ChainService) HasFullNode(root [32]byte) bool {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.HasFullNode(root)
|
||||
}
|
||||
if s.Slot != nil && s.MockCanonicalRoots != nil && s.MockCanonicalFull != nil {
|
||||
if r, ok := s.MockCanonicalRoots[*s.Slot]; ok && r == root {
|
||||
return s.MockCanonicalFull[*s.Slot]
|
||||
}
|
||||
}
|
||||
if s.ForkchoiceRoots != nil {
|
||||
return s.ForkchoiceRoots[root]
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
|
||||
func (s *ChainService) ShouldIgnoreData(_ [32]byte, _ primitives.Slot) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PayloadContentLookup mocks the same method in the chain service.
|
||||
func (s *ChainService) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.PayloadContentLookup(root)
|
||||
}
|
||||
if s.MockPayloadContentLookup != nil {
|
||||
if value, ok := s.MockPayloadContentLookup[root]; ok {
|
||||
return value, s.MockPayloadContentIsFull[root]
|
||||
}
|
||||
}
|
||||
return root, false
|
||||
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
@@ -775,8 +859,19 @@ func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ inte
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParentPayloadReady mocks the same method in the chain service.
|
||||
func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool {
|
||||
if s.ParentPayloadReadyVal != nil {
|
||||
return *s.ParentPayloadReadyVal
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// DependentRootForEpoch mocks the same method in the chain service
|
||||
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
func (c *ChainService) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
|
||||
if c.DependentRootCB != nil {
|
||||
return c.DependentRootCB(root, epoch)
|
||||
}
|
||||
return c.TargetRoot, nil
|
||||
}
|
||||
|
||||
@@ -785,6 +880,17 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
|
||||
return c.TargetRoot, nil
|
||||
}
|
||||
|
||||
func (c *ChainService) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
|
||||
var root [32]byte
|
||||
if c.MockCanonicalRoots != nil {
|
||||
root = c.MockCanonicalRoots[slot]
|
||||
}
|
||||
if c.MockCanonicalFull != nil {
|
||||
return root, c.MockCanonicalFull[slot]
|
||||
}
|
||||
return root, false
|
||||
}
|
||||
|
||||
// MockSyncChecker is a mock implementation of blockchain.Checker.
|
||||
// We can't make an assertion here that this is true because that would create a circular dependency.
|
||||
type MockSyncChecker struct {
|
||||
|
||||
5
beacon-chain/cache/BUILD.bazel
vendored
5
beacon-chain/cache/BUILD.bazel
vendored
@@ -15,6 +15,7 @@ go_library(
|
||||
"common.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"highest_execution_payload_bid.go",
|
||||
"interfaces.go",
|
||||
"log.go",
|
||||
"payload_attestation.go",
|
||||
@@ -22,6 +23,7 @@ go_library(
|
||||
"proposer_indices.go",
|
||||
"proposer_indices_disabled.go", # keep
|
||||
"proposer_indices_type.go",
|
||||
"proposer_preferences.go",
|
||||
"registration.go",
|
||||
"skip_slot_cache.go",
|
||||
"subnet_ids.go",
|
||||
@@ -55,6 +57,7 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
@@ -77,10 +80,12 @@ go_test(
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"highest_execution_payload_bid_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"payload_id_test.go",
|
||||
"private_access_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"proposer_preferences_test.go",
|
||||
"registration_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
|
||||
9
beacon-chain/cache/attestation_data.go
vendored
9
beacon-chain/cache/attestation_data.go
vendored
@@ -9,10 +9,11 @@ import (
|
||||
)
|
||||
|
||||
type AttestationConsensusData struct {
|
||||
Slot primitives.Slot
|
||||
HeadRoot []byte
|
||||
Target forkchoicetypes.Checkpoint
|
||||
Source forkchoicetypes.Checkpoint
|
||||
Slot primitives.Slot
|
||||
HeadRoot []byte
|
||||
Target forkchoicetypes.Checkpoint
|
||||
Source forkchoicetypes.Checkpoint
|
||||
IsPayloadFull bool
|
||||
}
|
||||
|
||||
// AttestationDataCache stores cached results of AttestationData requests.
|
||||
|
||||
51
beacon-chain/cache/checkpoint_state.go
vendored
51
beacon-chain/cache/checkpoint_state.go
vendored
@@ -3,8 +3,10 @@ package cache
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
@@ -25,6 +27,14 @@ var (
|
||||
Name: "check_point_state_cache_hit",
|
||||
Help: "The number of check point state requests that are present in the cache.",
|
||||
})
|
||||
checkpointStateSize = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "check_point_state_cache_size",
|
||||
Help: "The number of entries in the check point state cache.",
|
||||
})
|
||||
checkpointStateEvicted = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "check_point_state_cache_evicted_total",
|
||||
Help: "The number of entries evicted from the check point state cache.",
|
||||
})
|
||||
)
|
||||
|
||||
// CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
|
||||
@@ -49,14 +59,14 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be
|
||||
|
||||
item, exists := c.cache.Get(h)
|
||||
|
||||
if exists && item != nil {
|
||||
checkpointStateHit.Inc()
|
||||
// Copy here is unnecessary since the return will only be used to verify attestation signature.
|
||||
return item.(state.BeaconState), nil
|
||||
if !exists || item == nil {
|
||||
checkpointStateMiss.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
checkpointStateMiss.Inc()
|
||||
return nil, nil
|
||||
checkpointStateHit.Inc()
|
||||
// Copy here is unnecessary since the return will only be used to verify attestation signature.
|
||||
return item.(state.BeaconState), nil
|
||||
}
|
||||
|
||||
// AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
|
||||
@@ -66,6 +76,35 @@ func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.cache.Add(h, s)
|
||||
checkpointStateSize.Set(float64(c.cache.Len()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EvictUpTo removes all entries from the cache whose state epoch is at
|
||||
// or before the given epoch. Returns the number of evicted entries.
|
||||
func (c *CheckpointStateCache) EvictUpTo(epoch primitives.Epoch) int {
|
||||
evicted := 0
|
||||
for _, key := range c.cache.Keys() {
|
||||
// Peek is used here to avoid updating the recency of the entry,
|
||||
// as we are only checking for eviction.
|
||||
v, ok := c.cache.Peek(key)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
st := v.(state.ReadOnlyBeaconState)
|
||||
if slots.ToEpoch(st.Slot()) <= epoch {
|
||||
c.cache.Remove(key)
|
||||
evicted++
|
||||
}
|
||||
}
|
||||
|
||||
if evicted > 0 {
|
||||
checkpointStateSize.Set(float64(c.cache.Len()))
|
||||
checkpointStateEvicted.Add(float64(evicted))
|
||||
}
|
||||
|
||||
return evicted
|
||||
}
|
||||
|
||||
73
beacon-chain/cache/checkpoint_state_test.go
vendored
73
beacon-chain/cache/checkpoint_state_test.go
vendored
@@ -72,3 +72,76 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {
|
||||
|
||||
assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_EvictFinalized_FinalizedEntry(t *testing.T) {
|
||||
c := cache.NewCheckpointStateCache()
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 32})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.AddCheckpointState(cp, st))
|
||||
|
||||
evicted := c.EvictUpTo(1)
|
||||
assert.Equal(t, 1, evicted, "expected finalized entry to be evicted")
|
||||
|
||||
s, err := c.StateByCheckpoint(cp)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, state.BeaconState(nil), s, "expected cache to be empty after eviction")
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_EvictFinalized_NotFinalizedEntry(t *testing.T) {
|
||||
c := cache.NewCheckpointStateCache()
|
||||
|
||||
cp := ðpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 160})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.AddCheckpointState(cp, st))
|
||||
|
||||
evicted := c.EvictUpTo(3)
|
||||
assert.Equal(t, 0, evicted, "expected non-finalized entry NOT to be evicted")
|
||||
|
||||
s, err := c.StateByCheckpoint(cp)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, s, "expected entry to still be in cache")
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_EvictFinalized_Mixed(t *testing.T) {
|
||||
c := cache.NewCheckpointStateCache()
|
||||
|
||||
cp1 := ðpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
|
||||
st1, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 32})
|
||||
require.NoError(t, err)
|
||||
|
||||
cp2 := ðpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
|
||||
st2, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 64})
|
||||
require.NoError(t, err)
|
||||
|
||||
cp5 := ðpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'C'}, 32)}
|
||||
st5, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 160})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, c.AddCheckpointState(cp1, st1))
|
||||
require.NoError(t, c.AddCheckpointState(cp2, st2))
|
||||
require.NoError(t, c.AddCheckpointState(cp5, st5))
|
||||
|
||||
evicted := c.EvictUpTo(3)
|
||||
assert.Equal(t, 2, evicted, "expected epochs 1 and 2 to be evicted")
|
||||
|
||||
s, err := c.StateByCheckpoint(cp1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, state.BeaconState(nil), s, "expected cp1 to be evicted")
|
||||
|
||||
s, err = c.StateByCheckpoint(cp2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, state.BeaconState(nil), s, "expected cp2 to be evicted")
|
||||
|
||||
s, err = c.StateByCheckpoint(cp5)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, s, "expected cp5 to still be in cache")
|
||||
}
|
||||
|
||||
func TestCheckpointStateCache_EvictFinalized_EmptyCache(t *testing.T) {
|
||||
c := cache.NewCheckpointStateCache()
|
||||
evicted := c.EvictUpTo(0)
|
||||
assert.Equal(t, 0, evicted, "expected no eviction from empty cache")
|
||||
}
|
||||
|
||||
76
beacon-chain/cache/highest_execution_payload_bid.go
vendored
Normal file
76
beacon-chain/cache/highest_execution_payload_bid.go
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
type executionPayloadBidKey struct {
|
||||
slot primitives.Slot
|
||||
parentHash [32]byte
|
||||
parentRoot [32]byte
|
||||
}
|
||||
|
||||
// HighestExecutionPayloadBidCache stores the highest bid for each
|
||||
// (slot, parent_block_hash, parent_block_root) tuple.
|
||||
type HighestExecutionPayloadBidCache struct {
|
||||
bids map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewHighestExecutionPayloadBidCache initializes a highest-bid cache.
|
||||
func NewHighestExecutionPayloadBidCache() *HighestExecutionPayloadBidCache {
|
||||
return &HighestExecutionPayloadBidCache{
|
||||
bids: make(map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns the highest cached bid for the given tuple.
|
||||
func (c *HighestExecutionPayloadBidCache) Get(
|
||||
slot primitives.Slot,
|
||||
parentHash [32]byte,
|
||||
parentRoot [32]byte,
|
||||
) (*ethpb.SignedExecutionPayloadBid, bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
bid, ok := c.bids[executionPayloadBidKey{
|
||||
slot: slot,
|
||||
parentHash: parentHash,
|
||||
parentRoot: parentRoot,
|
||||
}]
|
||||
return bid, ok
|
||||
}
|
||||
|
||||
// SetIfHigher inserts the bid if absent, or replaces the cached bid only if
|
||||
// the incoming value is strictly greater.
|
||||
func (c *HighestExecutionPayloadBidCache) SetIfHigher(bid *ethpb.SignedExecutionPayloadBid) bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
key := executionPayloadBidKey{
|
||||
slot: bid.Message.Slot,
|
||||
parentHash: [32]byte(bid.Message.ParentBlockHash),
|
||||
parentRoot: [32]byte(bid.Message.ParentBlockRoot),
|
||||
}
|
||||
cached, ok := c.bids[key]
|
||||
if !ok || bid.Message.Value > cached.Message.Value {
|
||||
c.bids[key] = bid
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// PruneBefore removes all cached bids for slots before the provided slot.
|
||||
func (c *HighestExecutionPayloadBidCache) PruneBefore(slot primitives.Slot) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
for key := range c.bids {
|
||||
if key.slot < slot {
|
||||
delete(c.bids, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
105
beacon-chain/cache/highest_execution_payload_bid_test.go
vendored
Normal file
105
beacon-chain/cache/highest_execution_payload_bid_test.go
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestHighestExecutionPayloadBidCache_GetSetIfHigher(t *testing.T) {
|
||||
c := NewHighestExecutionPayloadBidCache()
|
||||
bid := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
|
||||
|
||||
inserted := c.SetIfHigher(bid)
|
||||
require.Equal(t, true, inserted)
|
||||
|
||||
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, bid, got)
|
||||
}
|
||||
|
||||
func TestHighestExecutionPayloadBidCache_SetIfHigher_ReplacesOnlyOnHigherValue(t *testing.T) {
|
||||
c := NewHighestExecutionPayloadBidCache()
|
||||
low := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
|
||||
same := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
|
||||
high := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 101)
|
||||
|
||||
require.Equal(t, true, c.SetIfHigher(low))
|
||||
require.Equal(t, false, c.SetIfHigher(same))
|
||||
|
||||
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, low, got)
|
||||
|
||||
require.Equal(t, true, c.SetIfHigher(high))
|
||||
got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x02})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, high, got)
|
||||
}
|
||||
|
||||
func TestHighestExecutionPayloadBidCache_SetIfHigher_KeepsDistinctTuples(t *testing.T) {
|
||||
c := NewHighestExecutionPayloadBidCache()
|
||||
first := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
|
||||
second := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x02}, 50)
|
||||
third := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x04}, 75)
|
||||
|
||||
require.Equal(t, true, c.SetIfHigher(first))
|
||||
require.Equal(t, true, c.SetIfHigher(second))
|
||||
require.Equal(t, true, c.SetIfHigher(third))
|
||||
|
||||
got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, first, got)
|
||||
|
||||
got, ok = c.Get(10, [32]byte{0x03}, [32]byte{0x02})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, second, got)
|
||||
|
||||
got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x04})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, third, got)
|
||||
}
|
||||
|
||||
func TestHighestExecutionPayloadBidCache_PruneBefore(t *testing.T) {
|
||||
c := NewHighestExecutionPayloadBidCache()
|
||||
oldBid := testSignedExecutionPayloadBid(9, [32]byte{0x01}, [32]byte{0x02}, 100)
|
||||
currentBid := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x04}, 101)
|
||||
|
||||
require.Equal(t, true, c.SetIfHigher(oldBid))
|
||||
require.Equal(t, true, c.SetIfHigher(currentBid))
|
||||
|
||||
c.PruneBefore(10)
|
||||
|
||||
_, ok := c.Get(9, [32]byte{0x01}, [32]byte{0x02})
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
got, ok := c.Get(10, [32]byte{0x03}, [32]byte{0x04})
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, currentBid, got)
|
||||
}
|
||||
|
||||
func testSignedExecutionPayloadBid(
|
||||
slot primitives.Slot,
|
||||
parentHash [32]byte,
|
||||
parentRoot [32]byte,
|
||||
value uint64,
|
||||
) *ethpb.SignedExecutionPayloadBid {
|
||||
return ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
Slot: slot,
|
||||
ParentBlockHash: bytes.Clone(parentHash[:]),
|
||||
ParentBlockRoot: bytes.Clone(parentRoot[:]),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 30_000_000,
|
||||
BuilderIndex: 1,
|
||||
Value: primitives.Gwei(value),
|
||||
ExecutionPayment: 10,
|
||||
},
|
||||
Signature: bytes.Repeat([]byte{0x06}, 96),
|
||||
}
|
||||
}
|
||||
87
beacon-chain/cache/proposer_preferences.go
vendored
Normal file
87
beacon-chain/cache/proposer_preferences.go
vendored
Normal file
@@ -0,0 +1,87 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
|
||||
// ProposerPreference stores the proposer fee recipient and gas limit for a slot.
|
||||
type ProposerPreference struct {
|
||||
FeeRecipient []byte
|
||||
GasLimit uint64
|
||||
}
|
||||
|
||||
// ProposerPreferencesCache stores proposer preferences by slot.
|
||||
type ProposerPreferencesCache struct {
|
||||
slotToPreferences map[primitives.Slot]ProposerPreference
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewProposerPreferencesCache initializes a proposer preferences cache.
|
||||
func NewProposerPreferencesCache() *ProposerPreferencesCache {
|
||||
return &ProposerPreferencesCache{
|
||||
slotToPreferences: make(map[primitives.Slot]ProposerPreference),
|
||||
}
|
||||
}
|
||||
|
||||
// Add stores proposer preferences for a slot. If the slot already exists, the
|
||||
// existing value is kept and false is returned.
|
||||
func (c *ProposerPreferencesCache) Add(slot primitives.Slot, feeRecipient []byte, gasLimit uint64) bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if _, ok := c.slotToPreferences[slot]; ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// FeeRecipient comes from validated SSZ-decoded proposer preferences, so
|
||||
// retaining the slice reference here is intentional.
|
||||
c.slotToPreferences[slot] = ProposerPreference{
|
||||
FeeRecipient: feeRecipient,
|
||||
GasLimit: gasLimit,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get returns proposer preferences for a slot.
|
||||
func (c *ProposerPreferencesCache) Get(slot primitives.Slot) (ProposerPreference, bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
pref, ok := c.slotToPreferences[slot]
|
||||
if !ok {
|
||||
return ProposerPreference{}, false
|
||||
}
|
||||
|
||||
return pref, true
|
||||
}
|
||||
|
||||
// Has returns true if proposer preferences for the slot already exist.
|
||||
func (c *ProposerPreferencesCache) Has(slot primitives.Slot) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
_, ok := c.slotToPreferences[slot]
|
||||
return ok
|
||||
}
|
||||
|
||||
// PruneBefore removes all proposer preferences for slots before the provided slot.
|
||||
func (c *ProposerPreferencesCache) PruneBefore(slot primitives.Slot) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
for cachedSlot := range c.slotToPreferences {
|
||||
if cachedSlot < slot {
|
||||
delete(c.slotToPreferences, cachedSlot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clear removes all cached proposer preferences.
|
||||
func (c *ProposerPreferencesCache) Clear() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
c.slotToPreferences = make(map[primitives.Slot]ProposerPreference)
|
||||
}
|
||||
63
beacon-chain/cache/proposer_preferences_test.go
vendored
Normal file
63
beacon-chain/cache/proposer_preferences_test.go
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProposerPreferencesCache_AddGetHas(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(123)
|
||||
feeRecipient := []byte{1, 2, 3, 4}
|
||||
|
||||
require.Equal(t, false, c.Has(slot))
|
||||
added := c.Add(slot, feeRecipient, 42)
|
||||
require.Equal(t, true, added)
|
||||
require.Equal(t, true, c.Has(slot))
|
||||
|
||||
pref, ok := c.Get(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, feeRecipient, pref.FeeRecipient)
|
||||
require.Equal(t, uint64(42), pref.GasLimit)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_AddDuplicateSlot(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(456)
|
||||
|
||||
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
|
||||
require.Equal(t, false, c.Add(slot, []byte{2}, 20))
|
||||
|
||||
pref, ok := c.Get(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, []byte{1}, pref.FeeRecipient)
|
||||
require.Equal(t, uint64(10), pref.GasLimit)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_Clear(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(789)
|
||||
|
||||
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
|
||||
c.Clear()
|
||||
|
||||
require.Equal(t, false, c.Has(slot))
|
||||
_, ok := c.Get(slot)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_PruneBefore(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
|
||||
require.Equal(t, true, c.Add(10, []byte{1}, 10))
|
||||
require.Equal(t, true, c.Add(11, []byte{2}, 11))
|
||||
require.Equal(t, true, c.Add(12, []byte{3}, 12))
|
||||
|
||||
c.PruneBefore(11)
|
||||
|
||||
require.Equal(t, false, c.Has(10))
|
||||
require.Equal(t, true, c.Has(11))
|
||||
require.Equal(t, true, c.Has(12))
|
||||
}
|
||||
2
beacon-chain/cache/sync_committee.go
vendored
2
beacon-chain/cache/sync_committee.go
vendored
@@ -172,7 +172,7 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
|
||||
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
|
||||
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
|
||||
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
|
||||
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
|
||||
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.ReadOnlyBeaconState) error {
|
||||
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
|
||||
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
|
||||
// cycled (new address), don't update it.
|
||||
|
||||
@@ -32,7 +32,7 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
|
||||
}
|
||||
|
||||
// UpdatePositionsInCommittee -- fake.
|
||||
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
|
||||
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.ReadOnlyBeaconState) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ go_library(
|
||||
"//beacon-chain/p2p/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
@@ -24,10 +25,10 @@ type AttDelta struct {
|
||||
}
|
||||
|
||||
// InitializePrecomputeValidators precomputes individual validator for its attested balances and the total sum of validators attested balances of the epoch.
|
||||
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
func InitializePrecomputeValidators(ctx context.Context, beaconState state.BeaconState) ([]precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.InitializePrecomputeValidators")
|
||||
defer span.End()
|
||||
vals := make([]*precompute.Validator, beaconState.NumValidators())
|
||||
vals := make([]precompute.Validator, beaconState.NumValidators())
|
||||
bal := &precompute.Balance{}
|
||||
prevEpoch := time.PrevEpoch(beaconState)
|
||||
currentEpoch := time.CurrentEpoch(beaconState)
|
||||
@@ -41,31 +42,30 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
|
||||
if beaconState.NumValidators() != len(inactivityScores) {
|
||||
return nil, nil, errors.New("num of validators is different than num of inactivity scores")
|
||||
}
|
||||
if err := beaconState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if err := beaconState.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
// Set validator's balance, inactivity score and slashed/withdrawable status.
|
||||
v := &precompute.Validator{
|
||||
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
|
||||
vals[idx] = precompute.Validator{
|
||||
CurrentEpochEffectiveBalance: val.EffectiveBalance,
|
||||
InactivityScore: inactivityScores[idx],
|
||||
IsSlashed: val.Slashed(),
|
||||
IsWithdrawableCurrentEpoch: currentEpoch >= val.WithdrawableEpoch(),
|
||||
IsSlashed: val.Slashed,
|
||||
IsWithdrawableCurrentEpoch: currentEpoch >= val.WithdrawableEpoch,
|
||||
}
|
||||
// Set validator's active status for current epoch.
|
||||
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
|
||||
v.IsActiveCurrentEpoch = true
|
||||
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance())
|
||||
if helpers.IsActiveCompactValidator(val, currentEpoch) {
|
||||
vals[idx].IsActiveCurrentEpoch = true
|
||||
bal.ActiveCurrentEpoch, err = math.Add64(bal.ActiveCurrentEpoch, val.EffectiveBalance)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Set validator's active status for previous epoch.
|
||||
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
|
||||
v.IsActivePrevEpoch = true
|
||||
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance())
|
||||
if helpers.IsActiveCompactValidator(val, prevEpoch) {
|
||||
vals[idx].IsActivePrevEpoch = true
|
||||
bal.ActivePrevEpoch, err = math.Add64(bal.ActivePrevEpoch, val.EffectiveBalance)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
vals[idx] = v
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not read every validator")
|
||||
@@ -85,8 +85,8 @@ func InitializePrecomputeValidators(ctx context.Context, beaconState state.Beaco
|
||||
func ProcessInactivityScores(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
vals []*precompute.Validator,
|
||||
) (state.BeaconState, []*precompute.Validator, error) {
|
||||
vals []precompute.Validator,
|
||||
) (state.BeaconState, []precompute.Validator, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessInactivityScores")
|
||||
defer span.End()
|
||||
|
||||
@@ -104,18 +104,18 @@ func ProcessInactivityScores(
|
||||
recoveryRate := cfg.InactivityScoreRecoveryRate
|
||||
prevEpoch := time.PrevEpoch(beaconState)
|
||||
finalizedEpoch := beaconState.FinalizedCheckpointEpoch()
|
||||
for i, v := range vals {
|
||||
if !precompute.EligibleForRewards(v) {
|
||||
for i := range vals {
|
||||
if !precompute.EligibleForRewards(&vals[i]) {
|
||||
continue
|
||||
}
|
||||
|
||||
if v.IsPrevEpochTargetAttester && !v.IsSlashed {
|
||||
if vals[i].IsPrevEpochTargetAttester && !vals[i].IsSlashed {
|
||||
// Decrease inactivity score when validator gets target correct.
|
||||
if v.InactivityScore > 0 {
|
||||
v.InactivityScore -= 1
|
||||
if vals[i].InactivityScore > 0 {
|
||||
vals[i].InactivityScore -= 1
|
||||
}
|
||||
} else {
|
||||
v.InactivityScore, err = math.Add64(v.InactivityScore, bias)
|
||||
vals[i].InactivityScore, err = math.Add64(vals[i].InactivityScore, bias)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -123,10 +123,10 @@ func ProcessInactivityScores(
|
||||
|
||||
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
|
||||
// Prevents underflow below 0.
|
||||
score := min(recoveryRate, v.InactivityScore)
|
||||
v.InactivityScore -= score
|
||||
score := min(recoveryRate, vals[i].InactivityScore)
|
||||
vals[i].InactivityScore -= score
|
||||
}
|
||||
inactivityScores[i] = v.InactivityScore
|
||||
inactivityScores[i] = vals[i].InactivityScore
|
||||
}
|
||||
|
||||
if err := beaconState.SetInactivityScores(inactivityScores); err != nil {
|
||||
@@ -151,8 +151,8 @@ func ProcessEpochParticipation(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
) ([]*precompute.Validator, *precompute.Balance, error) {
|
||||
vals []precompute.Validator,
|
||||
) ([]precompute.Validator, *precompute.Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "altair.ProcessEpochParticipation")
|
||||
defer span.End()
|
||||
|
||||
@@ -219,7 +219,7 @@ func ProcessEpochParticipation(
|
||||
func ProcessRewardsAndPenaltiesPrecompute(
|
||||
beaconState state.BeaconState,
|
||||
bal *precompute.Balance,
|
||||
vals []*precompute.Validator,
|
||||
vals []precompute.Validator,
|
||||
) (state.BeaconState, error) {
|
||||
// Don't process rewards and penalties in genesis epoch.
|
||||
cfg := params.BeaconConfig()
|
||||
@@ -263,7 +263,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
|
||||
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
|
||||
// voting records.
|
||||
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []*precompute.Validator) ([]*AttDelta, error) {
|
||||
func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, vals []precompute.Validator) ([]*AttDelta, error) {
|
||||
attDeltas := make([]*AttDelta, len(vals))
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
@@ -282,8 +282,8 @@ func AttestationsDelta(beaconState state.BeaconState, bal *precompute.Balance, v
|
||||
}
|
||||
inactivityDenominator := bias * inactivityPenaltyQuotient
|
||||
|
||||
for i, v := range vals {
|
||||
attDeltas[i], err = attestationDelta(bal, v, baseRewardMultiplier, inactivityDenominator, leak)
|
||||
for i := range vals {
|
||||
attDeltas[i], err = attestationDelta(bal, &vals[i], baseRewardMultiplier, inactivityDenominator, leak)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -33,23 +33,23 @@ func TestInitializeEpochValidators_Ok(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
v, b, err := InitializePrecomputeValidators(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsSlashed: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 0,
|
||||
}, v[0], "Incorrect validator 0 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 1,
|
||||
}, v[1], "Incorrect validator 1 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsActivePrevEpoch: true,
|
||||
IsActiveCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 2,
|
||||
}, v[2], "Incorrect validator 2 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsActivePrevEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InactivityScore: 3,
|
||||
@@ -94,13 +94,13 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}, validators[0])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
@@ -109,7 +109,7 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
IsPrevEpochAttester: true,
|
||||
IsPrevEpochSourceAttester: true,
|
||||
}, validators[1])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
@@ -120,7 +120,7 @@ func TestProcessEpochParticipation(t *testing.T) {
|
||||
IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochTargetAttester: true,
|
||||
}, validators[2])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
@@ -172,13 +172,13 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
validators, balance, err = ProcessEpochParticipation(t.Context(), st, balance, validators)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: false,
|
||||
IsActivePrevEpoch: false,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}, validators[0])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: false,
|
||||
IsActivePrevEpoch: true,
|
||||
IsPrevEpochAttester: true,
|
||||
@@ -187,7 +187,7 @@ func TestProcessEpochParticipation_InactiveValidator(t *testing.T) {
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}, validators[1])
|
||||
require.DeepEqual(t, &precompute.Validator{
|
||||
require.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
@@ -432,7 +432,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_BadState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
_, balance, err = ProcessEpochParticipation(t.Context(), s, balance, validators)
|
||||
require.NoError(t, err)
|
||||
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []*precompute.Validator{})
|
||||
_, err = ProcessRewardsAndPenaltiesPrecompute(s, balance, []precompute.Validator{})
|
||||
require.ErrorContains(t, "validator registries not the same length as state's validator registries", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -34,12 +34,12 @@ func BaseReward(s state.ReadOnlyBeaconState, index primitives.ValidatorIndex) (u
|
||||
|
||||
// BaseRewardWithTotalBalance calculates the base reward with the provided total balance.
|
||||
func BaseRewardWithTotalBalance(s state.ReadOnlyBeaconState, index primitives.ValidatorIndex, totalBalance uint64) (uint64, error) {
|
||||
val, err := s.ValidatorAtIndexReadOnly(index)
|
||||
effBal, err := s.EffectiveBalanceAtIndex(index)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
cfg := params.BeaconConfig()
|
||||
increments := val.EffectiveBalance() / cfg.EffectiveBalanceIncrement
|
||||
increments := effBal / cfg.EffectiveBalanceIncrement
|
||||
baseRewardPerInc, err := BaseRewardPerIncrement(totalBalance)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
||||
@@ -60,6 +60,7 @@ go_test(
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_regression_test.go",
|
||||
"eth1_data_test.go",
|
||||
"exit_builder_test.go",
|
||||
"exit_test.go",
|
||||
"exports_test.go",
|
||||
"genesis_test.go",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
@@ -62,6 +63,16 @@ func ProcessVoluntaryExits(
|
||||
if exit == nil || exit.Exit == nil {
|
||||
return nil, errors.New("nil voluntary exit in block body")
|
||||
}
|
||||
// [New in Gloas:EIP7732] Builder exits are identified by the builder index flag.
|
||||
if beaconState.Version() >= version.Gloas && exit.Exit.ValidatorIndex.IsBuilderIndex() {
|
||||
if err := verifyBuilderExitAndSignature(beaconState, exit); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify builder exit %d", idx)
|
||||
}
|
||||
if err := gloas.InitiateBuilderExit(beaconState, exit.Exit.ValidatorIndex.ToBuilderIndex()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -102,19 +113,24 @@ func ProcessVoluntaryExits(
|
||||
// initiate_validator_exit(state, voluntary_exit.validator_index)
|
||||
func VerifyExitAndSignature(
|
||||
validator state.ReadOnlyValidator,
|
||||
state state.ReadOnlyBeaconState,
|
||||
st state.ReadOnlyBeaconState,
|
||||
signed *ethpb.SignedVoluntaryExit,
|
||||
) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
|
||||
fork := state.Fork()
|
||||
genesisRoot := state.GenesisValidatorsRoot()
|
||||
// [New in Gloas:EIP7732] Builder exits are verified separately.
|
||||
if st.Version() >= version.Gloas && signed.Exit.ValidatorIndex.IsBuilderIndex() {
|
||||
return verifyBuilderExitAndSignature(st, signed)
|
||||
}
|
||||
|
||||
fork := st.Fork()
|
||||
genesisRoot := st.GenesisValidatorsRoot()
|
||||
|
||||
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
|
||||
// This allows for signed validator exits to be valid forever.
|
||||
if state.Version() >= version.Deneb {
|
||||
if st.Version() >= version.Deneb {
|
||||
fork = ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
@@ -123,7 +139,7 @@ func VerifyExitAndSignature(
|
||||
}
|
||||
|
||||
exit := signed.Exit
|
||||
if err := verifyExitConditions(state, validator, exit); err != nil {
|
||||
if err := verifyExitConditions(st, validator, exit); err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
|
||||
@@ -198,3 +214,57 @@ func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnly
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBuilderExitAndSignature validates a builder voluntary exit.
|
||||
// [New in Gloas:EIP7732]
|
||||
func verifyBuilderExitAndSignature(st state.ReadOnlyBeaconState, signed *ethpb.SignedVoluntaryExit) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
exit := signed.Exit
|
||||
builderIndex := exit.ValidatorIndex.ToBuilderIndex()
|
||||
|
||||
// Exits must specify an epoch when they become valid; they are not valid before then.
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
if currentEpoch < exit.Epoch {
|
||||
return fmt.Errorf("expected current epoch >= exit epoch, received %d < %d", currentEpoch, exit.Epoch)
|
||||
}
|
||||
|
||||
// Verify the builder is active.
|
||||
active, err := st.IsActiveBuilder(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if builder is active")
|
||||
}
|
||||
if !active {
|
||||
return fmt.Errorf("builder %d is not active", builderIndex)
|
||||
}
|
||||
|
||||
// Only exit builder if it has no pending balance to withdraw.
|
||||
pendingBalance, err := st.BuilderPendingBalanceToWithdraw(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get builder pending balance to withdraw")
|
||||
}
|
||||
if pendingBalance != 0 {
|
||||
return fmt.Errorf("builder %d has pending balance to withdraw: %d", builderIndex, pendingBalance)
|
||||
}
|
||||
|
||||
// Verify signature using builder pubkey with Capella fork version (EIP-7044).
|
||||
pubkey, err := st.BuilderPubkey(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get builder pubkey")
|
||||
}
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
genesisRoot := st.GenesisValidatorsRoot()
|
||||
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := signing.VerifySigningRoot(exit, pubkey[:], signed.Signature, domain); err != nil {
|
||||
return signing.ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
307
beacon-chain/core/blocks/exit_builder_test.go
Normal file
307
beacon-chain/core/blocks/exit_builder_test.go
Normal file
@@ -0,0 +1,307 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
// setGloasTestConfig sets fork epochs so Gloas is active at epoch 5.
|
||||
func setGloasTestConfig(t *testing.T) {
|
||||
t.Helper()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
cfg.GloasForkEpoch = 5
|
||||
params.SetActiveTestCleanup(t, cfg)
|
||||
}
|
||||
|
||||
// newGloasStateWithBuilder creates a minimal Gloas beacon state with one active builder
|
||||
// and returns the state along with the builder's BLS private key.
|
||||
func newGloasStateWithBuilder(t *testing.T, builderIndex primitives.BuilderIndex, epoch primitives.Epoch) (state.BeaconState, bls.SecretKey) {
|
||||
t.Helper()
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
builder := ðpb.Builder{
|
||||
Pubkey: priv.PublicKey().Marshal(),
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
DepositEpoch: 0,
|
||||
Balance: 32_000_000_000,
|
||||
ExecutionAddress: make([]byte, 20),
|
||||
}
|
||||
|
||||
builders := make([]*ethpb.Builder, int(builderIndex)+1)
|
||||
for i := range builders {
|
||||
if primitives.BuilderIndex(i) == builderIndex {
|
||||
builders[i] = builder
|
||||
} else {
|
||||
builders[i] = ðpb.Builder{
|
||||
Pubkey: make([]byte, 48),
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
DepositEpoch: 0,
|
||||
ExecutionAddress: make([]byte, 20),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: cfg.FuluForkVersion,
|
||||
CurrentVersion: cfg.GloasForkVersion,
|
||||
Epoch: cfg.GloasForkEpoch,
|
||||
},
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: epoch - 1,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Builders: builders,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: cfg.FarFutureEpoch,
|
||||
ActivationEpoch: 0,
|
||||
PublicKey: make([]byte, 48),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{32_000_000_000},
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
|
||||
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
|
||||
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
|
||||
}
|
||||
|
||||
for i := range stProto.BlockRoots {
|
||||
stProto.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.StateRoots {
|
||||
stProto.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.RandaoMixes {
|
||||
stProto.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return st, priv
|
||||
}
|
||||
|
||||
func signBuilderExit(t *testing.T, st state.ReadOnlyBeaconState, exit *ethpb.VoluntaryExit, priv bls.SecretKey) *ethpb.SignedVoluntaryExit {
|
||||
t.Helper()
|
||||
|
||||
sb, err := signing.ComputeDomainAndSign(st, exit.Epoch, exit, params.BeaconConfig().DomainVoluntaryExit, priv)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ðpb.SignedVoluntaryExit{
|
||||
Exit: exit,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderExit_HappyPath(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderNotActive(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
// Make builder not active by setting withdrawable epoch (already initiated exit).
|
||||
builder, err := st.Builder(builderIndex)
|
||||
require.NoError(t, err)
|
||||
builder.WithdrawableEpoch = 5
|
||||
require.NoError(t, st.UpdateBuilderAtIndex(builderIndex, builder))
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err = blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "is not active", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderPendingWithdrawal(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
// Give the builder a pending withdrawal.
|
||||
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{
|
||||
{
|
||||
BuilderIndex: builderIndex,
|
||||
Amount: 1000,
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}))
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "pending balance to withdraw", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderBadSignature(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, _ := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
wrongKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, wrongKey)
|
||||
|
||||
err = blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "signature did not verify", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderExitInFuture(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch + 1, // Future epoch.
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "expected current epoch >= exit epoch", err)
|
||||
}
|
||||
|
||||
func TestProcessVoluntaryExits_BuilderExit(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
newState, err := blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify builder's withdrawable epoch was set.
|
||||
builder, err := newState.Builder(builderIndex)
|
||||
require.NoError(t, err)
|
||||
cfg := params.BeaconConfig()
|
||||
expectedWithdrawableEpoch := epoch + cfg.MinBuilderWithdrawabilityDelay
|
||||
assert.Equal(t, expectedWithdrawableEpoch, builder.WithdrawableEpoch)
|
||||
}
|
||||
|
||||
func TestProcessVoluntaryExits_BuilderExitPreGloas(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
cfg.GloasForkEpoch = 100 // Gloas not yet active.
|
||||
params.SetActiveTestCleanup(t, cfg)
|
||||
|
||||
epoch := primitives.Epoch(10)
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
|
||||
stProto := ðpb.BeaconStateFulu{
|
||||
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: cfg.DenebForkVersion,
|
||||
CurrentVersion: cfg.FuluForkVersion,
|
||||
Epoch: cfg.FuluForkEpoch,
|
||||
},
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{ExitEpoch: cfg.FarFutureEpoch, ActivationEpoch: 0, PublicKey: make([]byte, 48)},
|
||||
},
|
||||
Balances: []uint64{32_000_000_000},
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
|
||||
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
|
||||
}
|
||||
for i := range stProto.BlockRoots {
|
||||
stProto.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.StateRoots {
|
||||
stProto.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.RandaoMixes {
|
||||
stProto.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeFulu(stProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
// On pre-Gloas state, builder-flagged exits are not routed to the builder path.
|
||||
// ProcessVoluntaryExits treats the builder-flagged index as a regular validator index,
|
||||
// which fails because no such validator exists.
|
||||
_, err = blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
}
|
||||
@@ -2,4 +2,5 @@ package blocks
|
||||
|
||||
var ProcessBLSToExecutionChange = processBLSToExecutionChange
|
||||
var ErrInvalidBLSPrefix = errInvalidBLSPrefix
|
||||
var ErrInvalidWithdrawalCredentials = errInvalidWithdrawalCredentials
|
||||
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
|
||||
|
||||
@@ -192,11 +192,45 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
Block: electraGenesisBlock(root),
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateGloas:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{
|
||||
Block: gloasGenesisBlock(root),
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
default:
|
||||
return nil, ErrUnrecognizedState
|
||||
}
|
||||
}
|
||||
|
||||
func gloasGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockGloas {
|
||||
return ðpb.BeaconBlockGloas{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
SignedExecutionPayloadBid: ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
|
||||
return ðpb.BeaconBlockElectra{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
|
||||
@@ -147,7 +147,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, signed)
|
||||
// The state should return an empty validator, even when the validator object in the registry is
|
||||
// nil. This error should return when the withdrawal credentials are invalid or too short.
|
||||
require.ErrorIs(t, err, blocks.ErrInvalidBLSPrefix)
|
||||
require.ErrorIs(t, err, blocks.ErrInvalidWithdrawalCredentials)
|
||||
})
|
||||
t.Run("non-existent validator", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//beacon-chain/state/state-native/custom-types:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
|
||||
@@ -20,37 +20,46 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 1000)
|
||||
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
validators[i].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[i].WithdrawalCredentials = wc
|
||||
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
|
||||
}
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
const (
|
||||
depositCount = uint64(2)
|
||||
amountETH = uint64(32)
|
||||
slot = 0
|
||||
activeBalanceGwei = 10_000
|
||||
)
|
||||
|
||||
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
|
||||
state := stateWithActiveBalanceETH(t, 0)
|
||||
|
||||
secretKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
val := st.Validators()
|
||||
seenPubkeys := make(map[string]struct{})
|
||||
for i := 0; i < len(val); i += 1 {
|
||||
if len(val[i].PublicKey) == 0 {
|
||||
continue
|
||||
}
|
||||
_, ok := seenPubkeys[string(val[i].PublicKey)]
|
||||
if ok {
|
||||
t.Fatalf("duplicated pubkeys")
|
||||
} else {
|
||||
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
|
||||
}
|
||||
withdrawalCredentialsBytes := make([]byte, 32)
|
||||
withdrawalCredentialsBytes[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
withdrawalCredentials := bytesutil.ToBytes32(withdrawalCredentialsBytes)
|
||||
|
||||
validators := state.Validators()
|
||||
require.Equal(t, 0, len(validators))
|
||||
|
||||
deposits := make([]*eth.PendingDeposit, 0, depositCount)
|
||||
for range depositCount {
|
||||
deposit := stateTesting.GeneratePendingDeposit(t, secretKey, amountETH, withdrawalCredentials, slot)
|
||||
deposits = append(deposits, deposit)
|
||||
}
|
||||
|
||||
err = state.SetPendingDeposits(deposits)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = electra.ProcessPendingDeposits(t.Context(), state, activeBalanceGwei)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The first deposit should create a new validator,
|
||||
// and the second deposit should top up the same validator
|
||||
// We should have 1 validator with balance of 64 ETH.
|
||||
validators = state.Validators()
|
||||
require.Equal(t, 1, len(validators))
|
||||
|
||||
balance, err := state.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, depositCount*amountETH, balance)
|
||||
}
|
||||
|
||||
func TestProcessPendingDeposits(t *testing.T) {
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// ProcessEffectiveBalanceUpdates processes effective balance updates during epoch processing.
|
||||
@@ -30,35 +30,36 @@ import (
|
||||
// ):
|
||||
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, EFFECTIVE_BALANCE_LIMIT)
|
||||
func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
cfg := params.BeaconConfig()
|
||||
effBalanceInc := cfg.EffectiveBalanceIncrement
|
||||
hysteresisInc := effBalanceInc / cfg.HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * cfg.HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * cfg.HysteresisUpwardMultiplier
|
||||
minActivationBalance := cfg.MinActivationBalance
|
||||
maxEffBalanceElectra := cfg.MaxEffectiveBalanceElectra
|
||||
compoundingPrefix := cfg.CompoundingWithdrawalPrefixByte
|
||||
|
||||
bals := st.Balances()
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
|
||||
if val.IsNil() {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
return st.ApplyToEveryCompactValidator(func(idx int, val *stateutil.CompactValidator) (stateutil.CompactValidator, bool, error) {
|
||||
if idx >= len(bals) {
|
||||
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
|
||||
return stateutil.CompactValidator{}, false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(bals))
|
||||
}
|
||||
balance := bals[idx]
|
||||
|
||||
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
effectiveBalanceLimit := minActivationBalance
|
||||
if val.WithdrawalCredentials[0] == compoundingPrefix {
|
||||
effectiveBalanceLimit = maxEffBalanceElectra
|
||||
}
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
|
||||
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
|
||||
effectiveBal := min(balance-balance%effBalanceInc, effectiveBalanceLimit)
|
||||
newVal = val.Copy()
|
||||
newVal.EffectiveBalance = effectiveBal
|
||||
if effectiveBal != val.EffectiveBalance {
|
||||
updated := *val
|
||||
updated.EffectiveBalance = effectiveBal
|
||||
return updated, true, nil
|
||||
}
|
||||
}
|
||||
return newVal, nil
|
||||
}
|
||||
|
||||
return st.ApplyToEveryValidator(validatorFunc)
|
||||
return stateutil.CompactValidator{}, false, nil
|
||||
})
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
)
|
||||
@@ -48,19 +49,20 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
|
||||
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
|
||||
|
||||
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
finalizedEpoch := st.FinalizedCheckpointEpoch()
|
||||
if err := st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
// Collect validators eligible to enter the activation queue.
|
||||
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
|
||||
if helpers.IsEligibleForActivationQueueCompact(val, currentEpoch) {
|
||||
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators to eject.
|
||||
if val.EffectiveBalance() <= ejectionBal && helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
|
||||
if val.EffectiveBalance <= ejectionBal && helpers.IsActiveCompactValidator(val, currentEpoch) {
|
||||
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators eligible for activation and not yet dequeued for activation.
|
||||
if helpers.IsEligibleForActivationUsingROVal(st, val) {
|
||||
if helpers.IsEligibleForActivationCompact(val, finalizedEpoch) {
|
||||
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
@@ -258,14 +259,14 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
earliestExitEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(beaconState))
|
||||
preActivationIndices := make([]primitives.ValidatorIndex, 0)
|
||||
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
|
||||
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
|
||||
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
|
||||
earliestExitEpoch = val.ExitEpoch()
|
||||
if err = beaconState.ForEachValidator(func(index int, val *stateutil.CompactValidator) error {
|
||||
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch > earliestExitEpoch {
|
||||
earliestExitEpoch = val.ExitEpoch
|
||||
}
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
if val.ActivationEpoch == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
if val.WithdrawalCredentials[0] == params.BeaconConfig().CompoundingWithdrawalPrefixByte {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -16,9 +16,6 @@ import (
|
||||
func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{}, // No withdrawal credentials
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
|
||||
},
|
||||
@@ -27,22 +24,19 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
|
||||
},
|
||||
})
|
||||
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
|
||||
|
||||
// Test that a validator with withdrawal credentials can be switched to compounding.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
v, err := s.ValidatorAtIndex(1)
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 0))
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
|
||||
// val_1 Balance is not changed
|
||||
b, err := s.BalanceAtIndex(1)
|
||||
// val_0 Balance is not changed
|
||||
b, err := s.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
|
||||
pbd, err := s.PendingDeposits()
|
||||
@@ -50,8 +44,8 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
|
||||
|
||||
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
|
||||
b, err = s.BalanceAtIndex(2)
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
b, err = s.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
|
||||
pbd, err = s.PendingDeposits()
|
||||
|
||||
@@ -59,21 +59,22 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
|
||||
eligibleForActivation := make([]primitives.ValidatorIndex, 0)
|
||||
eligibleForEjection := make([]primitives.ValidatorIndex, 0)
|
||||
|
||||
if err := st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
finalizedEpoch := st.FinalizedCheckpointEpoch()
|
||||
if err := st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
// Collect validators eligible to enter the activation queue.
|
||||
if helpers.IsEligibleForActivationQueue(val, currentEpoch) {
|
||||
if helpers.IsEligibleForActivationQueueCompact(val, currentEpoch) {
|
||||
eligibleForActivationQ = append(eligibleForActivationQ, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators to eject.
|
||||
isActive := helpers.IsActiveValidatorUsingTrie(val, currentEpoch)
|
||||
belowEjectionBalance := val.EffectiveBalance() <= ejectionBal
|
||||
isActive := helpers.IsActiveCompactValidator(val, currentEpoch)
|
||||
belowEjectionBalance := val.EffectiveBalance <= ejectionBal
|
||||
if isActive && belowEjectionBalance {
|
||||
eligibleForEjection = append(eligibleForEjection, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
// Collect validators eligible for activation and not yet dequeued for activation.
|
||||
if helpers.IsEligibleForActivationUsingROVal(st, val) {
|
||||
if helpers.IsEligibleForActivationCompact(val, finalizedEpoch) {
|
||||
eligibleForActivation = append(eligibleForActivation, primitives.ValidatorIndex(idx))
|
||||
}
|
||||
|
||||
@@ -243,15 +244,15 @@ func ProcessSlashings(st state.BeaconState) error {
|
||||
|
||||
bals := st.Balances()
|
||||
changed := false
|
||||
err = st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch()
|
||||
if val.Slashed() && correctEpoch {
|
||||
err = st.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch
|
||||
if val.Slashed && correctEpoch {
|
||||
var penalty uint64
|
||||
if st.Version() >= version.Electra {
|
||||
effectiveBalanceIncrements := val.EffectiveBalance() / increment
|
||||
effectiveBalanceIncrements := val.EffectiveBalance / increment
|
||||
penalty = penaltyPerEffectiveBalanceIncrement * effectiveBalanceIncrements
|
||||
} else {
|
||||
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
|
||||
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
|
||||
penalty = penaltyNumerator / totalBalance * increment
|
||||
}
|
||||
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
|
||||
@@ -310,35 +311,31 @@ func ProcessEth1DataReset(state state.BeaconState) (state.BeaconState, error) {
|
||||
// ):
|
||||
// validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
||||
func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, error) {
|
||||
effBalanceInc := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
maxEffBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
hysteresisInc := effBalanceInc / params.BeaconConfig().HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * params.BeaconConfig().HysteresisUpwardMultiplier
|
||||
cfg := params.BeaconConfig()
|
||||
effBalanceInc := cfg.EffectiveBalanceIncrement
|
||||
maxEffBalance := cfg.MaxEffectiveBalance
|
||||
hysteresisInc := effBalanceInc / cfg.HysteresisQuotient
|
||||
downwardThreshold := hysteresisInc * cfg.HysteresisDownwardMultiplier
|
||||
upwardThreshold := hysteresisInc * cfg.HysteresisUpwardMultiplier
|
||||
|
||||
bals := st.Balances()
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
|
||||
if val == nil {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if err := st.ApplyToEveryCompactValidator(func(idx int, val *stateutil.CompactValidator) (stateutil.CompactValidator, bool, error) {
|
||||
if idx >= len(bals) {
|
||||
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
|
||||
return stateutil.CompactValidator{}, false, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(bals))
|
||||
}
|
||||
balance := bals[idx]
|
||||
|
||||
if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
|
||||
if balance+downwardThreshold < val.EffectiveBalance || val.EffectiveBalance+upwardThreshold < balance {
|
||||
effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
|
||||
if effectiveBal != val.EffectiveBalance() {
|
||||
newVal = val.Copy()
|
||||
newVal.EffectiveBalance = effectiveBal
|
||||
if effectiveBal != val.EffectiveBalance {
|
||||
updated := *val
|
||||
updated.EffectiveBalance = effectiveBal
|
||||
return updated, true, nil
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := st.ApplyToEveryValidator(validatorFunc); err != nil {
|
||||
return stateutil.CompactValidator{}, false, nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stateutil:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//math:go_default_library",
|
||||
|
||||
@@ -22,9 +22,9 @@ import (
|
||||
func ProcessAttestations(
|
||||
ctx context.Context,
|
||||
state state.ReadOnlyBeaconState,
|
||||
vp []*Validator,
|
||||
vp []Validator,
|
||||
pBal *Balance,
|
||||
) ([]*Validator, *Balance, error) {
|
||||
) ([]Validator, *Balance, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "precomputeEpoch.ProcessAttestations")
|
||||
defer span.End()
|
||||
|
||||
@@ -141,7 +141,7 @@ func SameHead(state state.ReadOnlyBeaconState, a *ethpb.PendingAttestation) (boo
|
||||
}
|
||||
|
||||
// UpdateValidator updates pre computed validator store.
|
||||
func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *ethpb.PendingAttestation, aSlot primitives.Slot) []*Validator {
|
||||
func UpdateValidator(vp []Validator, record *Validator, indices []uint64, a *ethpb.PendingAttestation, aSlot primitives.Slot) []Validator {
|
||||
inclusionSlot := aSlot + a.InclusionDelay
|
||||
|
||||
for _, i := range indices {
|
||||
@@ -171,7 +171,7 @@ func UpdateValidator(vp []*Validator, record *Validator, indices []uint64, a *et
|
||||
}
|
||||
|
||||
// UpdateBalance updates pre computed balance store.
|
||||
func UpdateBalance(vp []*Validator, bBal *Balance, stateVersion int) *Balance {
|
||||
func UpdateBalance(vp []Validator, bBal *Balance, stateVersion int) *Balance {
|
||||
for _, v := range vp {
|
||||
if !v.IsSlashed {
|
||||
if v.IsCurrentEpochAttester {
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
|
||||
func TestUpdateValidator_Works(t *testing.T) {
|
||||
e := params.BeaconConfig().FarFutureSlot
|
||||
vp := []*precompute.Validator{{}, {InclusionSlot: e}, {}, {InclusionSlot: e}, {}, {InclusionSlot: e}}
|
||||
vp := []precompute.Validator{{}, {InclusionSlot: e}, {}, {InclusionSlot: e}, {}, {InclusionSlot: e}}
|
||||
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochAttester: true, IsPrevEpochTargetAttester: true, IsPrevEpochHeadAttester: true}
|
||||
a := ðpb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}
|
||||
@@ -25,28 +25,28 @@ func TestUpdateValidator_Works(t *testing.T) {
|
||||
// Indices 1 3 and 5 attested
|
||||
vp = precompute.UpdateValidator(vp, record, []uint64{1, 3, 5}, a, 100)
|
||||
|
||||
wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
|
||||
wanted := precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true,
|
||||
IsPrevEpochAttester: true, IsPrevEpochTargetAttester: true, IsPrevEpochHeadAttester: true,
|
||||
ProposerIndex: 2, InclusionDistance: 1, InclusionSlot: 101}
|
||||
wantedVp := []*precompute.Validator{{}, wanted, {}, wanted, {}, wanted}
|
||||
wantedVp := []precompute.Validator{{}, wanted, {}, wanted, {}, wanted}
|
||||
assert.DeepEqual(t, wantedVp, vp, "Incorrect attesting validator calculations")
|
||||
}
|
||||
|
||||
func TestUpdateValidator_InclusionOnlyCountsPrevEpoch(t *testing.T) {
|
||||
e := params.BeaconConfig().FarFutureSlot
|
||||
vp := []*precompute.Validator{{InclusionSlot: e}}
|
||||
vp := []precompute.Validator{{InclusionSlot: e}}
|
||||
record := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true}
|
||||
a := ðpb.PendingAttestation{InclusionDelay: 1, ProposerIndex: 2}
|
||||
|
||||
// Verify inclusion info doesn't get updated.
|
||||
vp = precompute.UpdateValidator(vp, record, []uint64{0}, a, 100)
|
||||
wanted := &precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
|
||||
wantedVp := []*precompute.Validator{wanted}
|
||||
wanted := precompute.Validator{IsCurrentEpochAttester: true, IsCurrentEpochTargetAttester: true, InclusionSlot: e}
|
||||
wantedVp := []precompute.Validator{wanted}
|
||||
assert.DeepEqual(t, wantedVp, vp, "Incorrect attesting validator calculations")
|
||||
}
|
||||
|
||||
func TestUpdateBalance(t *testing.T) {
|
||||
vp := []*precompute.Validator{
|
||||
vp := []precompute.Validator{
|
||||
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{IsCurrentEpochTargetAttester: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{IsCurrentEpochTargetAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
@@ -70,7 +70,7 @@ func TestUpdateBalance(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpdateBalanceDifferentVersions(t *testing.T) {
|
||||
vp := []*precompute.Validator{
|
||||
vp := []precompute.Validator{
|
||||
{IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{IsCurrentEpochTargetAttester: true, IsCurrentEpochAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{IsCurrentEpochTargetAttester: true, CurrentEpochEffectiveBalance: 100 * params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
@@ -201,9 +201,9 @@ func TestProcessAttestations(t *testing.T) {
|
||||
err = beaconState.AppendCurrentEpochAttestations(ðpb.PendingAttestation{Data: att2.Data, AggregationBits: bf, InclusionDelay: 1})
|
||||
require.NoError(t, err)
|
||||
|
||||
pVals := make([]*precompute.Validator, validators)
|
||||
pVals := make([]precompute.Validator, validators)
|
||||
for i := 0; i < len(pVals); i++ {
|
||||
pVals[i] = &precompute.Validator{CurrentEpochEffectiveBalance: 100}
|
||||
pVals[i] = precompute.Validator{CurrentEpochEffectiveBalance: 100}
|
||||
}
|
||||
pVals, _, err = precompute.ProcessAttestations(t.Context(), beaconState, pVals, &precompute.Balance{})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/pkg/errors"
|
||||
@@ -17,40 +18,37 @@ import (
|
||||
// New gets called at the beginning of process epoch cycle to return
|
||||
// pre computed instances of validators attesting records and total
|
||||
// balances attested in an epoch.
|
||||
func New(ctx context.Context, s state.BeaconState) ([]*Validator, *Balance, error) {
|
||||
func New(ctx context.Context, s state.BeaconState) ([]Validator, *Balance, error) {
|
||||
_, span := trace.StartSpan(ctx, "precomputeEpoch.New")
|
||||
defer span.End()
|
||||
|
||||
pValidators := make([]*Validator, s.NumValidators())
|
||||
pValidators := make([]Validator, s.NumValidators())
|
||||
pBal := &Balance{}
|
||||
|
||||
currentEpoch := time.CurrentEpoch(s)
|
||||
prevEpoch := time.PrevEpoch(s)
|
||||
farFutureSlot := params.BeaconConfig().FarFutureSlot
|
||||
|
||||
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
// Was validator withdrawable or slashed
|
||||
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch()
|
||||
pVal := &Validator{
|
||||
IsSlashed: val.Slashed(),
|
||||
withdrawable := prevEpoch+1 >= val.WithdrawableEpoch
|
||||
pValidators[idx] = Validator{
|
||||
IsSlashed: val.Slashed,
|
||||
IsWithdrawableCurrentEpoch: withdrawable,
|
||||
CurrentEpochEffectiveBalance: val.EffectiveBalance(),
|
||||
CurrentEpochEffectiveBalance: val.EffectiveBalance,
|
||||
InclusionSlot: farFutureSlot,
|
||||
InclusionDistance: farFutureSlot,
|
||||
}
|
||||
// Was validator active current epoch
|
||||
if helpers.IsActiveValidatorUsingTrie(val, currentEpoch) {
|
||||
pVal.IsActiveCurrentEpoch = true
|
||||
pBal.ActiveCurrentEpoch += val.EffectiveBalance()
|
||||
if helpers.IsActiveCompactValidator(val, currentEpoch) {
|
||||
pValidators[idx].IsActiveCurrentEpoch = true
|
||||
pBal.ActiveCurrentEpoch += val.EffectiveBalance
|
||||
}
|
||||
// Was validator active previous epoch
|
||||
if helpers.IsActiveValidatorUsingTrie(val, prevEpoch) {
|
||||
pVal.IsActivePrevEpoch = true
|
||||
pBal.ActivePrevEpoch += val.EffectiveBalance()
|
||||
if helpers.IsActiveCompactValidator(val, prevEpoch) {
|
||||
pValidators[idx].IsActivePrevEpoch = true
|
||||
pBal.ActivePrevEpoch += val.EffectiveBalance
|
||||
}
|
||||
// Set inclusion slot and inclusion distance to be max, they will be compared and replaced
|
||||
// with the lower values
|
||||
pVal.InclusionSlot = params.BeaconConfig().FarFutureSlot
|
||||
pVal.InclusionDistance = params.BeaconConfig().FarFutureSlot
|
||||
|
||||
pValidators[idx] = pVal
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to initialize precompute")
|
||||
|
||||
@@ -30,26 +30,26 @@ func TestNew(t *testing.T) {
|
||||
e := params.BeaconConfig().FarFutureSlot
|
||||
v, b, err := precompute.New(t.Context(), s)
|
||||
require.NoError(t, err)
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsSlashed: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InclusionDistance: e,
|
||||
InclusionSlot: e,
|
||||
}, v[0], "Incorrect validator 0 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsWithdrawableCurrentEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InclusionDistance: e,
|
||||
InclusionSlot: e,
|
||||
}, v[1], "Incorrect validator 1 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsActiveCurrentEpoch: true,
|
||||
IsActivePrevEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InclusionDistance: e,
|
||||
InclusionSlot: e,
|
||||
}, v[2], "Incorrect validator 2 status")
|
||||
assert.DeepEqual(t, &precompute.Validator{
|
||||
assert.DeepEqual(t, precompute.Validator{
|
||||
IsActivePrevEpoch: true,
|
||||
CurrentEpochEffectiveBalance: 100,
|
||||
InclusionDistance: e,
|
||||
|
||||
@@ -10,15 +10,15 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, []uint64, error)
|
||||
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []*Validator) ([]uint64, error)
|
||||
type attesterRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []Validator) ([]uint64, []uint64, error)
|
||||
type proposerRewardsFunc func(state.ReadOnlyBeaconState, *Balance, []Validator) ([]uint64, error)
|
||||
|
||||
// ProcessRewardsAndPenaltiesPrecompute processes the rewards and penalties of individual validator.
|
||||
// This is an optimized version by passing in precomputed validator attesting records and total epoch balances.
|
||||
func ProcessRewardsAndPenaltiesPrecompute(
|
||||
state state.BeaconState,
|
||||
pBal *Balance,
|
||||
vp []*Validator,
|
||||
vp []Validator,
|
||||
attRewardsFunc attesterRewardsFunc,
|
||||
proRewardsFunc proposerRewardsFunc,
|
||||
) (state.BeaconState, error) {
|
||||
@@ -65,7 +65,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
|
||||
// AttestationsDelta computes and returns the rewards and penalties differences for individual validators based on the
|
||||
// voting records.
|
||||
func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Validator) ([]uint64, []uint64, error) {
|
||||
func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []Validator) ([]uint64, []uint64, error) {
|
||||
numOfVals := state.NumValidators()
|
||||
rewards := make([]uint64, numOfVals)
|
||||
penalties := make([]uint64, numOfVals)
|
||||
@@ -73,8 +73,8 @@ func AttestationsDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Val
|
||||
finalizedEpoch := state.FinalizedCheckpointEpoch()
|
||||
|
||||
sqrtActiveCurrentEpoch := math.CachedSquareRoot(pBal.ActiveCurrentEpoch)
|
||||
for i, v := range vp {
|
||||
rewards[i], penalties[i] = attestationDelta(pBal, sqrtActiveCurrentEpoch, v, prevEpoch, finalizedEpoch)
|
||||
for i := range vp {
|
||||
rewards[i], penalties[i] = attestationDelta(pBal, sqrtActiveCurrentEpoch, &vp[i], prevEpoch, finalizedEpoch)
|
||||
}
|
||||
return rewards, penalties, nil
|
||||
}
|
||||
@@ -155,7 +155,7 @@ func attestationDelta(pBal *Balance, sqrtActiveCurrentEpoch uint64, v *Validator
|
||||
|
||||
// ProposersDelta computes and returns the rewards and penalties differences for individual validators based on the
|
||||
// proposer inclusion records.
|
||||
func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []*Validator) ([]uint64, error) {
|
||||
func ProposersDelta(state state.ReadOnlyBeaconState, pBal *Balance, vp []Validator) ([]uint64, error) {
|
||||
numofVals := state.NumValidators()
|
||||
rewards := make([]uint64, numofVals)
|
||||
|
||||
|
||||
@@ -222,7 +222,7 @@ func TestProposerDeltaPrecompute_HappyCase(t *testing.T) {
|
||||
|
||||
proposerIndex := primitives.ValidatorIndex(1)
|
||||
b := &Balance{ActiveCurrentEpoch: 1000}
|
||||
v := []*Validator{
|
||||
v := []Validator{
|
||||
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
|
||||
}
|
||||
r, err := ProposersDelta(beaconState, b, v)
|
||||
@@ -244,7 +244,7 @@ func TestProposerDeltaPrecompute_ValidatorIndexOutOfRange(t *testing.T) {
|
||||
|
||||
proposerIndex := primitives.ValidatorIndex(validatorCount)
|
||||
b := &Balance{ActiveCurrentEpoch: 1000}
|
||||
v := []*Validator{
|
||||
v := []Validator{
|
||||
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex},
|
||||
}
|
||||
_, err = ProposersDelta(beaconState, b, v)
|
||||
@@ -260,7 +260,7 @@ func TestProposerDeltaPrecompute_SlashedCase(t *testing.T) {
|
||||
|
||||
proposerIndex := primitives.ValidatorIndex(1)
|
||||
b := &Balance{ActiveCurrentEpoch: 1000}
|
||||
v := []*Validator{
|
||||
v := []Validator{
|
||||
{IsPrevEpochAttester: true, CurrentEpochEffectiveBalance: 32, ProposerIndex: proposerIndex, IsSlashed: true},
|
||||
}
|
||||
r, err := ProposersDelta(beaconState, b, v)
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
)
|
||||
|
||||
@@ -25,9 +26,9 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
|
||||
|
||||
var hasSlashing bool
|
||||
// Iterate through validator list in state, stop until a validator satisfies slashing condition of current epoch.
|
||||
err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
|
||||
if val.Slashed() && correctEpoch {
|
||||
err := s.ForEachValidator(func(idx int, val *stateutil.CompactValidator) error {
|
||||
correctEpoch := epochToWithdraw == val.WithdrawableEpoch
|
||||
if val.Slashed && correctEpoch {
|
||||
hasSlashing = true
|
||||
}
|
||||
return nil
|
||||
@@ -42,16 +43,16 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
|
||||
|
||||
increment := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
bals := s.Balances()
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) error {
|
||||
correctEpoch := epochToWithdraw == val.WithdrawableEpoch()
|
||||
if val.Slashed() && correctEpoch {
|
||||
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
|
||||
validatorFunc := func(idx int, val *stateutil.CompactValidator) error {
|
||||
correctEpoch := epochToWithdraw == val.WithdrawableEpoch
|
||||
if val.Slashed && correctEpoch {
|
||||
penaltyNumerator := val.EffectiveBalance / increment * minSlashing
|
||||
penalty := penaltyNumerator / pBal.ActiveCurrentEpoch * increment
|
||||
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := s.ReadFromEveryValidator(validatorFunc); err != nil {
|
||||
if err := s.ForEachValidator(validatorFunc); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.SetBalances(bals)
|
||||
|
||||
@@ -46,6 +46,9 @@ const (
|
||||
|
||||
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
|
||||
DataColumnReceived = 12
|
||||
|
||||
// PayloadAttestationMessageReceived is sent after a payload attestation message is received from gossip or rpc.
|
||||
PayloadAttestationMessageReceived = 13
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -114,3 +117,8 @@ type DataColumnReceivedData struct {
|
||||
BlockRoot [32]byte
|
||||
KzgCommitments [][]byte
|
||||
}
|
||||
|
||||
// PayloadAttestationMessageReceivedData is the data sent with PayloadAttestationMessageReceived events.
|
||||
type PayloadAttestationMessageReceivedData struct {
|
||||
Message *ethpb.PayloadAttestationMessage
|
||||
}
|
||||
|
||||
@@ -33,6 +33,8 @@ const (
|
||||
LightClientOptimisticUpdate
|
||||
// PayloadAttributes events are fired upon a missed slot or new head.
|
||||
PayloadAttributes
|
||||
// PayloadProcessed is sent after a payload envelope has been processed.
|
||||
PayloadProcessed
|
||||
)
|
||||
|
||||
// BlockProcessedData is the data sent with BlockProcessed events.
|
||||
@@ -72,3 +74,9 @@ type InitializedData struct {
|
||||
// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
|
||||
GenesisValidatorsRoot []byte
|
||||
}
|
||||
|
||||
// PayloadProcessedData is the data sent with PayloadProcessed events.
|
||||
type PayloadProcessedData struct {
|
||||
Slot primitives.Slot
|
||||
BlockRoot [32]byte
|
||||
}
|
||||
|
||||
@@ -5,13 +5,16 @@ go_library(
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"bid.go",
|
||||
"builder_exit.go",
|
||||
"deposit_request.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"payload.go",
|
||||
"payload_attestation.go",
|
||||
"pending_payment.go",
|
||||
"proposer_slashing.go",
|
||||
"upgrade.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
@@ -32,11 +35,14 @@ go_library(
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -52,6 +58,7 @@ go_test(
|
||||
"pending_payment_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
"upgrade_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -110,7 +110,7 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
|
||||
return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
|
||||
}
|
||||
|
||||
if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
|
||||
if err := ValidatePayloadBidSignature(st, wrappedBid); err != nil {
|
||||
return errors.Wrap(err, "bid signature validation failed")
|
||||
}
|
||||
}
|
||||
@@ -179,10 +179,10 @@ func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayl
|
||||
return nil
|
||||
}
|
||||
|
||||
// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
|
||||
// ValidatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
|
||||
// It validates that the signature was created by the builder specified in the bid
|
||||
// using the appropriate domain for the beacon builder.
|
||||
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
|
||||
func ValidatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
|
||||
bid, err := signedBid.Bid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get bid")
|
||||
|
||||
37
beacon-chain/core/gloas/builder_exit.go
Normal file
37
beacon-chain/core/gloas/builder_exit.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// InitiateBuilderExit initiates the exit of a builder by setting its withdrawable epoch.
|
||||
//
|
||||
// <spec fn="initiate_builder_exit" fork="gloas" hash="3da938d5">
|
||||
// def initiate_builder_exit(state: BeaconState, builder_index: BuilderIndex) -> None:
|
||||
// """
|
||||
// Initiate the exit of the builder with index ``index``.
|
||||
// """
|
||||
// # Return if builder already initiated exit
|
||||
// builder = state.builders[builder_index]
|
||||
// if builder.withdrawable_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
//
|
||||
// # Set builder exit epoch
|
||||
// builder.withdrawable_epoch = get_current_epoch(state) + MIN_BUILDER_WITHDRAWABILITY_DELAY
|
||||
// </spec>
|
||||
func InitiateBuilderExit(s state.BeaconState, builderIndex primitives.BuilderIndex) error {
|
||||
builder, err := s.Builder(builderIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Return if builder already initiated exit.
|
||||
if builder.WithdrawableEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return nil
|
||||
}
|
||||
currentEpoch := slots.ToEpoch(s.Slot())
|
||||
builder.WithdrawableEpoch = currentEpoch + params.BeaconConfig().MinBuilderWithdrawabilityDelay
|
||||
return s.UpdateBuilderAtIndex(builderIndex, builder)
|
||||
}
|
||||
@@ -29,7 +29,7 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
|
||||
|
||||
// processDepositRequest processes the specific deposit request
|
||||
//
|
||||
// <spec fn="process_deposit_request" fork="gloas" hash="3c6b0310">
|
||||
// <spec fn="process_deposit_request" fork="gloas" hash="0e8b94ab">
|
||||
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
|
||||
// # [New in Gloas:EIP7732]
|
||||
// builder_pubkeys = [b.pubkey for b in state.builders]
|
||||
@@ -40,8 +40,11 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
|
||||
// # already exists with this pubkey, apply the deposit to their balance
|
||||
// is_builder = deposit_request.pubkey in builder_pubkeys
|
||||
// is_validator = deposit_request.pubkey in validator_pubkeys
|
||||
// is_builder_prefix = is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
|
||||
// if is_builder or (is_builder_prefix and not is_validator):
|
||||
// if is_builder or (
|
||||
// is_builder_withdrawal_credential(deposit_request.withdrawal_credentials)
|
||||
// and not is_validator
|
||||
// and not is_pending_validator(state, deposit_request.pubkey)
|
||||
// ):
|
||||
// # Apply builder deposits immediately
|
||||
// apply_deposit_for_builder(
|
||||
// state,
|
||||
@@ -74,6 +77,7 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
|
||||
return errors.Wrap(err, "could not apply builder deposit")
|
||||
}
|
||||
if applied {
|
||||
builderDepositsProcessedTotal.Inc()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -118,13 +122,7 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
|
||||
}
|
||||
|
||||
pubkey := bytesutil.ToBytes48(request.Pubkey)
|
||||
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
|
||||
idx, isBuilder := beaconState.BuilderIndexByPubkey(pubkey)
|
||||
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
|
||||
if !isBuilder && (!isBuilderPrefix || isValidator) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if isBuilder {
|
||||
if err := beaconState.IncreaseBuilderBalance(idx, request.Amount); err != nil {
|
||||
return false, err
|
||||
@@ -132,6 +130,20 @@ func applyBuilderDepositRequest(beaconState state.BeaconState, request *enginev1
|
||||
return true, nil
|
||||
}
|
||||
|
||||
isBuilderPrefix := helpers.IsBuilderWithdrawalCredential(request.WithdrawalCredentials)
|
||||
_, isValidator := beaconState.ValidatorIndexByPubkey(pubkey)
|
||||
if !isBuilderPrefix || isValidator {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
isPending, err := beaconState.IsPendingValidator(request.Pubkey)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if isPending {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if err := applyDepositForNewBuilder(
|
||||
beaconState,
|
||||
request.Pubkey,
|
||||
|
||||
@@ -91,6 +91,33 @@ func TestProcessDepositRequest_ExistingBuilderIncreasesBalance(t *testing.T) {
|
||||
require.Equal(t, 0, len(pending))
|
||||
}
|
||||
|
||||
func TestProcessDepositRequest_BuilderDepositWithExistingPendingDepositStaysPending(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
validatorCred := validatorWithdrawalCredentials()
|
||||
builderCred := builderWithdrawalCredentials()
|
||||
existingPending := stateTesting.GeneratePendingDeposit(t, sk, 1234, validatorCred, 0)
|
||||
req := depositRequestFromPending(stateTesting.GeneratePendingDeposit(t, sk, 200, builderCred, 1), 9)
|
||||
|
||||
st := newGloasState(t, nil, nil)
|
||||
require.NoError(t, st.SetPendingDeposits([]*ethpb.PendingDeposit{existingPending}))
|
||||
|
||||
err = processDepositRequest(st, req)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, ok := st.BuilderIndexByPubkey(toBytes48(req.Pubkey))
|
||||
require.Equal(t, false, ok)
|
||||
|
||||
pending, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pending))
|
||||
require.DeepEqual(t, existingPending.PublicKey, pending[0].PublicKey)
|
||||
require.DeepEqual(t, req.Pubkey, pending[1].PublicKey)
|
||||
require.DeepEqual(t, req.WithdrawalCredentials, pending[1].WithdrawalCredentials)
|
||||
require.Equal(t, req.Amount, pending[1].Amount)
|
||||
}
|
||||
|
||||
func TestApplyDepositForBuilder_InvalidSignatureIgnoresDeposit(t *testing.T) {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
27
beacon-chain/core/gloas/metrics.go
Normal file
27
beacon-chain/core/gloas/metrics.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
builderPendingPaymentsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_pending_payments_processed_total",
|
||||
Help: "The number of builder pending payments promoted into the builder pending withdrawal queue.",
|
||||
},
|
||||
)
|
||||
builderDepositsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_deposits_processed_total",
|
||||
Help: "The number of builder-related deposit requests processed.",
|
||||
},
|
||||
)
|
||||
builderExitsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_exits_processed_total",
|
||||
Help: "The number of processed builder exits.",
|
||||
},
|
||||
)
|
||||
)
|
||||
@@ -17,7 +17,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessExecutionPayload processes the signed execution payload envelope for the Gloas fork.
|
||||
// ProcessExecutionPayload is the gossip entry point: verify signature, validate
|
||||
// consistency, apply state mutations, and verify the post-payload state root.
|
||||
//
|
||||
// <spec fn="process_execution_payload" fork="gloas" hash="36bd3af3">
|
||||
// def process_execution_payload(
|
||||
@@ -108,7 +109,7 @@ func ProcessExecutionPayload(
|
||||
st state.BeaconState,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if err := VerifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
|
||||
if err := verifyExecutionPayloadEnvelopeSignature(st, signedEnvelope); err != nil {
|
||||
return errors.Wrap(err, "signature verification failed")
|
||||
}
|
||||
|
||||
@@ -117,29 +118,132 @@ func ProcessExecutionPayload(
|
||||
return errors.Wrap(err, "could not get envelope from signed envelope")
|
||||
}
|
||||
|
||||
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
|
||||
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get hash tree root")
|
||||
if err := validatePayloadConsistency(st, envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
if r != envelope.StateRoot() {
|
||||
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
|
||||
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return verifyPostStateRoot(ctx, st, envelope)
|
||||
}
|
||||
|
||||
// ApplyExecutionPayload applies the execution payload envelope to the state and performs the same
|
||||
// consistency checks as the full processing path. This keeps the post-payload state root computation
|
||||
// on a shared code path, even though some bid/payload checks are not strictly required for the root itself.
|
||||
// ProcessExecutionPayloadWithDeferredSig is the init-sync entry point: extract the
|
||||
// signature for deferred verification, validate consistency, apply state
|
||||
// mutations, and verify the post-payload state root. The caller provides the
|
||||
// previousStateRoot to avoid recomputing it.
|
||||
func ProcessExecutionPayloadWithDeferredSig(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
previousStateRoot [32]byte,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
sigBatch, err := ExecutionPayloadEnvelopeSignatureBatch(st, signedEnvelope)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not extract envelope signature batch")
|
||||
}
|
||||
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get envelope from signed envelope")
|
||||
}
|
||||
|
||||
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set latest block header state root")
|
||||
}
|
||||
|
||||
if err := validatePayloadConsistency(st, envelope); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := verifyPostStateRoot(ctx, st, envelope); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sigBatch, nil
|
||||
}
|
||||
|
||||
// ProcessBlindedExecutionPayload is the replay/stategen entry
|
||||
// point: patch the block header, do minimal bid consistency checks, and apply
|
||||
// state mutations. No payload data is available — only the blinded envelope.
|
||||
// A nil envelope is a no-op (the payload was not delivered for that slot).
|
||||
func ProcessBlindedExecutionPayload(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
previousStateRoot [32]byte,
|
||||
envelope interfaces.ROBlindedExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if envelope == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := setLatestBlockHeaderStateRoot(st, previousStateRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block header state root")
|
||||
}
|
||||
|
||||
if envelope.Slot() != st.Slot() {
|
||||
return errors.Errorf("blinded envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
|
||||
}
|
||||
|
||||
latestBid, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get latest execution payload bid")
|
||||
}
|
||||
if latestBid == nil {
|
||||
return errors.New("latest execution payload bid is nil")
|
||||
}
|
||||
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
|
||||
return errors.Errorf(
|
||||
"blinded envelope builder index does not match committed bid builder index: envelope=%d, bid=%d",
|
||||
envelope.BuilderIndex(),
|
||||
latestBid.BuilderIndex(),
|
||||
)
|
||||
}
|
||||
|
||||
bidBlockHash := latestBid.BlockHash()
|
||||
envelopeBlockHash := envelope.BlockHash()
|
||||
if bidBlockHash != envelopeBlockHash {
|
||||
return errors.Errorf(
|
||||
"blinded envelope block hash does not match committed bid block hash: envelope=%#x, bid=%#x",
|
||||
envelopeBlockHash,
|
||||
bidBlockHash,
|
||||
)
|
||||
}
|
||||
|
||||
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelopeBlockHash)
|
||||
}
|
||||
|
||||
// ApplyExecutionPayload patches the block header state root, validates
|
||||
// consistency, and applies state mutations. No signature or post-state-root
|
||||
// verification is performed. Used by the proposer path to compute the
|
||||
// post-payload state root for the envelope.
|
||||
func ApplyExecutionPayload(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if err := cacheLatestBlockHeaderStateRoot(ctx, st); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validatePayloadConsistency(st, envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
return applyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelope.BlockHash())
|
||||
}
|
||||
|
||||
func setLatestBlockHeaderStateRoot(st state.BeaconState, root [32]byte) error {
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
latestHeader.StateRoot = root[:]
|
||||
return st.SetLatestBlockHeader(latestHeader)
|
||||
}
|
||||
|
||||
// cacheLatestBlockHeaderStateRoot fills in the state root on the latest block
|
||||
// header if it hasn't been set yet (the spec's "cache latest block header
|
||||
// state root" step).
|
||||
func cacheLatestBlockHeaderStateRoot(ctx context.Context, st state.BeaconState) error {
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
if len(latestHeader.StateRoot) == 0 || bytes.Equal(latestHeader.StateRoot, make([]byte, 32)) {
|
||||
previousStateRoot, err := st.HashTreeRoot(ctx)
|
||||
@@ -151,7 +255,13 @@ func ApplyExecutionPayload(
|
||||
return errors.Wrap(err, "could not set latest block header")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validatePayloadConsistency checks that the envelope and payload are consistent
|
||||
// with the beacon block header, the committed bid, and the current state.
|
||||
func validatePayloadConsistency(st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
blockHeaderRoot, err := latestHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute block header root")
|
||||
@@ -190,7 +300,6 @@ func ApplyExecutionPayload(
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals from payload")
|
||||
}
|
||||
|
||||
ok, err := st.WithdrawalsMatchPayloadExpected(withdrawals)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate payload withdrawals")
|
||||
@@ -225,7 +334,32 @@ func ApplyExecutionPayload(
|
||||
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
|
||||
}
|
||||
|
||||
if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyPostStateRoot checks that the post-payload state root matches the
|
||||
// envelope's declared state root.
|
||||
func verifyPostStateRoot(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) error {
|
||||
r, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute post-envelope state root")
|
||||
}
|
||||
if r != envelope.StateRoot() {
|
||||
return fmt.Errorf("state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// applyExecutionPayloadStateMutations applies the state-changing operations
|
||||
// from an execution payload: process execution requests, queue builder payment,
|
||||
// set execution payload availability, and update the latest block hash.
|
||||
func applyExecutionPayloadStateMutations(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
executionRequests *enginev1.ExecutionRequests,
|
||||
blockHash [32]byte,
|
||||
) error {
|
||||
if err := processExecutionRequests(ctx, st, executionRequests); err != nil {
|
||||
return errors.Wrap(err, "could not process execution requests")
|
||||
}
|
||||
|
||||
@@ -237,13 +371,114 @@ func ApplyExecutionPayload(
|
||||
return errors.Wrap(err, "could not set execution payload availability")
|
||||
}
|
||||
|
||||
if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
|
||||
if err := st.SetLatestBlockHash(blockHash); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block hash")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecutionPayloadEnvelopeSignatureBatch extracts the BLS signature from a signed execution payload
|
||||
// envelope as a SignatureBatch for deferred batch verification.
|
||||
func ExecutionPayloadEnvelopeSignatureBatch(
|
||||
st state.BeaconState,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get envelope: %w", err)
|
||||
}
|
||||
|
||||
builderIdx := envelope.BuilderIndex()
|
||||
publicKey, err := envelopePublicKey(st, builderIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(envelope.Slot())
|
||||
domain, err := signing.Domain(
|
||||
st.Fork(),
|
||||
currentEpoch,
|
||||
params.BeaconConfig().DomainBeaconBuilder,
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to compute signing domain: %w", err)
|
||||
}
|
||||
|
||||
signingRoot, err := signedEnvelope.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to compute signing root: %w", err)
|
||||
}
|
||||
|
||||
signatureBytes := signedEnvelope.Signature()
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{signatureBytes[:]},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{signingRoot},
|
||||
Descriptions: []string{"execution payload envelope signature"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// verifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
|
||||
//
|
||||
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
|
||||
// def verify_execution_payload_envelope_signature(
|
||||
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
|
||||
// ) -> bool:
|
||||
// builder_index = signed_envelope.message.builder_index
|
||||
// if builder_index == BUILDER_INDEX_SELF_BUILD:
|
||||
// validator_index = state.latest_block_header.proposer_index
|
||||
// pubkey = state.validators[validator_index].pubkey
|
||||
// else:
|
||||
// pubkey = state.builders[builder_index].pubkey
|
||||
//
|
||||
// signing_root = compute_signing_root(
|
||||
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
|
||||
// )
|
||||
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
|
||||
// </spec>
|
||||
func verifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get envelope: %w", err)
|
||||
}
|
||||
|
||||
builderIdx := envelope.BuilderIndex()
|
||||
publicKey, err := envelopePublicKey(st, builderIdx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signatureBytes := signedEnvelope.Signature()
|
||||
signature, err := bls.SignatureFromBytes(signatureBytes[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid signature format: %w", err)
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(envelope.Slot())
|
||||
domain, err := signing.Domain(
|
||||
st.Fork(),
|
||||
currentEpoch,
|
||||
params.BeaconConfig().DomainBeaconBuilder,
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing domain: %w", err)
|
||||
}
|
||||
|
||||
signingRoot, err := signedEnvelope.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing root: %w", err)
|
||||
}
|
||||
|
||||
if !signature.Verify(publicKey, signingRoot[:]) {
|
||||
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
|
||||
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
|
||||
return proposerPublicKey(st)
|
||||
@@ -280,10 +515,6 @@ func builderPublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex)
|
||||
}
|
||||
|
||||
// processExecutionRequests processes deposits, withdrawals, and consolidations from execution requests.
|
||||
// Spec v1.7.0-alpha.0 (pseudocode):
|
||||
// for op in requests.deposits: process_deposit_request(state, op)
|
||||
// for op in requests.withdrawals: process_withdrawal_request(state, op)
|
||||
// for op in requests.consolidations: process_consolidation_request(state, op)
|
||||
func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *enginev1.ExecutionRequests) error {
|
||||
if err := processDepositRequests(ctx, st, rqs.Deposits); err != nil {
|
||||
return errors.Wrap(err, "could not process deposit requests")
|
||||
@@ -300,65 +531,3 @@ func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *en
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
|
||||
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
|
||||
// def verify_execution_payload_envelope_signature(
|
||||
//
|
||||
// state: BeaconState, signed_envelope: SignedExecutionPayloadEnvelope
|
||||
//
|
||||
// ) -> bool:
|
||||
//
|
||||
// builder_index = signed_envelope.message.builder_index
|
||||
// if builder_index == BUILDER_INDEX_SELF_BUILD:
|
||||
// validator_index = state.latest_block_header.proposer_index
|
||||
// pubkey = state.validators[validator_index].pubkey
|
||||
// else:
|
||||
// pubkey = state.builders[builder_index].pubkey
|
||||
//
|
||||
// signing_root = compute_signing_root(
|
||||
// signed_envelope.message, get_domain(state, DOMAIN_BEACON_BUILDER)
|
||||
// )
|
||||
// return bls.Verify(pubkey, signing_root, signed_envelope.signature)
|
||||
//
|
||||
// </spec>
|
||||
func VerifyExecutionPayloadEnvelopeSignature(st state.BeaconState, signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope) error {
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get envelope: %w", err)
|
||||
}
|
||||
|
||||
builderIdx := envelope.BuilderIndex()
|
||||
publicKey, err := envelopePublicKey(st, builderIdx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signatureBytes := signedEnvelope.Signature()
|
||||
signature, err := bls.SignatureFromBytes(signatureBytes[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid signature format: %w", err)
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(envelope.Slot())
|
||||
domain, err := signing.Domain(
|
||||
st.Fork(),
|
||||
currentEpoch,
|
||||
params.BeaconConfig().DomainBeaconBuilder,
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing domain: %w", err)
|
||||
}
|
||||
|
||||
signingRoot, err := signedEnvelope.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to compute signing root: %w", err)
|
||||
}
|
||||
|
||||
if !signature.Verify(publicKey, signingRoot[:]) {
|
||||
return fmt.Errorf("signature verification failed: %w", signing.ErrSigFailedToVerify)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
@@ -18,11 +19,14 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrValidatorNotInPTC = stderrors.New("validator not in PTC")
|
||||
|
||||
// ProcessPayloadAttestations validates payload attestations in a block body.
|
||||
//
|
||||
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
|
||||
@@ -77,7 +81,7 @@ func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body
|
||||
|
||||
// indexedPayloadAttestation converts a payload attestation into its indexed form.
|
||||
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
|
||||
committee, err := PayloadCommittee(ctx, st, att.Data.Slot)
|
||||
committee, err := st.PayloadCommitteeReadOnly(att.Data.Slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -96,10 +100,10 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PayloadCommittee returns the payload timeliness committee for a given slot for the state.
|
||||
// computePTC computes the payload timeliness committee for a given slot.
|
||||
//
|
||||
// <spec fn="get_ptc" fork="gloas" hash="ae15f761">
|
||||
// def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
||||
// <spec fn="compute_ptc" fork="gloas" hash="0f323552">
|
||||
// def compute_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
|
||||
// """
|
||||
// Get the payload timeliness committee for the given ``slot``.
|
||||
// """
|
||||
@@ -115,7 +119,7 @@ func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState
|
||||
// state, indices, seed, size=PTC_SIZE, shuffle_indices=False
|
||||
// )
|
||||
// </spec>
|
||||
func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
|
||||
func computePTC(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
|
||||
epoch := slots.ToEpoch(slot)
|
||||
seed, err := ptcSeed(st, epoch, slot)
|
||||
if err != nil {
|
||||
@@ -156,6 +160,24 @@ func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
|
||||
return selected, nil
|
||||
}
|
||||
|
||||
// PayloadCommitteeIndex returns the validator's index position in the payload committee for a slot.
|
||||
func PayloadCommitteeIndex(
|
||||
ctx context.Context,
|
||||
st state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
validatorIndex primitives.ValidatorIndex,
|
||||
) (uint64, error) {
|
||||
ptc, err := st.PayloadCommitteeReadOnly(slot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
idx := slices.Index(ptc, validatorIndex)
|
||||
if idx == -1 {
|
||||
return 0, fmt.Errorf("%w: validator=%d slot=%d", ErrValidatorNotInPTC, validatorIndex, slot)
|
||||
}
|
||||
return uint64(idx), nil
|
||||
}
|
||||
|
||||
// ptcSeed computes the seed for the payload timeliness committee.
|
||||
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
|
||||
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
|
||||
@@ -254,12 +276,12 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
|
||||
offset := (round % 16) * 2
|
||||
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
|
||||
|
||||
val, err := st.ValidatorAtIndex(idx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(idx)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "validator %d", idx)
|
||||
}
|
||||
|
||||
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
||||
return val.EffectiveBalance()*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
||||
}
|
||||
|
||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
||||
@@ -321,3 +343,43 @@ func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessPTCWindow rotates the cached PTC window at epoch boundaries by computing
|
||||
// PTC assignments for the new lookahead epoch and shifting the window.
|
||||
//
|
||||
// <spec fn="process_ptc_window" fork="gloas" hash="7be3d509">
|
||||
// def process_ptc_window(state: BeaconState) -> None:
|
||||
// """
|
||||
// Update the cached PTC window.
|
||||
// """
|
||||
// # Shift all epochs forward by one
|
||||
// state.ptc_window[: len(state.ptc_window) - SLOTS_PER_EPOCH] = state.ptc_window[SLOTS_PER_EPOCH:]
|
||||
// # Fill in the last epoch
|
||||
// next_epoch = Epoch(get_current_epoch(state) + MIN_SEED_LOOKAHEAD + 1)
|
||||
// start_slot = compute_start_slot_at_epoch(next_epoch)
|
||||
// state.ptc_window[len(state.ptc_window) - SLOTS_PER_EPOCH :] = [
|
||||
// compute_ptc(state, Slot(slot)) for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH)
|
||||
// ]
|
||||
// </spec>
|
||||
func ProcessPTCWindow(ctx context.Context, st state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "gloas.ProcessPTCWindow")
|
||||
defer span.End()
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
lastEpoch := slots.ToEpoch(st.Slot()) + params.BeaconConfig().MinSeedLookahead + 1
|
||||
startSlot, err := slots.EpochStart(lastEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newSlots := make([]*eth.PTCs, slotsPerEpoch)
|
||||
for i := range slotsPerEpoch {
|
||||
ptc, err := computePTC(ctx, st, startSlot+primitives.Slot(i))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newSlots[i] = ð.PTCs{ValidatorIndices: ptc}
|
||||
}
|
||||
|
||||
return st.RotatePTCWindow(newSlots)
|
||||
}
|
||||
|
||||
@@ -2,13 +2,14 @@ package gloas_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -119,7 +120,6 @@ func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
setupTestConfig(t)
|
||||
|
||||
sk1, pk1 := newKey(t)
|
||||
@@ -150,7 +150,6 @@ func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
setupTestConfig(t)
|
||||
|
||||
sk1, pk1 := newKey(t)
|
||||
@@ -216,6 +215,25 @@ func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
|
||||
}
|
||||
|
||||
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
st, err := testutil.NewBeaconStateGloas(func(seed *eth.BeaconStateGloas) error {
|
||||
seed.Slot = slot
|
||||
seed.Validators = vals
|
||||
seed.Balances = make([]uint64, len(vals))
|
||||
for i, v := range vals {
|
||||
seed.Balances[i] = v.EffectiveBalance
|
||||
}
|
||||
seed.PtcWindow = deterministicPTCWindow(len(vals))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return st
|
||||
}
|
||||
|
||||
func newPhase0TestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
st, err := testutil.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
for _, v := range vals {
|
||||
@@ -223,10 +241,25 @@ func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) sta
|
||||
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
|
||||
}
|
||||
require.NoError(t, st.SetSlot(slot))
|
||||
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
|
||||
return st
|
||||
}
|
||||
|
||||
func deterministicPTCWindow(validatorCount int) []*eth.PTCs {
|
||||
window := make([]*eth.PTCs, 3*params.BeaconConfig().SlotsPerEpoch)
|
||||
indices := make([]primitives.ValidatorIndex, fieldparams.PTCSize)
|
||||
if validatorCount > 0 {
|
||||
for i := range indices {
|
||||
indices[i] = primitives.ValidatorIndex(i % validatorCount)
|
||||
}
|
||||
}
|
||||
for i := range window {
|
||||
window[i] = ð.PTCs{
|
||||
ValidatorIndices: slices.Clone(indices),
|
||||
}
|
||||
}
|
||||
return window
|
||||
}
|
||||
|
||||
func setupTestConfig(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
@@ -291,6 +324,50 @@ func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.Paylo
|
||||
return agg.Marshal()
|
||||
}
|
||||
|
||||
func TestProcessPTCWindow(t *testing.T) {
|
||||
fuluSt, _ := testutil.DeterministicGenesisStateFulu(t, 256)
|
||||
st, err := gloas.UpgradeToGloas(fuluSt)
|
||||
require.NoError(t, err)
|
||||
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
// Get original window.
|
||||
origWindow, err := st.PTCWindow()
|
||||
require.NoError(t, err)
|
||||
windowSize := int(slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead)))
|
||||
require.Equal(t, windowSize, len(origWindow))
|
||||
|
||||
// Advance state to next epoch boundary so process_ptc_window sees a new epoch.
|
||||
require.NoError(t, st.SetSlot(slotsPerEpoch))
|
||||
|
||||
// Process PTC window — should rotate.
|
||||
require.NoError(t, gloas.ProcessPTCWindow(t.Context(), st))
|
||||
|
||||
newWindow, err := st.PTCWindow()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, windowSize, len(newWindow))
|
||||
|
||||
// The first two epochs should be the old epochs 1 and 2 (shifted left by one epoch).
|
||||
for i := range 2 * slotsPerEpoch {
|
||||
require.DeepEqual(t, origWindow[slotsPerEpoch+i], newWindow[i])
|
||||
}
|
||||
|
||||
// The last epoch should be freshly computed — not all zeros.
|
||||
lastStart := 2 * slotsPerEpoch
|
||||
for i := range slotsPerEpoch {
|
||||
ptcSlot := newWindow[lastStart+i]
|
||||
require.NotNil(t, ptcSlot)
|
||||
nonZero := false
|
||||
for _, idx := range ptcSlot.ValidatorIndices {
|
||||
if idx != 0 {
|
||||
nonZero = true
|
||||
break
|
||||
}
|
||||
}
|
||||
require.Equal(t, true, nonZero, "last epoch slot %d should have non-zero validator indices", i)
|
||||
}
|
||||
}
|
||||
|
||||
type validatorLookupErrState struct {
|
||||
state.BeaconState
|
||||
errIndex primitives.ValidatorIndex
|
||||
|
||||
@@ -242,6 +242,84 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadWithDeferredSig_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
header := fixture.state.LatestBlockHeader()
|
||||
var previousStateRoot [32]byte
|
||||
copy(previousStateRoot[:], header.StateRoot)
|
||||
|
||||
sigBatch, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, sigBatch)
|
||||
require.Equal(t, 1, len(sigBatch.Signatures))
|
||||
require.Equal(t, 1, len(sigBatch.PublicKeys))
|
||||
require.Equal(t, 1, len(sigBatch.Messages))
|
||||
require.Equal(t, 1, len(sigBatch.Descriptions))
|
||||
require.Equal(t, "execution payload envelope signature", sigBatch.Descriptions[0])
|
||||
|
||||
valid, err := sigBatch.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, valid)
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
var expectedHash [32]byte
|
||||
copy(expectedHash[:], fixture.payload.BlockHash)
|
||||
require.Equal(t, expectedHash, latestHash)
|
||||
|
||||
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
|
||||
updatedHeader := fixture.state.LatestBlockHeader()
|
||||
require.DeepEqual(t, previousStateRoot[:], updatedHeader.StateRoot)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayloadWithDeferredSig_PreviousStateRootMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
previousStateRoot := [32]byte{0x42}
|
||||
|
||||
_, err := ProcessExecutionPayloadWithDeferredSig(t.Context(), fixture.state, previousStateRoot, fixture.signed)
|
||||
require.ErrorContains(t, "envelope beacon block root does not match state latest block header root", err)
|
||||
}
|
||||
|
||||
func TestApplyExecutionPayload_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
envelope, err := fixture.signed.Envelope()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ApplyExecutionPayload(t.Context(), fixture.state, envelope))
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
var expectedHash [32]byte
|
||||
copy(expectedHash[:], fixture.payload.BlockHash)
|
||||
require.Equal(t, expectedHash, latestHash)
|
||||
|
||||
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
}
|
||||
|
||||
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
newHash := [32]byte{}
|
||||
newHash[0] = 0x99
|
||||
|
||||
require.NoError(t, applyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, newHash, latestHash)
|
||||
|
||||
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
|
||||
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
|
||||
@@ -265,6 +343,95 @@ func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestProcessBlindedExecutionPayload_NilEnvelope(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, nil))
|
||||
}
|
||||
|
||||
func TestProcessBlindedExecutionPayload_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
st := fixture.state
|
||||
|
||||
blockHash := [32]byte(fixture.payload.BlockHash)
|
||||
stateRoot := [32]byte{0xAA}
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: fixture.envelope.BuilderIndex,
|
||||
BlockHash: blockHash[:],
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
ExecutionRequests: fixture.envelope.ExecutionRequests,
|
||||
},
|
||||
}
|
||||
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ProcessBlindedExecutionPayload(t.Context(), st, stateRoot, wrappedEnv))
|
||||
|
||||
latestHash, err := st.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blockHash, latestHash)
|
||||
|
||||
available, err := st.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
require.DeepEqual(t, stateRoot[:], header.StateRoot)
|
||||
}
|
||||
|
||||
func TestProcessBlindedExecutionPayload_SlotMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot + 1,
|
||||
BlockHash: make([]byte, 32),
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "blinded envelope slot does not match state slot", err)
|
||||
}
|
||||
|
||||
func TestProcessBlindedExecutionPayload_BuilderIndexMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
blockHash := [32]byte(fixture.payload.BlockHash)
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: 999,
|
||||
BlockHash: blockHash[:],
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "builder index does not match", err)
|
||||
}
|
||||
|
||||
func TestProcessBlindedExecutionPayload_BlockHashMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
wrongHash := bytes.Repeat([]byte{0xFF}, 32)
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: fixture.envelope.BuilderIndex,
|
||||
BlockHash: wrongHash,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ProcessBlindedExecutionPayload(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "block hash does not match", err)
|
||||
}
|
||||
|
||||
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
@@ -297,14 +464,14 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(st, signed))
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(st, signed))
|
||||
})
|
||||
|
||||
t.Run("builder", func(t *testing.T) {
|
||||
signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(fixture.signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, VerifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
|
||||
require.NoError(t, verifyExecutionPayloadEnvelopeSignature(fixture.state, signed))
|
||||
})
|
||||
|
||||
t.Run("invalid signature", func(t *testing.T) {
|
||||
@@ -330,7 +497,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = VerifyExecutionPayloadEnvelopeSignature(st, badSigned)
|
||||
err = verifyExecutionPayloadEnvelopeSignature(st, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
|
||||
@@ -342,7 +509,7 @@ func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
badSigned, err := blocks.WrappedROSignedExecutionPayloadEnvelope(signedProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = VerifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
|
||||
err = verifyExecutionPayloadEnvelopeSignature(fixture.state, badSigned)
|
||||
require.ErrorContains(t, "invalid signature format", err)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -52,6 +52,7 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
if err := state.RotateBuilderPendingPayments(); err != nil {
|
||||
return errors.Wrap(err, "could not rotate builder pending payments")
|
||||
}
|
||||
builderPendingPaymentsProcessedTotal.Add(float64(len(withdrawals)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
@@ -9,12 +11,13 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// UpgradeToGloas updates inputs a generic state to return the version Gloas state.
|
||||
//
|
||||
// <spec fn="upgrade_to_gloas" fork="gloas" hash="6e66df25">
|
||||
// <spec fn="upgrade_to_gloas" fork="gloas" hash="8f67112c">
|
||||
// def upgrade_to_gloas(pre: fulu.BeaconState) -> BeaconState:
|
||||
// epoch = fulu.get_current_epoch(pre)
|
||||
//
|
||||
@@ -81,6 +84,8 @@ import (
|
||||
// latest_block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
// # [New in Gloas:EIP7732]
|
||||
// payload_expected_withdrawals=[],
|
||||
// # [New in Gloas:EIP7732]
|
||||
// ptc_window=initialize_ptc_window(pre),
|
||||
// )
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
@@ -143,12 +148,73 @@ func UpgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert to gloas")
|
||||
}
|
||||
ptcWindow, err := initializePTCWindow(context.Background(), s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to initialize ptc window")
|
||||
}
|
||||
if err := s.SetPTCWindow(ptcWindow); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to set ptc window")
|
||||
}
|
||||
if err := s.OnboardBuildersFromPendingDeposits(); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to onboard builders from pending deposits")
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// initializePTCWindow builds the initial PTC window for the Gloas fork upgrade.
|
||||
//
|
||||
// <spec fn="initialize_ptc_window" fork="gloas" hash="3764b7f5">
|
||||
// def initialize_ptc_window(
|
||||
// state: BeaconState,
|
||||
// ) -> Vector[Vector[ValidatorIndex, PTC_SIZE], (2 + MIN_SEED_LOOKAHEAD) * SLOTS_PER_EPOCH]:
|
||||
// """
|
||||
// Return the cached PTC window starting from the current epoch.
|
||||
// Used to initialize the ``ptc_window`` field in the beacon state at genesis and after forks.
|
||||
// """
|
||||
// empty_previous_epoch = [
|
||||
// Vector[ValidatorIndex, PTC_SIZE]([ValidatorIndex(0) for _ in range(PTC_SIZE)])
|
||||
// for _ in range(SLOTS_PER_EPOCH)
|
||||
// ]
|
||||
//
|
||||
// ptcs = []
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// for e in range(1 + MIN_SEED_LOOKAHEAD):
|
||||
// epoch = Epoch(current_epoch + e)
|
||||
// start_slot = compute_start_slot_at_epoch(epoch)
|
||||
// ptcs += [compute_ptc(state, Slot(start_slot + i)) for i in range(SLOTS_PER_EPOCH)]
|
||||
//
|
||||
// return empty_previous_epoch + ptcs
|
||||
// </spec>
|
||||
func initializePTCWindow(ctx context.Context, st state.ReadOnlyBeaconState) ([]*ethpb.PTCs, error) {
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
windowSize := slotsPerEpoch.Mul(uint64(2 + params.BeaconConfig().MinSeedLookahead))
|
||||
window := make([]*ethpb.PTCs, 0, windowSize)
|
||||
|
||||
// Previous epoch has no cached data at fork time — fill with empty slots.
|
||||
for range slotsPerEpoch {
|
||||
window = append(window, ðpb.PTCs{
|
||||
ValidatorIndices: make([]primitives.ValidatorIndex, fieldparams.PTCSize),
|
||||
})
|
||||
}
|
||||
|
||||
// Compute PTC for current epoch through lookahead.
|
||||
startSlot, err := slots.EpochStart(currentEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
totalSlots := slotsPerEpoch.Mul(uint64(1 + params.BeaconConfig().MinSeedLookahead))
|
||||
for i := range totalSlots {
|
||||
ptc, err := computePTC(ctx, st, startSlot+i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
window = append(window, ðpb.PTCs{ValidatorIndices: ptc})
|
||||
}
|
||||
|
||||
return window, nil
|
||||
}
|
||||
|
||||
func upgradeToGloas(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
|
||||
@@ -103,7 +103,7 @@ func TestUpgradeToGloas_Basic(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUpgradeToGloas_OnboardsBuilderDeposit(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateFulu(t, 4)
|
||||
st, _ := util.DeterministicGenesisStateFulu(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
105
beacon-chain/core/gloas/withdrawals.go
Normal file
105
beacon-chain/core/gloas/withdrawals.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessWithdrawals applies withdrawals to the state for Gloas.
|
||||
//
|
||||
// <spec fn="process_withdrawals" fork="gloas" hash="16d9ad2a">
|
||||
// def process_withdrawals(
|
||||
//
|
||||
// state: BeaconState,
|
||||
// # [Modified in Gloas:EIP7732]
|
||||
// # Removed `payload`
|
||||
//
|
||||
// ) -> None:
|
||||
//
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Return early if the parent block is empty
|
||||
// if not is_parent_block_full(state):
|
||||
// return
|
||||
//
|
||||
// # Get expected withdrawals
|
||||
// expected = get_expected_withdrawals(state)
|
||||
//
|
||||
// # Apply expected withdrawals
|
||||
// apply_withdrawals(state, expected.withdrawals)
|
||||
//
|
||||
// # Update withdrawals fields in the state
|
||||
// update_next_withdrawal_index(state, expected.withdrawals)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_payload_expected_withdrawals(state, expected.withdrawals)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_builder_pending_withdrawals(state, expected.processed_builder_withdrawals_count)
|
||||
// update_pending_partial_withdrawals(state, expected.processed_partial_withdrawals_count)
|
||||
// # [New in Gloas:EIP7732]
|
||||
// update_next_withdrawal_builder_index(state, expected.processed_builders_sweep_count)
|
||||
// update_next_withdrawal_validator_index(state, expected.withdrawals)
|
||||
//
|
||||
// </spec>
|
||||
func ProcessWithdrawals(st state.BeaconState) error {
|
||||
// Must be called before ProcessExecutionPayloadBid for the current block.
|
||||
full, err := st.IsParentBlockFull()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get parent block full status")
|
||||
}
|
||||
if !full {
|
||||
return nil
|
||||
}
|
||||
|
||||
expected, err := st.ExpectedWithdrawalsGloas()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
if err := st.DecreaseWithdrawalBalances(expected.Withdrawals); err != nil {
|
||||
return errors.Wrap(err, "could not decrease withdrawal balances")
|
||||
}
|
||||
|
||||
if len(expected.Withdrawals) > 0 {
|
||||
if err := st.SetNextWithdrawalIndex(expected.Withdrawals[len(expected.Withdrawals)-1].Index + 1); err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal index")
|
||||
}
|
||||
}
|
||||
|
||||
if err := st.SetPayloadExpectedWithdrawals(expected.Withdrawals); err != nil {
|
||||
return errors.Wrap(err, "could not set payload expected withdrawals")
|
||||
}
|
||||
|
||||
if err := st.DequeueBuilderPendingWithdrawals(expected.ProcessedBuilderWithdrawalsCount); err != nil {
|
||||
return errors.Wrap(err, "unable to dequeue builder pending withdrawals from state")
|
||||
}
|
||||
|
||||
if err := st.DequeuePendingPartialWithdrawals(expected.ProcessedPartialWithdrawalsCount); err != nil {
|
||||
return errors.Wrap(err, "unable to dequeue partial withdrawals from state")
|
||||
}
|
||||
|
||||
err = st.SetNextWithdrawalBuilderIndex(expected.NextWithdrawalBuilderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal builder index")
|
||||
}
|
||||
|
||||
var nextValidatorIndex primitives.ValidatorIndex
|
||||
if uint64(len(expected.Withdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
}
|
||||
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
|
||||
} else {
|
||||
nextValidatorIndex = expected.Withdrawals[len(expected.Withdrawals)-1].ValidatorIndex + 1
|
||||
if nextValidatorIndex == primitives.ValidatorIndex(st.NumValidators()) {
|
||||
nextValidatorIndex = 0
|
||||
}
|
||||
}
|
||||
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
|
||||
return errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
388
beacon-chain/core/gloas/withdrawals_test.go
Normal file
388
beacon-chain/core/gloas/withdrawals_test.go
Normal file
@@ -0,0 +1,388 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
// TestProcessWithdrawals exercises the happy paths of ProcessWithdrawals
// against a withdrawalsState test double: it verifies that nothing is mutated
// when the parent block is not full, that the sweep cursor advances by
// MaxValidatorsPerWithdrawalsSweep for a partial payload, that a full payload
// resumes from the last withdrawn validator (wrapping at NumValidators), and
// that an empty withdrawal list skips the next-withdrawal-index update while
// still performing the remaining state updates.
func TestProcessWithdrawals(t *testing.T) {
	cases := []struct {
		name string
		// build constructs the mock state this case runs against.
		build func(t *testing.T) *withdrawalsState
		// check asserts on the calls and arguments the mock recorded.
		check func(t *testing.T, st *withdrawalsState)
	}{
		{
			// Parent block not full: ProcessWithdrawals must return early
			// without touching any withdrawal-related state.
			name: "parent block not full",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState: newGloasState(t, nil, nil),
					parentFull:  false,
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.expectedCalled)
				require.Equal(t, false, st.decreaseCalled)
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
				require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, false, st.dequeueBuilderCalled)
				require.Equal(t, false, st.dequeuePartialCalled)
				require.Equal(t, false, st.setNextBuilderIndexCalled)
				require.Equal(t, false, st.nextValidatorIndexCalled)
				require.Equal(t, false, st.setNextValidatorIndexCalled)
			},
		},
		{
			// Fewer withdrawals than MaxWithdrawalsPerPayload: the validator
			// cursor advances by the sweep bound, modulo the validator count.
			name: "updates indexes when not full payload",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState:        newGloasState(t, nil, nil),
					parentFull:         true,
					numValidators:      10,
					nextValidatorIndex: 3,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals: []*enginev1.Withdrawal{
							{Index: 7, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
							{Index: 8, ValidatorIndex: 4, Amount: 2, Address: []byte{0x02}},
						},
						ProcessedBuilderWithdrawalsCount: 5,
						ProcessedPartialWithdrawalsCount: 2,
						NextWithdrawalBuilderIndex:       7,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.expectedCalled)
				require.Equal(t, true, st.decreaseCalled)
				require.NotNil(t, st.setNextWithdrawalIndexArg)
				// Last withdrawal index was 8, so the next index is 9.
				require.Equal(t, uint64(9), *st.setNextWithdrawalIndexArg)
				require.DeepEqual(t, st.expectedResult.Withdrawals, st.setPayloadExpectedWithdrawalsArg)
				require.Equal(t, uint64(5), *st.dequeueBuilderArg)
				require.Equal(t, uint64(2), *st.dequeuePartialArg)
				require.Equal(t, primitives.BuilderIndex(7), *st.setNextBuilderIndexArg)
				require.Equal(t, true, st.nextValidatorIndexCalled)

				// Cursor = (current + sweep bound) mod validator count.
				expectedNext := (uint64(st.nextValidatorIndex) + uint64(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)) % st.numValidators
				require.Equal(t, primitives.ValidatorIndex(expectedNext), *st.setNextValidatorIndexArg)
			},
		},
		{
			// Exactly MaxWithdrawalsPerPayload withdrawals: the cursor resumes
			// after the last withdrawn validator and wraps to 0 at the end of
			// the registry (last index 4 with 5 validators -> 0).
			name: "full payload uses last validator index",
			build: func(t *testing.T) *withdrawalsState {
				max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
				withdrawals := make([]*enginev1.Withdrawal, max)
				for i := range max {
					withdrawals[i] = &enginev1.Withdrawal{
						Index:          uint64(i),
						ValidatorIndex: 0,
						Amount:         1,
						Address:        []byte{0x03},
					}
				}
				withdrawals[max-1].ValidatorIndex = 4

				return &withdrawalsState{
					BeaconState:   newGloasState(t, nil, nil),
					parentFull:    true,
					numValidators: 5,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals:                withdrawals,
						NextWithdrawalBuilderIndex: 1,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				max := int(params.BeaconConfig().MaxWithdrawalsPerPayload)
				require.NotNil(t, st.setNextWithdrawalIndexArg)
				require.Equal(t, uint64(max), *st.setNextWithdrawalIndexArg)
				// The sweep-based path must NOT be taken for a full payload.
				require.Equal(t, false, st.nextValidatorIndexCalled)
				require.Equal(t, primitives.ValidatorIndex(0), *st.setNextValidatorIndexArg)
			},
		},
		{
			// No withdrawals produced: SetNextWithdrawalIndex is skipped but
			// all other bookkeeping still runs.
			name: "empty withdrawals skips next index update",
			build: func(t *testing.T) *withdrawalsState {
				return &withdrawalsState{
					BeaconState:   newGloasState(t, nil, nil),
					parentFull:    true,
					numValidators: 8,
					expectedResult: state.ExpectedWithdrawalsGloasResult{
						Withdrawals:                      []*enginev1.Withdrawal{},
						ProcessedBuilderWithdrawalsCount: 1,
						ProcessedPartialWithdrawalsCount: 2,
						NextWithdrawalBuilderIndex:       4,
					},
				}
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
				require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, true, st.dequeueBuilderCalled)
				require.Equal(t, true, st.dequeuePartialCalled)
				require.Equal(t, true, st.setNextBuilderIndexCalled)
				require.Equal(t, true, st.nextValidatorIndexCalled)
				require.Equal(t, true, st.setNextValidatorIndexCalled)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			st := tc.build(t)
			require.NoError(t, ProcessWithdrawals(st))
			if tc.check != nil {
				tc.check(t, st)
			}
		})
	}
}
|
||||
|
||||
// TestProcessWithdrawals_ErrorPaths injects an error into each mocked state
// operation in turn and verifies that ProcessWithdrawals (a) propagates the
// injected error (checked with ErrorIs through the wrapping) and (b) stops at
// that step — the next operation in the sequence must not have been called.
// Together the cases pin down the exact call order of ProcessWithdrawals.
func TestProcessWithdrawals_ErrorPaths(t *testing.T) {
	// base builds a mock whose happy path would exercise every step, so any
	// single injected error is guaranteed to be reached.
	base := func(t *testing.T) *withdrawalsState {
		return &withdrawalsState{
			BeaconState:   newGloasState(t, nil, nil),
			parentFull:    true,
			numValidators: 16,
			expectedResult: state.ExpectedWithdrawalsGloasResult{
				Withdrawals: []*enginev1.Withdrawal{
					{Index: 1, ValidatorIndex: 2, Amount: 1, Address: []byte{0x01}},
				},
				ProcessedBuilderWithdrawalsCount: 1,
				ProcessedPartialWithdrawalsCount: 1,
				NextWithdrawalBuilderIndex:       2,
			},
			nextValidatorIndex: 5,
		}
	}

	cases := []struct {
		name string
		err  error
		// set installs the injected error on the mock.
		set func(st *withdrawalsState, err error)
		// check verifies processing stopped at the failing step.
		check func(t *testing.T, st *withdrawalsState)
	}{
		{
			name: "parent block full error",
			err:  errors.New("parent err"),
			set: func(st *withdrawalsState, err error) {
				st.parentErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, false, st.expectedCalled)
			},
		},
		{
			name: "expected withdrawals error",
			err:  errors.New("expected err"),
			set: func(st *withdrawalsState, err error) {
				st.expectedErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.expectedCalled)
				require.Equal(t, false, st.decreaseCalled)
			},
		},
		{
			name: "decrease balances error",
			err:  errors.New("decrease err"),
			set: func(st *withdrawalsState, err error) {
				st.decreaseErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.decreaseCalled)
				require.Equal(t, false, st.setNextWithdrawalIndexCalled)
			},
		},
		{
			name: "set next withdrawal index error",
			err:  errors.New("next index err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextWithdrawalIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextWithdrawalIndexCalled)
				require.Equal(t, false, st.setPayloadExpectedWithdrawalsCalled)
			},
		},
		{
			name: "set payload expected withdrawals error",
			err:  errors.New("payload expected err"),
			set: func(st *withdrawalsState, err error) {
				st.setPayloadExpectedWithdrawalsErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setPayloadExpectedWithdrawalsCalled)
				require.Equal(t, false, st.dequeueBuilderCalled)
			},
		},
		{
			name: "dequeue builder pending withdrawals error",
			err:  errors.New("dequeue builder err"),
			set: func(st *withdrawalsState, err error) {
				st.dequeueBuilderErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.dequeueBuilderCalled)
				require.Equal(t, false, st.dequeuePartialCalled)
			},
		},
		{
			name: "dequeue pending partial withdrawals error",
			err:  errors.New("dequeue partial err"),
			set: func(st *withdrawalsState, err error) {
				st.dequeuePartialErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.dequeuePartialCalled)
				require.Equal(t, false, st.setNextBuilderIndexCalled)
			},
		},
		{
			name: "set next withdrawal builder index error",
			err:  errors.New("next builder err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextBuilderIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextBuilderIndexCalled)
				require.Equal(t, false, st.nextValidatorIndexCalled)
			},
		},
		{
			name: "next withdrawal validator index error",
			err:  errors.New("next validator err"),
			set: func(st *withdrawalsState, err error) {
				st.nextValidatorIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.nextValidatorIndexCalled)
				require.Equal(t, false, st.setNextValidatorIndexCalled)
			},
		},
		{
			name: "set next withdrawal validator index error",
			err:  errors.New("set next validator err"),
			set: func(st *withdrawalsState, err error) {
				st.setNextValidatorIndexErr = err
			},
			check: func(t *testing.T, st *withdrawalsState) {
				require.Equal(t, true, st.setNextValidatorIndexCalled)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			st := base(t)
			tc.set(st, tc.err)
			err := ProcessWithdrawals(st)
			require.ErrorIs(t, err, tc.err)
			if tc.check != nil {
				tc.check(t, st)
			}
		})
	}
}
|
||||
|
||||
type withdrawalsState struct {
|
||||
setNextValidatorIndexCalled bool
|
||||
nextValidatorIndexCalled bool
|
||||
setNextBuilderIndexCalled bool
|
||||
dequeuePartialCalled bool
|
||||
dequeueBuilderCalled bool
|
||||
setPayloadExpectedWithdrawalsCalled bool
|
||||
setNextWithdrawalIndexCalled bool
|
||||
parentFull bool
|
||||
expectedCalled bool
|
||||
decreaseCalled bool
|
||||
numValidators uint64
|
||||
setNextWithdrawalIndexArg *uint64
|
||||
nextValidatorIndex primitives.ValidatorIndex
|
||||
setNextBuilderIndexArg *primitives.BuilderIndex
|
||||
dequeuePartialArg *uint64
|
||||
setNextValidatorIndexArg *primitives.ValidatorIndex
|
||||
dequeueBuilderArg *uint64
|
||||
state.BeaconState
|
||||
setNextValidatorIndexErr error
|
||||
setNextBuilderIndexErr error
|
||||
dequeuePartialErr error
|
||||
dequeueBuilderErr error
|
||||
setPayloadExpectedWithdrawalsErr error
|
||||
nextValidatorIndexErr error
|
||||
decreaseErr error
|
||||
expectedErr error
|
||||
parentErr error
|
||||
setNextWithdrawalIndexErr error
|
||||
setPayloadExpectedWithdrawalsArg []*enginev1.Withdrawal
|
||||
expectedResult state.ExpectedWithdrawalsGloasResult
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) IsParentBlockFull() (bool, error) {
|
||||
return w.parentFull, w.parentErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) ExpectedWithdrawalsGloas() (state.ExpectedWithdrawalsGloasResult, error) {
|
||||
w.expectedCalled = true
|
||||
if w.expectedErr != nil {
|
||||
return state.ExpectedWithdrawalsGloasResult{}, w.expectedErr
|
||||
}
|
||||
return w.expectedResult, nil
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) DecreaseWithdrawalBalances(_ []*enginev1.Withdrawal) error {
|
||||
w.decreaseCalled = true
|
||||
return w.decreaseErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) SetNextWithdrawalIndex(index uint64) error {
|
||||
w.setNextWithdrawalIndexCalled = true
|
||||
w.setNextWithdrawalIndexArg = &index
|
||||
return w.setNextWithdrawalIndexErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) SetPayloadExpectedWithdrawals(withdrawals []*enginev1.Withdrawal) error {
|
||||
w.setPayloadExpectedWithdrawalsCalled = true
|
||||
w.setPayloadExpectedWithdrawalsArg = withdrawals
|
||||
return w.setPayloadExpectedWithdrawalsErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) DequeueBuilderPendingWithdrawals(n uint64) error {
|
||||
w.dequeueBuilderCalled = true
|
||||
w.dequeueBuilderArg = &n
|
||||
return w.dequeueBuilderErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) DequeuePendingPartialWithdrawals(n uint64) error {
|
||||
w.dequeuePartialCalled = true
|
||||
w.dequeuePartialArg = &n
|
||||
return w.dequeuePartialErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) SetNextWithdrawalBuilderIndex(index primitives.BuilderIndex) error {
|
||||
w.setNextBuilderIndexCalled = true
|
||||
w.setNextBuilderIndexArg = &index
|
||||
return w.setNextBuilderIndexErr
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) NextWithdrawalValidatorIndex() (primitives.ValidatorIndex, error) {
|
||||
w.nextValidatorIndexCalled = true
|
||||
if w.nextValidatorIndexErr != nil {
|
||||
return 0, w.nextValidatorIndexErr
|
||||
}
|
||||
return w.nextValidatorIndex, nil
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) NumValidators() int {
|
||||
return int(w.numValidators)
|
||||
}
|
||||
|
||||
func (w *withdrawalsState) SetNextWithdrawalValidatorIndex(index primitives.ValidatorIndex) error {
|
||||
w.setNextValidatorIndexCalled = true
|
||||
w.setNextValidatorIndexArg = &index
|
||||
return w.setNextValidatorIndexErr
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user