Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-04-19 03:01:06 -04:00
Compare commits: check-corr...gloas-e2e (118 commits)
| SHA1 |
|---|
| 7fab759de6 |
| a63d3f9178 |
| a7b83c358a |
| 29a0fd6760 |
| 209e46bab7 |
| 108e2806cb |
| 68c4c36e65 |
| 67cc68c3bb |
| c33f0d04b7 |
| f05972a181 |
| 7352ae03c6 |
| 4f34624a54 |
| 4e44fdf55e |
| 139773aa3a |
| 6558e947ca |
| d8150ac20c |
| 543746d95d |
| 8e8c990a04 |
| 3c9eae6064 |
| c0ee666996 |
| 3e61778d38 |
| 0bfe736730 |
| 277797f6f8 |
| 2898a5e8a2 |
| ca8cc65d72 |
| 445487b4a7 |
| 86edeef90f |
| bfbca75862 |
| 1f72a1428c |
| 101dd55710 |
| 7d797ee4f9 |
| 45e38d430f |
| 0a643b177d |
| 9ea9e1f07c |
| 8fb4d85bbd |
| 259f526c8d |
| 77b5a7a5b3 |
| fb9d9d93de |
| 7781e40abf |
| e77724401d |
| f43ba7851c |
| dfc5bbef7f |
| 6430e27257 |
| b141b5ccd2 |
| 696a08f3b9 |
| 65d428db58 |
| a6e669d8bc |
| 605ab1c7ac |
| 93f7214b32 |
| 1bffcc84f4 |
| 7728ad4aa2 |
| 1362654669 |
| 3cec3997f8 |
| 1236519810 |
| 36052ed1bb |
| 458d4ebe54 |
| 8680f3f8bb |
| 416c49e6d5 |
| 6605dfbd50 |
| 84993fdd68 |
| 1e916418f2 |
| 8d5d584cf8 |
| 598509ffa8 |
| 0fbd643c02 |
| 0e4f3231d2 |
| b17f2752ab |
| 932e5eb7d8 |
| de233438f1 |
| e35f6c351a |
| 4da5ed072c |
| dec4b43b3e |
| 17ea45a011 |
| 1934afac73 |
| d0c9a31657 |
| 66c70200ee |
| 928a874e4a |
| ed8a3351aa |
| 5b95d11c5e |
| 7a4bea0e44 |
| e899003973 |
| e751a74c64 |
| 6826e77539 |
| 15c178ef0c |
| e115137591 |
| dc62271ebb |
| 3ec505bc22 |
| 6be77c0194 |
| 03fa7042cb |
| c620f29aab |
| 7a652d7ec6 |
| b59a830dce |
| 6999943c3d |
| 90064edd54 |
| 393eb1e83c |
| d2bcf75c50 |
| 89d3a6c66f |
| bf2485eb71 |
| a88f60f1fa |
| 33f899506f |
| f7ead02e6e |
| 0abf17f6cd |
| 8307ee1098 |
| 7e4a039a7f |
| d46621eea3 |
| 8b25ffaa45 |
| 17f1b78494 |
| 5874226067 |
| 544bc3eb45 |
| de6d9947d7 |
| e5382d95dd |
| 6d8445d440 |
| 7b16c34af3 |
| 91577760b6 |
| 9ced048510 |
| 0439151373 |
| 5f050566f4 |
| 8a43513f5a |
| 6c8504ef71 |
@@ -61,7 +61,6 @@ exceptions:
- ATTESTATION_TIMELINESS_INDEX#gloas
- BUILDER_INDEX_FLAG#gloas
- BUILDER_INDEX_SELF_BUILD#gloas
- DOMAIN_PROPOSER_PREFERENCES#gloas
- NUM_BLOCK_TIMELINESS_DEADLINES#gloas
- PTC_TIMELINESS_INDEX#gloas

@@ -72,9 +71,6 @@ exceptions:
- CONTRIBUTION_DUE_BPS_GLOAS#gloas
- GLOAS_FORK_EPOCH#gloas
- GLOAS_FORK_VERSION#gloas
- MAX_REQUEST_PAYLOADS#gloas
- MIN_BUILDER_WITHDRAWABILITY_DELAY#gloas
- PAYLOAD_ATTESTATION_DUE_BPS#gloas
- SYNC_MESSAGE_DUE_BPS_GLOAS#gloas

ssz_objects:

@@ -390,10 +386,8 @@ exceptions:
- get_builder_withdrawals#gloas
- get_builders_sweep_withdrawals#gloas
- get_index_for_new_builder#gloas
- get_pending_balance_to_withdraw_for_builder#gloas
- get_proposer_preferences_signature#gloas
- get_upcoming_proposal_slots#gloas
- initiate_builder_exit#gloas
- is_active_builder#gloas
- is_builder_index#gloas
- is_data_available#gloas

@@ -404,7 +398,6 @@ exceptions:
- is_valid_proposal_slot#gloas
- onboard_builders_from_pending_deposits#gloas
- process_deposit_request#gloas
- process_voluntary_exit#gloas
- record_block_timeliness#gloas
- record_block_timeliness#phase0
- verify_data_column_sidecar_kzg_proofs#gloas
.github/workflows/sbom-export.yaml (vendored, new file, 67 lines)
@@ -0,0 +1,67 @@
name: SBOM Export & Centralize

on:
  push:
    branches: [ "develop" ]
  schedule:
    - cron: '50 21 * * 2'

permissions:
  contents: read

jobs:
  generate-and-upload:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Source Code
        uses: actions/checkout@v6

      - name: Check for recent changes
        id: check
        run: |
          if [ -z "$(git log --since='7 days ago' --oneline | head -1)" ]; then
            echo "No commits in the last 7 days, skipping SBOM generation."
            echo "skip=true" >> "$GITHUB_OUTPUT"
          fi

      - name: Generate CycloneDX SBOM via cdxgen
        if: steps.check.outputs.skip != 'true'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          docker run --rm \
            --user "$(id -u):$(id -g)" \
            -v /tmp:/tmp \
            -v "${{ github.workspace }}:/app:rw" \
            -e FETCH_LICENSE=true \
            -e GITHUB_TOKEN \
            ghcr.io/cdxgen/cdxgen:v12.1.1 \
            -r /app \
            -o /app/sbom.cdx.json \
            --no-install-deps \
            --spec-version 1.6

          if [ ! -s sbom.cdx.json ]; then
            echo "::error::cdxgen SBOM generation failed or returned empty."
            exit 1
          fi

          echo "SBOM generated successfully:"
          ls -lh sbom.cdx.json

      - name: Upload SBOM to Dependency Track
        if: steps.check.outputs.skip != 'true'
        env:
          DT_API_KEY: ${{ secrets.DEPENDENCY_TRACK_API_KEY }}
          DT_URL: ${{ secrets.DEPENDENCY_TRACK_URL }}
        run: |
          REPO_NAME=${GITHUB_REPOSITORY##*/}

          curl -sf -X POST "${DT_URL}/api/v1/bom" \
            -H "X-Api-Key: ${DT_API_KEY}" \
            -F "autoCreate=true" \
            -F "projectName=${REPO_NAME}" \
            -F "projectVersion=${{ github.ref_name }}" \
            -F "bom=@sbom.cdx.json"

          echo "SBOM uploaded to Dependency Track for ${REPO_NAME}@${{ github.ref_name }}"
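For reference, the upload step above is a plain multipart POST to Dependency-Track's `/api/v1/bom` endpoint. A hedged Go sketch of the same request follows; the endpoint, header, and form-field names come from the workflow itself, while the program structure is illustrative and not part of Prysm.

```go
package main

import (
    "bytes"
    "fmt"
    "io"
    "mime/multipart"
    "net/http"
    "os"
)

// uploadSBOM mirrors the curl call in the workflow above: it POSTs a CycloneDX
// SBOM to Dependency-Track's /api/v1/bom endpoint as a multipart form.
func uploadSBOM(dtURL, apiKey, projectName, projectVersion, bomPath string) error {
    bom, err := os.ReadFile(bomPath)
    if err != nil {
        return err
    }

    var body bytes.Buffer
    w := multipart.NewWriter(&body)
    // Same form fields as the workflow's curl invocation.
    _ = w.WriteField("autoCreate", "true")
    _ = w.WriteField("projectName", projectName)
    _ = w.WriteField("projectVersion", projectVersion)
    fw, err := w.CreateFormFile("bom", bomPath)
    if err != nil {
        return err
    }
    if _, err := fw.Write(bom); err != nil {
        return err
    }
    if err := w.Close(); err != nil {
        return err
    }

    req, err := http.NewRequest(http.MethodPost, dtURL+"/api/v1/bom", &body)
    if err != nil {
        return err
    }
    req.Header.Set("X-Api-Key", apiKey)
    req.Header.Set("Content-Type", w.FormDataContentType())

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode >= 300 {
        msg, _ := io.ReadAll(resp.Body)
        return fmt.Errorf("upload failed: %s: %s", resp.Status, msg)
    }
    return nil
}

func main() {
    if err := uploadSBOM(os.Getenv("DT_URL"), os.Getenv("DT_API_KEY"), "prysm", "develop", "sbom.cdx.json"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
```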
.gitignore (vendored, 3 lines changed)
@@ -44,6 +44,3 @@ tmp

# spectest coverage reports
report.txt

# execution client data
execution/
CHANGELOG.md (112 lines changed)
@@ -4,6 +4,116 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.3](https://github.com/prysmaticlabs/prysm/compare/v7.1.2...v7.1.3) - 2026-03-18

This release brings extensive Gloas (next fork) groundwork, a major logging infrastructure overhaul, and numerous performance optimizations across the beacon chain. A security update to go-ethereum v1.16.8 is also included.

Release highlights:

- **Gloas fork preparation**: Builder registry, bid processing, payload attestation, proposer slashing, slot processing, block API endpoints, and duty timing intervals are all wired up.
- **Logging revamp**: New ephemeral debug logfile (24h retention, enabled by default), per-package loggers with CI enforcement, per-hook verbosity control (`--log.vmodule`), and a version banner at startup (a minimal logrus sketch of per-hook filtering follows this overview).
- **Performance**: Forkchoice updates moved to the background, post-Electra attestation data cached per slot, parallel data column cache warmup, reduced heap allocations in SSZ marshaling and `MixInLength`, and proposer preprocessing behind a feature flag.
- **Validator client**: gRPC fallback now matches the REST API implementation — both connect only to fully synced nodes. The gRPC health endpoint returns an error on syncing/optimistic status.
- **Security**: go-ethereum updated to v1.16.8; fixed an authentication bypass on `/v2/validator/*` endpoints.
- **State storage**: Initial support for the `hdiff` state-diff feature — migration-to-cold and DB initialization are now available behind feature flags.

There are no known security issues in this release. Operators can update at their convenience.
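The per-hook verbosity control called out in the logging highlight can be pictured with a small logrus sketch. This is an illustration only, not Prysm's implementation: the `levelHook` type, the `debug.log` filename, and the chosen levels are hypothetical, while the hook interface (`Levels`/`Fire`) is the standard logrus one.

```go
package main

import (
    "io"
    "os"

    "github.com/sirupsen/logrus"
)

// levelHook is a hypothetical per-destination hook: each hook carries its own
// maximum verbosity, so the terminal and the debug logfile can differ.
type levelHook struct {
    out      io.Writer
    maxLevel logrus.Level // most verbose level this destination accepts
    fmt      logrus.Formatter
}

func (h *levelHook) Levels() []logrus.Level { return logrus.AllLevels }

func (h *levelHook) Fire(e *logrus.Entry) error {
    if e.Level > h.maxLevel { // entry is more verbose than this destination wants
        return nil
    }
    b, err := h.fmt.Format(e)
    if err != nil {
        return err
    }
    _, err = h.out.Write(b)
    return err
}

func main() {
    f, err := os.OpenFile("debug.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o600)
    if err != nil {
        logrus.Fatal(err)
    }
    defer f.Close()

    // Let every entry reach the hooks, send nothing to the default output,
    // and let each hook apply its own verbosity.
    logrus.SetLevel(logrus.DebugLevel)
    logrus.SetOutput(io.Discard)
    logrus.AddHook(&levelHook{out: os.Stdout, maxLevel: logrus.InfoLevel, fmt: &logrus.TextFormatter{}})
    logrus.AddHook(&levelHook{out: f, maxLevel: logrus.DebugLevel, fmt: &logrus.JSONFormatter{}})

    logrus.WithField("package", "example").Debug("reaches only the debug logfile")
    logrus.WithField("package", "example").Info("reaches both destinations")
}
```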
### Added

- Use the head state to validate attestations for the previous epoch if head is compatible with the target checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16109)
- Added separate logrus hooks for handling the formatting and output of terminal logs vs log-file logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16102)
- Batch publish data columns for faster data propagation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16183)
- `--disable-get-blobs-v2` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16155)
- Update spectests to v1.7.0-alpha.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16219)
- Added basic Gloas builder support (`Builder` message and `BeaconStateGloas` `builders`/`next_withdrawal_builder_index` fields). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16164)
- Added an ephemeral debug logfile for beacon and validator nodes that captures debug-level logs for 24 hours. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16108)
- Add a feature flag to pass spectests with low validator count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16231)
- Add feature flag `--enable-proposer-preprocessing` to process the block and verify signatures before proposing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15920)
- Add `ProofByFieldIndex` to generalize merkle proof generation for `BeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15443)
- Update spectests to v1.7.0-alpha.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16246)
- Add feature flag to use hashtree instead of gohashtree. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16216)
- Migrate to cold storage with the hdiff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16049)
- Adding basic Fulu fork transition support for mainnet and minimal e2e tests (multi scenario is not included). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- `commitment_count_in_gossip_processed_blocks` gauge metric to track the number of blob KZG commitments in processed beacon blocks (a registration sketch follows this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16254)
- Add Gloas latest execution bid processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15638)
- Added shell completion support for `beacon-chain` and `validator` CLI tools. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16245)
- Add pending payments processing and quorum threshold, plus spectests and state hooks (rotate/append). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15655)
- Add slot processing with execution payload availability updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15730)
- Implement modified proposer slashing for Gloas. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16212)
- Added missing beacon config in Fulu so that the presets don't go missing in the /eth/v1/config/spec beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16170)
- Close opened file in data_column.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16274)
- Flag `--log.vmodule` to set per-package verbosity levels for logging. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16272)
- Added a version log at startup to display the version of the build. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16283)
- Gloas block return support for the /eth/v2/beacon/blocks/{block_id} and /eth/v1/beacon/blocks/{block_id}/root endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16278)
- Add Gloas process payload attestation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15650)
- Initialize db with state-diff feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16203)
- Gloas-specific timing intervals for validator attestation, aggregation, and sync duties. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16291)
- Added new proofCollector type to ssz-query. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16177)
- Added README for maintaining specrefs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16302)
- The ability to download the nightly reference tests from a specific day. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16298)
- Set beacon node options after reading the config file. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16320)
- Implement finalization-based eviction for `CheckpointStateCache`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16458)
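The `commitment_count_in_gossip_processed_blocks` gauge named above could be registered with the standard Prometheus Go client roughly as follows. This is a minimal sketch: the metric name comes from the entry, while the help text, package name, and `recordCommitmentCount` helper are assumptions rather than Prysm's actual wiring.

```go
package metrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// gossipBlockCommitmentCount registers the gauge with the default registry.
var gossipBlockCommitmentCount = promauto.NewGauge(prometheus.GaugeOpts{
    Name: "commitment_count_in_gossip_processed_blocks",
    Help: "Number of blob KZG commitments in the most recently processed beacon block.",
})

// recordCommitmentCount would be called after a gossiped block is processed.
func recordCommitmentCount(numCommitments int) {
    gossipBlockCommitmentCount.Set(float64(numCommitments))
}
```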
### Changed

- Performance improvement in ProcessConsolidationRequests: use the more performant HasPendingBalanceToWithdraw instead of PendingBalanceToWithdraw, since there is no need to calculate the full total pending balance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16189)
- Extend `httperror` analyzer to more functions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16186)
- Do not check block signature on state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14820)
- Notify the engine about forkchoice updates in the background. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16149)
- Use a separate context when updating the slot cache. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16209)
- Data column sidecars cache warmup: process all sidecars for a given epoch in parallel. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16207)
- Use lookahead to validate data column sidecar proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16202)
- Summarize the DEBUG log for data column sidecars received via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16210)
- Added a log.go file for every important package with a logger variable containing a `package` field set to the package name. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Added a CI check to ensure every important package has a log.go file with the correct `package` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Changed the log formatter to use this `package` field instead of the previous `prefix` field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16059)
- Replaced `time.Sleep` with `require.Eventually` polling in tests to fix flaky behavior caused by race conditions between goroutines and assertions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16217)
- Changed the validator client's interpretation of /eth/v1/node/health from an IsHealthy check to an IsReady check; a 206 response now returns false because the node is syncing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16167)
- Performance improvement in state `MarshalSSZTo`: use copy() instead of an unnecessary byte-by-byte loop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16222)
- Moved verbosity settings to be configurable per hook, rather than just globally. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16106)
- Updated go-ethereum to v1.16.7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- Use dependent root and target root to verify data column proposer index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16250)
- Post-Electra, attestation data is now requested once per slot and cached for subsequent requests (a minimal cache sketch follows this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16236)
- Avoid unnecessary heap allocation when calling MixInLength. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16251)
- Log commitments instead of indices in missingCommitError. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16258)
- Added some defensive checks to prevent overflows in block batch requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16227)
- The gRPC health endpoint now returns an error when the node is syncing or optimistic, indicating that it is unavailable. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16294)
- Sample PTC per committee to reduce allocations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16293)
- The gRPC fallback now matches the REST API implementation and connects only to fully synced nodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
- Improved node fallback logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
- Improved integrations with ethspecify so specrefs can be used throughout the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16304)
- Fixed the logging issue described in #16314. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16322)
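The post-Electra attestation-data caching entry above boils down to "compute once per slot, serve repeats from memory". A minimal sketch of that idea follows, using a placeholder `AttestationData` type and cache shape rather than Prysm's real structures.

```go
package cache

import "sync"

// AttestationData is a simplified placeholder; post-Electra the attestation
// data for a slot is the same for every committee, so it can be shared.
type AttestationData struct {
    Slot            uint64
    BeaconBlockRoot [32]byte
}

// slotDataCache computes attestation data once per slot via fetch and serves
// subsequent requests for the same slot from memory.
type slotDataCache struct {
    mu   sync.Mutex
    slot uint64
    data *AttestationData
}

func (c *slotDataCache) Get(slot uint64, fetch func(uint64) (*AttestationData, error)) (*AttestationData, error) {
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.data != nil && c.slot == slot {
        return c.data, nil // cache hit for this slot
    }
    d, err := fetch(slot)
    if err != nil {
        return nil, err
    }
    c.slot, c.data = slot, d
    return d, nil
}
```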
### Removed

- Removed github.com/MariusVanDerWijden/FuzzyVM and github.com/MariusVanDerWijden/tx-fuzz due to lack of support after go-ethereum 1.16.7; they were only used in e2e transaction fuzzing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15640)
- Remove unused `delay` parameter from `fetchOriginDataColumnSidecars` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16262)
- Batching of KZG verification for data column sidecars received via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16240)
- `--disable-get-blobs-v2` flag from help. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16265)
- The gRPC resolver for load balancing; the new implementation matches the REST API's, so the resolver was removed and hosts are handled the same way for consistency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16215)
### Fixed

- Avoid a panic when the fork schedule is empty [#16175](https://github.com/OffchainLabs/prysm/pull/16175). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16175)
- Fix validation logic for `--backfill-oldest-slot`, which was rejecting slots newer than 1056767. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16173)
- Don't call trace.WithMaxExportBatchSize(trace.DefaultMaxExportBatchSize) twice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16211)
- When adding the `--[semi-]supernode` flag, update the earliest available slot accordingly. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16230)
- Fixed broken and outdated links. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15856)
- Stop SlotIntervalTicker goroutine leaks [#16241](https://github.com/OffchainLabs/prysm/pull/16241). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16241)
- Fix `prysmctl testnet generate-genesis` to use the timestamp from `--geth-genesis-json-in` when `--genesis-time` is not explicitly provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16239)
- Prevent authentication bypass on direct `/v2/validator/*` endpoints by enforcing auth checks for non-public routes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16226)
- Fixed a typo: AggregrateDueBPS -> AggregateDueBPS. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16194)
- Fixed a bug in `hack/check-logs.sh` where untracked files were ignored. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16287)
- Fix hashtree release builds. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16288)
- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16281)
- Fixed a potential race condition when switching hosts quickly and reconnecting to the same host on an old connection. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16316)
- Fixed a bug where `cmd/beacon-chain/execution` was being ignored by `hack/gen-logs.sh` due to a `.gitignore` rule. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16328)
### Security

- Update go-ethereum to v1.16.8. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16252)

## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07

Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).

@@ -4046,4 +4156,4 @@ There are no security updates in this release.

# Older than v2.0.0

For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
@@ -46,7 +46,7 @@ func EnsureReady(ctx context.Context, provider HostProvider, checker ReadyChecke
            "previous": startingHost,
            "current":  provider.CurrentHost(),
            "tried":    attemptedHosts,
        }).Info("Switched to responsive beacon node")
        }).Warn("Switched to responsive beacon node")
    }
    return true
}
@@ -9,6 +9,7 @@ go_library(
        "conversions_blob.go",
        "conversions_block.go",
        "conversions_block_execution.go",
        "conversions_gloas.go",
        "conversions_lightclient.go",
        "conversions_state.go",
        "endpoints_beacon.go",

@@ -57,10 +58,12 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//consensus-types/blocks:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
    ],
@@ -540,6 +540,12 @@ type PayloadAttestation struct {
    Signature string `json:"signature"`
}

type PayloadAttestationMessage struct {
    ValidatorIndex string `json:"validator_index"`
    Data *PayloadAttestationData `json:"data"`
    Signature string `json:"signature"`
}

type BeaconBlockBodyGloas struct {
    RandaoReveal string `json:"randao_reveal"`
    Eth1Data *Eth1Data `json:"eth1_data"`

@@ -577,3 +583,17 @@ func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
func (s *SignedBeaconBlockGloas) SigString() string {
    return s.Signature
}

type ExecutionPayloadEnvelope struct {
    Payload *ExecutionPayloadDeneb `json:"payload"`
    ExecutionRequests *ExecutionRequests `json:"execution_requests"`
    BuilderIndex string `json:"builder_index"`
    BeaconBlockRoot string `json:"beacon_block_root"`
    Slot string `json:"slot"`
    StateRoot string `json:"state_root"`
}

type SignedExecutionPayloadEnvelope struct {
    Message *ExecutionPayloadEnvelope `json:"message"`
    Signature string `json:"signature"`
}
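The envelope structs added above are plain JSON-tagged API types, so they serialize with the standard library. A short, hedged usage sketch with made-up field values; the import path follows the repository layout shown in this diff.

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/api/server/structs"
)

func main() {
    // Illustrative values only; in practice these come from the conversion helpers.
    env := &structs.SignedExecutionPayloadEnvelope{
        Message: &structs.ExecutionPayloadEnvelope{
            BuilderIndex:    "7",
            BeaconBlockRoot: "0x" + fmt.Sprintf("%064d", 0),
            Slot:            "9",
            StateRoot:       "0x" + fmt.Sprintf("%064d", 0),
        },
        Signature: "0x" + fmt.Sprintf("%0192d", 0),
    }
    out, err := json.MarshalIndent(env, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out)) // renders the snake_case fields defined above
}
```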
@@ -2966,6 +2966,14 @@ func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttesta
    }
}

func PayloadAttestationMessageFromConsensus(m *eth.PayloadAttestationMessage) *PayloadAttestationMessage {
    return &PayloadAttestationMessage{
        ValidatorIndex: fmt.Sprintf("%d", m.ValidatorIndex),
        Data:           PayloadAttestationDataFromConsensus(m.Data),
        Signature:      hexutil.Encode(m.Signature),
    }
}

func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
    return &PayloadAttestationData{
        BeaconBlockRoot: hexutil.Encode(d.BeaconBlockRoot),

@@ -3275,3 +3283,26 @@ func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, err
        BlobDataAvailable: d.BlobDataAvailable,
    }, nil
}

// SignedExecutionPayloadEnvelopeFromConsensus converts a proto envelope to the API struct.
func SignedExecutionPayloadEnvelopeFromConsensus(e *eth.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
    payload, err := ExecutionPayloadDenebFromConsensus(e.Message.Payload)
    if err != nil {
        return nil, err
    }
    var requests *ExecutionRequests
    if e.Message.ExecutionRequests != nil {
        requests = ExecutionRequestsFromConsensus(e.Message.ExecutionRequests)
    }
    return &SignedExecutionPayloadEnvelope{
        Message: &ExecutionPayloadEnvelope{
            Payload:           payload,
            ExecutionRequests: requests,
            BuilderIndex:      fmt.Sprintf("%d", e.Message.BuilderIndex),
            BeaconBlockRoot:   hexutil.Encode(e.Message.BeaconBlockRoot),
            Slot:              fmt.Sprintf("%d", e.Message.Slot),
            StateRoot:         hexutil.Encode(e.Message.StateRoot),
        },
        Signature: hexutil.Encode(e.Signature),
    }, nil
}
api/server/structs/conversions_gloas.go (new file, 89 lines)
@@ -0,0 +1,89 @@
package structs

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/ethereum/go-ethereum/common/hexutil"
)

func ROExecutionPayloadBidFromConsensus(b interfaces.ROExecutionPayloadBid) *ExecutionPayloadBid {
    if b == nil {
        return nil
    }

    pbh := b.ParentBlockHash()
    pbr := b.ParentBlockRoot()
    bh := b.BlockHash()
    pr := b.PrevRandao()
    fr := b.FeeRecipient()
    commitments := b.BlobKzgCommitments()
    blobKzgCommitments := make([]string, 0, len(commitments))
    for _, commitment := range commitments {
        blobKzgCommitments = append(blobKzgCommitments, hexutil.Encode(commitment))
    }
    return &ExecutionPayloadBid{
        ParentBlockHash:    hexutil.Encode(pbh[:]),
        ParentBlockRoot:    hexutil.Encode(pbr[:]),
        BlockHash:          hexutil.Encode(bh[:]),
        PrevRandao:         hexutil.Encode(pr[:]),
        FeeRecipient:       hexutil.Encode(fr[:]),
        GasLimit:           fmt.Sprintf("%d", b.GasLimit()),
        BuilderIndex:       fmt.Sprintf("%d", b.BuilderIndex()),
        Slot:               fmt.Sprintf("%d", b.Slot()),
        Value:              fmt.Sprintf("%d", b.Value()),
        ExecutionPayment:   fmt.Sprintf("%d", b.ExecutionPayment()),
        BlobKzgCommitments: blobKzgCommitments,
    }
}

func BuildersFromConsensus(builders []*ethpb.Builder) []*Builder {
    newBuilders := make([]*Builder, len(builders))
    for i, b := range builders {
        newBuilders[i] = BuilderFromConsensus(b)
    }
    return newBuilders
}

func BuilderFromConsensus(b *ethpb.Builder) *Builder {
    return &Builder{
        Pubkey:            hexutil.Encode(b.Pubkey),
        Version:           hexutil.Encode(b.Version),
        ExecutionAddress:  hexutil.Encode(b.ExecutionAddress),
        Balance:           fmt.Sprintf("%d", b.Balance),
        DepositEpoch:      fmt.Sprintf("%d", b.DepositEpoch),
        WithdrawableEpoch: fmt.Sprintf("%d", b.WithdrawableEpoch),
    }
}

func BuilderPendingPaymentsFromConsensus(payments []*ethpb.BuilderPendingPayment) []*BuilderPendingPayment {
    newPayments := make([]*BuilderPendingPayment, len(payments))
    for i, p := range payments {
        newPayments[i] = BuilderPendingPaymentFromConsensus(p)
    }
    return newPayments
}

func BuilderPendingPaymentFromConsensus(p *ethpb.BuilderPendingPayment) *BuilderPendingPayment {
    return &BuilderPendingPayment{
        Weight:     fmt.Sprintf("%d", p.Weight),
        Withdrawal: BuilderPendingWithdrawalFromConsensus(p.Withdrawal),
    }
}

func BuilderPendingWithdrawalsFromConsensus(withdrawals []*ethpb.BuilderPendingWithdrawal) []*BuilderPendingWithdrawal {
    newWithdrawals := make([]*BuilderPendingWithdrawal, len(withdrawals))
    for i, w := range withdrawals {
        newWithdrawals[i] = BuilderPendingWithdrawalFromConsensus(w)
    }
    return newWithdrawals
}

func BuilderPendingWithdrawalFromConsensus(w *ethpb.BuilderPendingWithdrawal) *BuilderPendingWithdrawal {
    return &BuilderPendingWithdrawal{
        FeeRecipient: hexutil.Encode(w.FeeRecipient),
        Amount:       fmt.Sprintf("%d", w.Amount),
        BuilderIndex: fmt.Sprintf("%d", w.BuilderIndex),
    }
}
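A quick usage sketch of the helpers defined above: converting a consensus `ethpb.Builder` into its API representation. The field values are made up, but they mirror the shapes exercised by the tests later in this diff.

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"

    "github.com/OffchainLabs/prysm/v7/api/server/structs"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

func main() {
    // A consensus-layer builder record with made-up values.
    b := &ethpb.Builder{
        Pubkey:            bytes.Repeat([]byte{0xAA}, 48),
        Version:           bytes.Repeat([]byte{0x01}, 4),
        ExecutionAddress:  bytes.Repeat([]byte{0xBB}, 20),
        Balance:           32_000_000_000,
        DepositEpoch:      3,
        WithdrawableEpoch: 4,
    }

    // Convert to the API representation (hex strings and decimal strings).
    apiBuilder := structs.BuilderFromConsensus(b)
    out, err := json.MarshalIndent(apiBuilder, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))

    // Slices convert element-wise.
    fmt.Println(len(structs.BuildersFromConsensus([]*ethpb.Builder{b})))
}
```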
@@ -972,3 +972,223 @@ func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu,
|
||||
ProposerLookahead: lookahead,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Gloas
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func BeaconStateGloasFromConsensus(st beaconState.BeaconState) (*BeaconStateGloas, error) {
|
||||
srcBr := st.BlockRoots()
|
||||
br := make([]string, len(srcBr))
|
||||
for i, r := range srcBr {
|
||||
br[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcSr := st.StateRoots()
|
||||
sr := make([]string, len(srcSr))
|
||||
for i, r := range srcSr {
|
||||
sr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcHr := st.HistoricalRoots()
|
||||
hr := make([]string, len(srcHr))
|
||||
for i, r := range srcHr {
|
||||
hr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcVotes := st.Eth1DataVotes()
|
||||
votes := make([]*Eth1Data, len(srcVotes))
|
||||
for i, e := range srcVotes {
|
||||
votes[i] = Eth1DataFromConsensus(e)
|
||||
}
|
||||
srcVals := st.Validators()
|
||||
vals := make([]*Validator, len(srcVals))
|
||||
for i, v := range srcVals {
|
||||
vals[i] = ValidatorFromConsensus(v)
|
||||
}
|
||||
srcBals := st.Balances()
|
||||
bals := make([]string, len(srcBals))
|
||||
for i, b := range srcBals {
|
||||
bals[i] = fmt.Sprintf("%d", b)
|
||||
}
|
||||
srcRm := st.RandaoMixes()
|
||||
rm := make([]string, len(srcRm))
|
||||
for i, m := range srcRm {
|
||||
rm[i] = hexutil.Encode(m)
|
||||
}
|
||||
srcSlashings := st.Slashings()
|
||||
slashings := make([]string, len(srcSlashings))
|
||||
for i, s := range srcSlashings {
|
||||
slashings[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
srcPrevPart, err := st.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevPart := make([]string, len(srcPrevPart))
|
||||
for i, p := range srcPrevPart {
|
||||
prevPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcCurrPart, err := st.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currPart := make([]string, len(srcCurrPart))
|
||||
for i, p := range srcCurrPart {
|
||||
currPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcIs, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is := make([]string, len(srcIs))
|
||||
for i, s := range srcIs {
|
||||
is[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
currSc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcHs, err := st.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hs := make([]*HistoricalSummary, len(srcHs))
|
||||
for i, s := range srcHs {
|
||||
hs[i] = HistoricalSummaryFromConsensus(s)
|
||||
}
|
||||
nwi, err := st.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwvi, err := st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drsi, err := st.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbtc, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ebtc, err := st.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eee, err := st.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cbtc, err := st.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ece, err := st.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pbd, err := st.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ppw, err := st.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcLookahead, err := st.ProposerLookahead()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lookahead := make([]string, len(srcLookahead))
|
||||
for i, v := range srcLookahead {
|
||||
lookahead[i] = fmt.Sprintf("%d", uint64(v))
|
||||
}
|
||||
// Gloas-specific fields
|
||||
lepb, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builders, err := st.Builders()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwbi, err := st.NextWithdrawalBuilderIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
epa, err := st.ExecutionPayloadAvailabilityVector()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpp, err := st.BuilderPendingPayments()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bpw, err := st.BuilderPendingWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lbh, err := st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pew, err := st.PayloadExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BeaconStateGloas{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: hr,
|
||||
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
|
||||
Eth1DataVotes: votes,
|
||||
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: slashings,
|
||||
PreviousEpochParticipation: prevPart,
|
||||
CurrentEpochParticipation: currPart,
|
||||
JustificationBits: hexutil.Encode(st.JustificationBits()),
|
||||
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
|
||||
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
|
||||
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
|
||||
InactivityScores: is,
|
||||
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
|
||||
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
|
||||
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
|
||||
HistoricalSummaries: hs,
|
||||
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
|
||||
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
|
||||
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
|
||||
EarliestExitEpoch: fmt.Sprintf("%d", eee),
|
||||
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
|
||||
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
ProposerLookahead: lookahead,
|
||||
LatestExecutionPayloadBid: ROExecutionPayloadBidFromConsensus(lepb),
|
||||
Builders: BuildersFromConsensus(builders),
|
||||
NextWithdrawalBuilderIndex: fmt.Sprintf("%d", nwbi),
|
||||
ExecutionPayloadAvailability: hexutil.Encode(epa),
|
||||
BuilderPendingPayments: BuilderPendingPaymentsFromConsensus(bpp),
|
||||
BuilderPendingWithdrawals: BuilderPendingWithdrawalsFromConsensus(bpw),
|
||||
LatestBlockHash: hexutil.Encode(lbh[:]),
|
||||
PayloadExpectedWithdrawals: WithdrawalsFromConsensus(pew),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -1,11 +1,15 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
@@ -355,3 +359,214 @@ func TestIndexedAttestation_ToConsensus(t *testing.T) {
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestROExecutionPayloadBidFromConsensus(t *testing.T) {
|
||||
t.Run("empty blobkzg commitments", func(t *testing.T) {
|
||||
bid := ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 100,
|
||||
BuilderIndex: 7,
|
||||
Slot: 9,
|
||||
Value: 11,
|
||||
ExecutionPayment: 22,
|
||||
BlobKzgCommitments: [][]byte{},
|
||||
}
|
||||
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
|
||||
require.NoError(t, err)
|
||||
|
||||
got := ROExecutionPayloadBidFromConsensus(roBid)
|
||||
want := &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(bid.BlockHash),
|
||||
PrevRandao: hexutil.Encode(bid.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
|
||||
GasLimit: "100",
|
||||
BuilderIndex: "7",
|
||||
Slot: "9",
|
||||
Value: "11",
|
||||
ExecutionPayment: "22",
|
||||
BlobKzgCommitments: []string{},
|
||||
}
|
||||
assert.DeepEqual(t, want, got)
|
||||
})
|
||||
|
||||
t.Run("default", func(t *testing.T) {
|
||||
bid := ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x01}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x02}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x03}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x04}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x05}, 20),
|
||||
GasLimit: 100,
|
||||
BuilderIndex: 7,
|
||||
Slot: 9,
|
||||
Value: 11,
|
||||
ExecutionPayment: 22,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x06}, 48)},
|
||||
}
|
||||
roBid, err := blocks.WrappedROExecutionPayloadBid(bid)
|
||||
require.NoError(t, err)
|
||||
|
||||
var bkcs []string
|
||||
for _, commitment := range roBid.BlobKzgCommitments() {
|
||||
bkcs = append(bkcs, hexutil.Encode(commitment))
|
||||
}
|
||||
|
||||
got := ROExecutionPayloadBidFromConsensus(roBid)
|
||||
want := &ExecutionPayloadBid{
|
||||
ParentBlockHash: hexutil.Encode(bid.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(bid.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(bid.BlockHash),
|
||||
PrevRandao: hexutil.Encode(bid.PrevRandao),
|
||||
FeeRecipient: hexutil.Encode(bid.FeeRecipient),
|
||||
GasLimit: "100",
|
||||
BuilderIndex: "7",
|
||||
Slot: "9",
|
||||
Value: "11",
|
||||
ExecutionPayment: "22",
|
||||
BlobKzgCommitments: bkcs,
|
||||
}
|
||||
assert.DeepEqual(t, want, got)
|
||||
})
|
||||
}
|
||||
|
||||
func TestBuilderConversionsFromConsensus(t *testing.T) {
|
||||
builder := ð.Builder{
|
||||
Pubkey: bytes.Repeat([]byte{0xAA}, 48),
|
||||
Version: bytes.Repeat([]byte{0x01}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0xBB}, 20),
|
||||
Balance: 42,
|
||||
DepositEpoch: 3,
|
||||
WithdrawableEpoch: 4,
|
||||
}
|
||||
wantBuilder := &Builder{
|
||||
Pubkey: hexutil.Encode(builder.Pubkey),
|
||||
Version: hexutil.Encode(builder.Version),
|
||||
ExecutionAddress: hexutil.Encode(builder.ExecutionAddress),
|
||||
Balance: "42",
|
||||
DepositEpoch: "3",
|
||||
WithdrawableEpoch: "4",
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantBuilder, BuilderFromConsensus(builder))
|
||||
assert.DeepEqual(t, []*Builder{wantBuilder}, BuildersFromConsensus([]*eth.Builder{builder}))
|
||||
}
|
||||
|
||||
func TestBuilderPendingPaymentConversionsFromConsensus(t *testing.T) {
|
||||
withdrawal := ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x10}, 20),
|
||||
Amount: 15,
|
||||
BuilderIndex: 2,
|
||||
}
|
||||
payment := ð.BuilderPendingPayment{
|
||||
Weight: 5,
|
||||
Withdrawal: withdrawal,
|
||||
}
|
||||
wantWithdrawal := &BuilderPendingWithdrawal{
|
||||
FeeRecipient: hexutil.Encode(withdrawal.FeeRecipient),
|
||||
Amount: "15",
|
||||
BuilderIndex: "2",
|
||||
}
|
||||
wantPayment := &BuilderPendingPayment{
|
||||
Weight: "5",
|
||||
Withdrawal: wantWithdrawal,
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, wantPayment, BuilderPendingPaymentFromConsensus(payment))
|
||||
assert.DeepEqual(t, []*BuilderPendingPayment{wantPayment}, BuilderPendingPaymentsFromConsensus([]*eth.BuilderPendingPayment{payment}))
|
||||
assert.DeepEqual(t, wantWithdrawal, BuilderPendingWithdrawalFromConsensus(withdrawal))
|
||||
assert.DeepEqual(t, []*BuilderPendingWithdrawal{wantWithdrawal}, BuilderPendingWithdrawalsFromConsensus([]*eth.BuilderPendingWithdrawal{withdrawal}))
|
||||
}
|
||||
|
||||
func TestBeaconStateGloasFromConsensus(t *testing.T) {
|
||||
st, err := util.NewBeaconStateGloas(func(state *eth.BeaconStateGloas) error {
|
||||
state.GenesisTime = 123
|
||||
state.GenesisValidatorsRoot = bytes.Repeat([]byte{0x10}, 32)
|
||||
state.Slot = 5
|
||||
state.ProposerLookahead = []uint64{1, 2}
|
||||
state.LatestExecutionPayloadBid = ð.ExecutionPayloadBid{
|
||||
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32),
|
||||
ParentBlockRoot: bytes.Repeat([]byte{0x12}, 32),
|
||||
BlockHash: bytes.Repeat([]byte{0x13}, 32),
|
||||
PrevRandao: bytes.Repeat([]byte{0x14}, 32),
|
||||
FeeRecipient: bytes.Repeat([]byte{0x15}, 20),
|
||||
GasLimit: 64,
|
||||
BuilderIndex: 3,
|
||||
Slot: 5,
|
||||
Value: 99,
|
||||
ExecutionPayment: 7,
|
||||
BlobKzgCommitments: [][]byte{bytes.Repeat([]byte{0x16}, 48)},
|
||||
}
|
||||
state.Builders = []*eth.Builder{
|
||||
{
|
||||
Pubkey: bytes.Repeat([]byte{0x20}, 48),
|
||||
Version: bytes.Repeat([]byte{0x21}, 4),
|
||||
ExecutionAddress: bytes.Repeat([]byte{0x22}, 20),
|
||||
Balance: 88,
|
||||
DepositEpoch: 1,
|
||||
WithdrawableEpoch: 2,
|
||||
},
|
||||
}
|
||||
state.NextWithdrawalBuilderIndex = 9
|
||||
state.ExecutionPayloadAvailability = []byte{0x01, 0x02}
|
||||
state.BuilderPendingPayments = []*eth.BuilderPendingPayment{
|
||||
{
|
||||
Weight: 3,
|
||||
Withdrawal: ð.BuilderPendingWithdrawal{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x23}, 20),
|
||||
Amount: 4,
|
||||
BuilderIndex: 5,
|
||||
},
|
||||
},
|
||||
}
|
||||
state.BuilderPendingWithdrawals = []*eth.BuilderPendingWithdrawal{
|
||||
{
|
||||
FeeRecipient: bytes.Repeat([]byte{0x24}, 20),
|
||||
Amount: 6,
|
||||
BuilderIndex: 7,
|
||||
},
|
||||
}
|
||||
state.LatestBlockHash = bytes.Repeat([]byte{0x25}, 32)
|
||||
state.PayloadExpectedWithdrawals = []*enginev1.Withdrawal{
|
||||
{Index: 1, ValidatorIndex: 2, Address: bytes.Repeat([]byte{0x26}, 20), Amount: 10},
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
got, err := BeaconStateGloasFromConsensus(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "123", got.GenesisTime)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x10}, 32)), got.GenesisValidatorsRoot)
|
||||
require.Equal(t, "5", got.Slot)
|
||||
require.DeepEqual(t, []string{"1", "2"}, got.ProposerLookahead)
|
||||
require.Equal(t, "9", got.NextWithdrawalBuilderIndex)
|
||||
require.Equal(t, hexutil.Encode([]byte{0x01, 0x02}), got.ExecutionPayloadAvailability)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x25}, 32)), got.LatestBlockHash)
|
||||
|
||||
require.NotNil(t, got.LatestExecutionPayloadBid)
|
||||
require.Equal(t, "64", got.LatestExecutionPayloadBid.GasLimit)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x11}, 32)), got.LatestExecutionPayloadBid.ParentBlockHash)
|
||||
|
||||
require.NotNil(t, got.Builders)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x20}, 48)), got.Builders[0].Pubkey)
|
||||
require.Equal(t, "88", got.Builders[0].Balance)
|
||||
|
||||
require.Equal(t, "3", got.BuilderPendingPayments[0].Weight)
|
||||
require.Equal(t, "4", got.BuilderPendingPayments[0].Withdrawal.Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x23}, 20)), got.BuilderPendingPayments[0].Withdrawal.FeeRecipient)
|
||||
|
||||
require.Equal(t, "6", got.BuilderPendingWithdrawals[0].Amount)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x24}, 20)), got.BuilderPendingWithdrawals[0].FeeRecipient)
|
||||
|
||||
require.Equal(t, "1", got.PayloadExpectedWithdrawals[0].WithdrawalIndex)
|
||||
require.Equal(t, "2", got.PayloadExpectedWithdrawals[0].ValidatorIndex)
|
||||
require.Equal(t, hexutil.Encode(bytes.Repeat([]byte{0x26}, 20)), got.PayloadExpectedWithdrawals[0].ExecutionAddress)
|
||||
require.Equal(t, "10", got.PayloadExpectedWithdrawals[0].Amount)
|
||||
}
|
||||
|
||||
@@ -285,6 +285,13 @@ type GetBlobsResponse struct {
|
||||
Data []string `json:"data"` //blobs
|
||||
}
|
||||
|
||||
type GetExecutionPayloadEnvelopeResponse struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data *SignedExecutionPayloadEnvelope `json:"data"`
|
||||
}
|
||||
|
||||
type SSZQueryRequest struct {
|
||||
Query string `json:"query"`
|
||||
IncludeProof bool `json:"include_proof,omitempty"`
|
||||
|
||||
@@ -112,3 +112,8 @@ type LightClientOptimisticUpdateEvent struct {
|
||||
Version string `json:"version"`
|
||||
Data *LightClientOptimisticUpdate `json:"data"`
|
||||
}
|
||||
|
||||
type PayloadEvent struct {
|
||||
Slot string `json:"slot"`
|
||||
BlockRoot string `json:"block_root"`
|
||||
}
|
||||
|
||||
@@ -262,3 +262,23 @@ type PendingConsolidation struct {
|
||||
SourceIndex string `json:"source_index"`
|
||||
TargetIndex string `json:"target_index"`
|
||||
}
|
||||
|
||||
type Builder struct {
|
||||
Pubkey string `json:"pubkey"`
|
||||
Version string `json:"version"`
|
||||
ExecutionAddress string `json:"execution_address"`
|
||||
Balance string `json:"balance"`
|
||||
DepositEpoch string `json:"deposit_epoch"`
|
||||
WithdrawableEpoch string `json:"withdrawable_epoch"`
|
||||
}
|
||||
|
||||
type BuilderPendingPayment struct {
|
||||
Weight string `json:"weight"`
|
||||
Withdrawal *BuilderPendingWithdrawal `json:"withdrawal"`
|
||||
}
|
||||
|
||||
type BuilderPendingWithdrawal struct {
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
Amount string `json:"amount"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
}
|
||||
|
||||
@@ -221,3 +221,51 @@ type BeaconStateFulu struct {
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
}
|
||||
|
||||
type BeaconStateGloas struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
ProposerLookahead []string `json:"proposer_lookahead"`
|
||||
LatestExecutionPayloadBid *ExecutionPayloadBid `json:"latest_execution_payload_bid"`
|
||||
Builders []*Builder `json:"builders"`
|
||||
NextWithdrawalBuilderIndex string `json:"next_withdrawal_builder_index"`
|
||||
ExecutionPayloadAvailability string `json:"execution_payload_availability"`
|
||||
BuilderPendingPayments []*BuilderPendingPayment `json:"builder_pending_payments"`
|
||||
BuilderPendingWithdrawals []*BuilderPendingWithdrawal `json:"builder_pending_withdrawals"`
|
||||
LatestBlockHash string `json:"latest_block_hash"`
|
||||
PayloadExpectedWithdrawals []*Withdrawal `json:"payload_expected_withdrawals"`
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -39,11 +40,15 @@ type ChainInfoFetcher interface {
|
||||
// of locking forkchoice
|
||||
type ForkchoiceFetcher interface {
|
||||
Ancestor(context.Context, []byte, primitives.Slot) ([]byte, error)
|
||||
BlockHash(root [32]byte) ([32]byte, error)
|
||||
CachedHeadRoot() [32]byte
|
||||
GetProposerHead() [32]byte
|
||||
SetForkChoiceGenesisTime(time.Time)
|
||||
UpdateHead(context.Context, primitives.Slot)
|
||||
HighestReceivedBlockSlot() primitives.Slot
|
||||
HighestReceivedBlockRoot() [32]byte
|
||||
HasFullNode([32]byte) bool
|
||||
PayloadContentLookup([32]byte) ([32]byte, bool)
|
||||
ReceivedBlocksLastEpoch() (uint64, error)
|
||||
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
|
||||
InsertPayload(interfaces.ROExecutionPayloadEnvelope) error
|
||||
@@ -53,6 +58,8 @@ type ForkchoiceFetcher interface {
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
|
||||
DependentRoot(primitives.Epoch) ([32]byte, error)
|
||||
CanonicalNodeAtSlot(primitives.Slot) ([32]byte, bool)
|
||||
ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool
|
||||
}
|
||||
|
||||
// TimeFetcher retrieves the Ethereum consensus data that's related to time.
|
||||
@@ -114,6 +121,7 @@ type FinalizationFetcher interface {
|
||||
FinalizedBlockHash() [32]byte
|
||||
InForkchoice([32]byte) bool
|
||||
IsFinalized(ctx context.Context, blockRoot [32]byte) bool
|
||||
ParentPayloadReady(interfaces.ReadOnlyBeaconBlock) bool
|
||||
}
|
||||
|
||||
// OptimisticModeFetcher retrieves information about optimistic status of the node.
|
||||
@@ -403,6 +411,32 @@ func (s *Service) InForkchoice(root [32]byte) bool {
    return s.cfg.ForkChoiceStore.HasNode(root)
}

// ParentPayloadReady returns true if the block's parent payload is available
// in forkchoice. For pre-Gloas blocks or blocks building on empty, this always
// returns true. For blocks building on full, it checks that the full node
// exists.
func (s *Service) ParentPayloadReady(blk interfaces.ReadOnlyBeaconBlock) bool {
    if blk.Version() < version.Gloas {
        return true
    }
    parentRoot := blk.ParentRoot()
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
    if err != nil {
        return false
    }
    bid, err := blk.Body().SignedExecutionPayloadBid()
    if err != nil || bid == nil || bid.Message == nil {
        return false
    }
    parentBlockHash := [32]byte(bid.Message.ParentBlockHash)
    if parentBlockHash != blockHash {
        return true // builds on empty, no full node needed
    }
    return s.cfg.ForkChoiceStore.HasFullNode(parentRoot)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -565,3 +599,26 @@ func (s *Service) inRegularSync() bool {
func (s *Service) validating() bool {
    return s.cfg.TrackedValidatorsCache.Validating()
}

// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
func (s *Service) ShouldIgnoreData(parentRoot [32]byte, dataSlot primitives.Slot) bool {
    currentEpoch := slots.ToEpoch(s.CurrentSlot())
    if slots.ToEpoch(dataSlot) < currentEpoch {
        return false
    }
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
    if err != nil {
        // This should not happen. The caller should have already checked the parent is in forkchoice.
        return false
    }
    j := s.cfg.ForkChoiceStore.JustifiedCheckpoint()
    if j == nil {
        return false
    }
    if slots.ToEpoch(parentSlot) >= j.Epoch {
        return false
    }
    return s.cfg.ForkChoiceStore.IsCanonical(parentRoot)
}
@@ -42,6 +42,34 @@ func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
|
||||
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
|
||||
}
|
||||
|
||||
// HighestReceivedBlockRoot returns the corresponding value from forkchoice
|
||||
func (s *Service) HighestReceivedBlockRoot() [32]byte {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
}
|
||||
|
||||
// BlockHash returns the execution payload block hash for the given beacon block root from forkchoice.
|
||||
func (s *Service) BlockHash(root [32]byte) ([32]byte, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.BlockHash(root)
|
||||
}
|
||||
|
||||
// HasFullNode returns the corresponding value from forkchoice
|
||||
func (s *Service) HasFullNode(root [32]byte) bool {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.HasFullNode(root)
|
||||
}
|
||||
|
||||
// PayloadContentLookup returns the preferred payload-content lookup key from forkchoice.
|
||||
func (s *Service) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
return s.cfg.ForkChoiceStore.PayloadContentLookup(root)
|
||||
}
|
||||
|
||||
// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
|
||||
func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/genesis"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/OffchainLabs/prysm/v7/testing/assert"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -620,6 +621,188 @@ func TestService_IsFinalized(t *testing.T) {
	require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}
func TestParentPayloadReady(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 0
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, tr := minimalTestService(t)
	ctx := t.Context()
	fcs := tr.fcs

	parentRoot := [32]byte{1}
	parentBlockHash := [32]byte{10}
	zeroHash := params.BeaconConfig().ZeroHash

	// Insert parent node into forkchoice.
	st, parentROBlock, err := prepareGloasForkchoiceState(ctx, 1, parentRoot, zeroHash, parentBlockHash, zeroHash, 0, 0)
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, st, parentROBlock))

	t.Run("pre-Gloas always true", func(t *testing.T) {
		blk := util.HydrateSignedBeaconBlockDeneb(&ethpb.SignedBeaconBlockDeneb{
			Block: &ethpb.BeaconBlockDeneb{ParentRoot: parentRoot[:]},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("parent not in forkchoice", func(t *testing.T) {
		unknownParent := [32]byte{99}
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: unknownParent[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on empty", func(t *testing.T) {
		differentHash := [32]byte{99}
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: differentHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on full without payload", func(t *testing.T) {
		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, false, service.ParentPayloadReady(wsb.Block()))
	})

	t.Run("builds on full with payload", func(t *testing.T) {
		pe, err := blocks.WrappedROExecutionPayloadEnvelope(&ethpb.ExecutionPayloadEnvelope{
			BeaconBlockRoot: parentRoot[:],
			Payload:         &enginev1.ExecutionPayloadDeneb{},
		})
		require.NoError(t, err)
		require.NoError(t, fcs.InsertPayload(pe))

		bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
			Message: &ethpb.ExecutionPayloadBid{
				BlockHash:       []byte{20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
				ParentBlockHash: parentBlockHash[:],
			},
		})
		blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
			Block: &ethpb.BeaconBlockGloas{
				Slot:       2,
				ParentRoot: parentRoot[:],
				Body:       &ethpb.BeaconBlockBodyGloas{SignedExecutionPayloadBid: bid},
			},
		})
		wsb, err := blocks.NewSignedBeaconBlock(blk)
		require.NoError(t, err)
		require.Equal(t, true, service.ParentPayloadReady(wsb.Block()))
	})
}
func TestService_ShouldIgnoreData(t *testing.T) {
	service, tr := minimalTestService(t)
	ctx := t.Context()
	fcs := tr.fcs

	zeroHash := params.BeaconConfig().ZeroHash
	currentSlot := service.CurrentSlot()
	currentEpoch := slots.ToEpoch(currentSlot)
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

	// Build a chain in forkchoice:
	// genesis (slot 0) -> nodeA (slot 1, epoch 0) -> nodeB (slot slotsPerEpoch, epoch 1) -> nodeC (slot 2*slotsPerEpoch, epoch 2)
	nodeARoot := [32]byte{1}
	nodeBRoot := [32]byte{2}
	nodeCRoot := [32]byte{3}
	nodeASlot := primitives.Slot(1)
	nodeBSlot := primitives.Slot(slotsPerEpoch)     // epoch 1
	nodeCSlot := primitives.Slot(2 * slotsPerEpoch) // epoch 2

	stA, robA, err := prepareForkchoiceState(ctx, nodeASlot, nodeARoot, zeroHash, [32]byte{10}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stA, robA))

	stB, robB, err := prepareForkchoiceState(ctx, nodeBSlot, nodeBRoot, nodeARoot, [32]byte{11}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stB, robB))

	stC, robC, err := prepareForkchoiceState(ctx, nodeCSlot, nodeCRoot, nodeBRoot, [32]byte{12}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
	require.NoError(t, err)
	require.NoError(t, fcs.InsertNode(ctx, stC, robC))

	// Set justified checkpoint to nodeB (epoch 1).
	fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
	require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 1, Root: nodeBRoot}))

	t.Run("past epoch data is not ignored", func(t *testing.T) {
		pastSlot := primitives.Slot((currentEpoch - 1) * primitives.Epoch(slotsPerEpoch))
		require.Equal(t, false, service.ShouldIgnoreData(nodeARoot, pastSlot))
	})

	t.Run("parent not in forkchoice", func(t *testing.T) {
		unknownRoot := [32]byte{99}
		require.Equal(t, false, service.ShouldIgnoreData(unknownRoot, currentSlot))
	})

	t.Run("parent epoch at or after justified", func(t *testing.T) {
		// nodeB is at epoch 1, justified is epoch 1 => parentEpoch >= justified => false
		require.Equal(t, false, service.ShouldIgnoreData(nodeBRoot, currentSlot))
	})

	t.Run("canonical parent before justified is ignored", func(t *testing.T) {
		// nodeA is at epoch 0 < justified epoch 1, and is canonical => true
		require.Equal(t, true, service.ShouldIgnoreData(nodeARoot, currentSlot))
	})

	t.Run("non-canonical parent before justified is not ignored", func(t *testing.T) {
		// Insert a fork: nodeD at slot 2 (epoch 0) branching from nodeA, not on the canonical chain.
		nodeDRoot := [32]byte{4}
		stD, robD, err := prepareForkchoiceState(ctx, 2, nodeDRoot, nodeARoot, [32]byte{13}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]}, &ethpb.Checkpoint{Epoch: 0, Root: zeroHash[:]})
		require.NoError(t, err)
		require.NoError(t, fcs.InsertNode(ctx, stD, robD))

		// nodeD is at epoch 0 < justified epoch 1, but not canonical => false
		require.Equal(t, false, service.ShouldIgnoreData(nodeDRoot, currentSlot))
	})
}

func Test_hashForGenesisRoot(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	ctx := t.Context()
@@ -321,7 +321,7 @@ func (s *Service) pruneInvalidBlock(ctx context.Context, root, parentRoot, paren

// getPayloadAttribute returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
-func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot []byte) payloadattribute.Attributer {
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
	emptyAttri := payloadattribute.EmptyWithVersion(st.Version())

	// If it is an epoch boundary then process slots to get the right
@@ -343,7 +343,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
	// right proposer index pre-Fulu, either way we need to copy the state to process it.
	st = st.Copy()
	var err error
-	st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
	st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot, slot)
	if err != nil {
		log.WithError(err).Error("Could not process slots to get payload attribute")
		return emptyAttri
@@ -371,66 +371,91 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
	}

	v := st.Version()

-	if v >= version.Deneb {
-		withdrawals, _, err := st.ExpectedWithdrawals()
-		if err != nil {
-			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
-			return emptyAttri
-		}
-
-		attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
-			Timestamp:             uint64(t.Unix()),
-			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: val.FeeRecipient[:],
-			Withdrawals:           withdrawals,
-			ParentBeaconBlockRoot: headRoot,
-		})
-		if err != nil {
-			log.WithError(err).Error("Could not get payload attribute")
-			return emptyAttri
-		}
-
-		return attr
-	}
-
-	if v >= version.Capella {
-		withdrawals, _, err := st.ExpectedWithdrawals()
-		if err != nil {
-			log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
-			return emptyAttri
-		}
-
-		attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
-			Timestamp:             uint64(t.Unix()),
-			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: val.FeeRecipient[:],
-			Withdrawals:           withdrawals,
-		})
-		if err != nil {
-			log.WithError(err).Error("Could not get payload attribute")
-			return emptyAttri
-		}
-
-		return attr
-	}
-
-	if v >= version.Bellatrix {
-		attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
-			Timestamp:             uint64(t.Unix()),
-			PrevRandao:            prevRando,
-			SuggestedFeeRecipient: val.FeeRecipient[:],
-		})
-		if err != nil {
-			log.WithError(err).Error("Could not get payload attribute")
-			return emptyAttri
-		}
-
-		return attr
-	}
-
-	log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
-	return emptyAttri
	switch {
	case v >= version.Gloas:
		return payloadAttributesGloas(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
	case v >= version.Deneb:
		return payloadAttributesDeneb(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:], headRoot)
	case v >= version.Capella:
		return payloadAttributesCapella(st, uint64(t.Unix()), prevRando, val.FeeRecipient[:])
	case v >= version.Bellatrix:
		return payloadAttributesBellatrix(uint64(t.Unix()), prevRando, val.FeeRecipient[:])
	default:
		log.WithField("version", version.String(v)).Error("Could not get payload attribute due to unknown state version")
		return payloadattribute.EmptyWithVersion(v)
	}
}

func payloadAttributesGloas(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
	withdrawals, err := st.WithdrawalsForPayload()
	if err != nil {
		log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
		Timestamp:             timestamp,
		PrevRandao:            prevRandao,
		SuggestedFeeRecipient: feeRecipient,
		Withdrawals:           withdrawals,
		ParentBeaconBlockRoot: parentBeaconBlockRoot,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	return attr
}

func payloadAttributesDeneb(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient, parentBeaconBlockRoot []byte) payloadattribute.Attributer {
	withdrawals, _, err := st.ExpectedWithdrawals()
	if err != nil {
		log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
		Timestamp:             timestamp,
		PrevRandao:            prevRandao,
		SuggestedFeeRecipient: feeRecipient,
		Withdrawals:           withdrawals,
		ParentBeaconBlockRoot: parentBeaconBlockRoot,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	return attr
}

func payloadAttributesCapella(st state.BeaconState, timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
	withdrawals, _, err := st.ExpectedWithdrawals()
	if err != nil {
		log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
		Timestamp:             timestamp,
		PrevRandao:            prevRandao,
		SuggestedFeeRecipient: feeRecipient,
		Withdrawals:           withdrawals,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(st.Version())
	}
	return attr
}

func payloadAttributesBellatrix(timestamp uint64, prevRandao, feeRecipient []byte) payloadattribute.Attributer {
	attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
		Timestamp:             timestamp,
		PrevRandao:            prevRandao,
		SuggestedFeeRecipient: feeRecipient,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return payloadattribute.EmptyWithVersion(version.Bellatrix)
	}
	return attr
}

// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.
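The switch keeps one constructor per fork and pushes the error handling into the helpers, so every path degrades to an empty Attributer rather than nil. A hedged usage sketch (the root arguments mirror the updated tests below; the surrounding service wiring is assumed, not part of this change):

	// An empty result means "send forkchoiceUpdated without attributes".
	attr := service.getPayloadAttribute(ctx, headState, slot, headRoot[:], headRoot[:])
	if attr.IsEmpty() {
		return
	}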
@@ -717,14 +717,14 @@ func Test_GetPayloadAttribute(t *testing.T) {
	ctx := tr.ctx

	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
	attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
	require.Equal(t, true, attr.IsEmpty())

	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
	// Cache hit, advance state, no fee recipient
	slot := primitives.Slot(1)
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())

@@ -732,7 +732,7 @@ func Test_GetPayloadAttribute(t *testing.T) {
	suggestedAddr := common.HexToAddress("123")
	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
}
@@ -747,7 +747,7 @@ func Test_GetPayloadAttribute_PrepareAllPayloads(t *testing.T) {
	ctx := tr.ctx

	st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
-	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
	attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
}
@@ -757,14 +757,14 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
	ctx := tr.ctx

	st, _ := util.DeterministicGenesisStateCapella(t, 1)
-	attr := service.getPayloadAttribute(ctx, st, 0, []byte{})
	attr := service.getPayloadAttribute(ctx, st, 0, []byte{}, []byte{})
	require.Equal(t, true, attr.IsEmpty())

	// Cache hit, advance state, no fee recipient
	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
	slot := primitives.Slot(1)
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
	a, err := attr.Withdrawals()
@@ -775,7 +775,7 @@ func Test_GetPayloadAttributeV2(t *testing.T) {
	suggestedAddr := common.HexToAddress("123")
	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
	a, err = attr.Withdrawals()
@@ -809,14 +809,14 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
	service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
	ctx := tr.ctx

-	attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{})
	attr := service.getPayloadAttribute(ctx, test.st, 0, []byte{}, []byte{})
	require.Equal(t, true, attr.IsEmpty())

	// Cache hit, advance state, no fee recipient
	slot := primitives.Slot(1)
	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient()).String())
	a, err := attr.Withdrawals()
@@ -827,7 +827,7 @@ func Test_GetPayloadAttributeV3(t *testing.T) {
	suggestedAddr := common.HexToAddress("123")
	service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
-	attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:])
	attr = service.getPayloadAttribute(ctx, test.st, slot, params.BeaconConfig().ZeroHash[:], params.BeaconConfig().ZeroHash[:])
	require.Equal(t, false, attr.IsEmpty())
	require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient()))
	a, err = attr.Withdrawals()
@@ -18,19 +18,21 @@ import (
	"github.com/sirupsen/logrus"
)

-func (s *Service) isNewHead(r [32]byte) bool {
func (s *Service) isNewHead(r [32]byte, full bool) bool {
	s.headLock.RLock()
	defer s.headLock.RUnlock()

	currentHeadRoot := s.originBlockRoot
	currentFull := false
	if s.head != nil {
		currentHeadRoot = s.headRoot()
		currentFull = s.head.full
	}

-	return r != currentHeadRoot || r == [32]byte{}
	return r != currentHeadRoot || full != currentFull || r == [32]byte{}
}

-func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
func (s *Service) getStateAndBlock(ctx context.Context, r, h [32]byte) (state.BeaconState, interfaces.ReadOnlySignedBeaconBlock, error) {
	if !s.hasBlockInInitSyncOrDB(ctx, r) {
		return nil, nil, errors.New("block does not exist")
	}
@@ -38,7 +40,7 @@ func (s *Service) getStateAndBlock(ctx context.Context, r [32]byte) (state.Beaco
	if err != nil {
		return nil, nil, err
	}
-	headState, err := s.cfg.StateGen.StateByRoot(ctx, r)
	headState, err := s.cfg.StateGen.StateByRoot(ctx, h)
	if err != nil {
		return nil, nil, err
	}
@@ -70,7 +72,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
		return
	}
	// If head has not been updated and attributes are nil, we can skip the FCU.
-	if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
	if !s.isNewHead(cfg.headRoot, false) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
		return
	}
	// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
@@ -81,7 +83,7 @@ func (s *Service) sendFCU(cfg *postBlockProcessConfig) {
		go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
	}

-	if s.isNewHead(fcuArgs.headRoot) {
	if s.isNewHead(fcuArgs.headRoot, false) {
		if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
			log.WithError(err).Error("Could not save head")
		}
@@ -19,23 +19,42 @@ import (
func TestService_isNewHead(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
-	require.Equal(t, true, service.isNewHead([32]byte{}))

	// Zero root is always a new head
	require.Equal(t, true, service.isNewHead([32]byte{}, false))
	require.Equal(t, true, service.isNewHead([32]byte{}, true))

	// Different root is a new head
	service.head = &head{root: [32]byte{1}}
-	require.Equal(t, true, service.isNewHead([32]byte{2}))
-	require.Equal(t, false, service.isNewHead([32]byte{1}))
	require.Equal(t, true, service.isNewHead([32]byte{2}, false))

	// Same root and same full status is not a new head
	require.Equal(t, false, service.isNewHead([32]byte{1}, false))

	// Same root but different full status is a new head
	require.Equal(t, true, service.isNewHead([32]byte{1}, true))

	// Same root and both full is not a new head
	service.head = &head{root: [32]byte{1}, full: true}
	require.Equal(t, false, service.isNewHead([32]byte{1}, true))

	// Same root, head is full but incoming is not full, is a new head
	require.Equal(t, true, service.isNewHead([32]byte{1}, false))

	// Nil head should use origin root
	service.head = nil
	service.originBlockRoot = [32]byte{3}
-	require.Equal(t, true, service.isNewHead([32]byte{2}))
-	require.Equal(t, false, service.isNewHead([32]byte{3}))
	require.Equal(t, true, service.isNewHead([32]byte{2}, false))
	require.Equal(t, false, service.isNewHead([32]byte{3}, false))

	// Nil head with full=true is always a new head (originBlockRoot has full=false)
	require.Equal(t, true, service.isNewHead([32]byte{3}, true))
}

func TestService_getHeadStateAndBlock(t *testing.T) {
	beaconDB := testDB.SetupDB(t)
	service := setupBeaconChain(t, beaconDB)
-	_, _, err := service.getStateAndBlock(t.Context(), [32]byte{})
	_, _, err := service.getStateAndBlock(t.Context(), [32]byte{}, [32]byte{})
	require.ErrorContains(t, "block does not exist", err)

	blk, err := blocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{Signature: []byte{1}}))
@@ -1,11 +1,42 @@
package blockchain

import (
	"context"
	"math"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	consensus_blocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

func (s *Service) waitUntilEpoch(target primitives.Epoch, secondsPerSlot uint64) error {
	if slots.ToEpoch(s.CurrentSlot()) >= target {
		return nil
	}
	ticker := slots.NewSlotTicker(s.genesisTime, secondsPerSlot)
	defer ticker.Done()
	for {
		select {
		case slot := <-ticker.C():
			if slots.ToEpoch(slot) >= target {
				return nil
			}
		case <-s.ctx.Done():
			return s.ctx.Err()
		}
	}
}

// getLookupParentRoot returns the root that serves as the key to generate the parent state for the passed beacon block.
// If the block builds on empty, or is pre-Gloas, the key is the parent root of the block; if it builds on full, the key
// is the parent block hash.
@@ -16,6 +47,14 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
	if b.Version() < version.Gloas {
		return parentRoot, nil
	}
	parentSlot, err := s.cfg.ForkChoiceStore.Slot(parentRoot)
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "failed to get slot for parent root")
	}

	if slots.ToEpoch(parentSlot) < params.BeaconConfig().GloasForkEpoch {
		return parentRoot, nil
	}
	blockHash, err := s.cfg.ForkChoiceStore.BlockHash(parentRoot)
	if err != nil {
		return [32]byte{}, errors.Wrap(err, "failed to get block hash for parent root")
@@ -33,3 +72,142 @@ func (s *Service) getLookupParentRoot(b consensus_blocks.ROBlock) ([32]byte, err
	}
	return parentRoot, nil
}
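Summarizing the lookup-key decision implemented above, as a comment-only sketch of the same branches (the middle comparison is elided by the hunk but is exercised by the BuildsOnEmpty/BuildsOnFull tests further down):

	// pre-Gloas block                              -> parent root
	// parent slot before GloasForkEpoch            -> parent root
	// bid parent hash == parent's payload hash     -> parent block hash ("builds on full")
	// otherwise                                    -> parent root       ("builds on empty")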
func (s *Service) runLatePayloadTasks() {
	if err := s.waitForSync(); err != nil {
		log.WithError(err).Error("Failed to wait for initial sync")
		return
	}
	cfg := params.BeaconConfig()
	if cfg.GloasForkEpoch == math.MaxUint64 {
		return
	}
	if err := s.waitUntilEpoch(cfg.GloasForkEpoch, cfg.SecondsPerSlot); err != nil {
		return
	}
	offset := cfg.SlotComponentDuration(cfg.PayloadAttestationDueBPS)
	ticker := slots.NewSlotTickerWithOffset(s.genesisTime, offset, cfg.SecondsPerSlot)
	defer ticker.Done()
	for {
		select {
		case <-ticker.C():
			s.latePayloadTasks(s.ctx)
		case <-s.ctx.Done():
			log.Debug("Context closed, exiting late payload tasks routine")
			return
		}
	}
}

func (s *Service) checkIfProposing(st state.ReadOnlyBeaconState, slot primitives.Slot) (cache.TrackedValidator, bool) {
	e := slots.ToEpoch(slot)
	stateEpoch := slots.ToEpoch(st.Slot())
	fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
	if e == stateEpoch || fuluAndNextEpoch {
		return s.trackedProposer(st, slot)
	}
	return cache.TrackedValidator{}, false
}

// This is a Gloas version of getPayloadAttribute that avoids all the clutter that was originally due to the proposer index.
// It is guaranteed to be called for the current slot + 1, with the head state already advanced to at least the current epoch.
func (s *Service) getPayloadAttributeGloas(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot, headRoot, accessRoot []byte) payloadattribute.Attributer {
	emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
	val, proposing := s.checkIfProposing(st, slot)
	if !proposing {
		return emptyAttri
	}

	st, err := transition.ProcessSlotsIfNeeded(ctx, st, accessRoot, slot)
	if err != nil {
		log.WithError(err).Error("Could not process slots to get payload attribute")
		return emptyAttri
	}

	// Get previous randao.
	prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
	if err != nil {
		log.WithError(err).Error("Could not get randao mix to get payload attribute")
		return emptyAttri
	}

	// Get timestamp.
	t, err := slots.StartTime(s.genesisTime, slot)
	if err != nil {
		log.WithError(err).Error("Could not get timestamp to get payload attribute")
		return emptyAttri
	}

	withdrawals, err := st.WithdrawalsForPayload()
	if err != nil {
		log.WithError(err).Error("Could not get payload withdrawals to get payload attribute")
		return emptyAttri
	}

	attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
		Timestamp:             uint64(t.Unix()),
		PrevRandao:            prevRando,
		SuggestedFeeRecipient: val.FeeRecipient[:],
		Withdrawals:           withdrawals,
		ParentBeaconBlockRoot: headRoot,
	})
	if err != nil {
		log.WithError(err).Error("Could not get payload attribute")
		return emptyAttri
	}
	return attr
}
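A hedged call sketch, mirroring latePayloadTasks below: the head root doubles as the access root because the head at that point is the empty block (headState and headRoot here are illustrative names, not from the diff):

	attr := s.getPayloadAttributeGloas(ctx, headState, s.CurrentSlot()+1, headRoot, headRoot)
	if attr.IsEmpty() {
		return // not the proposer for the next slot
	}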
// latePayloadTasks updates the NSC (next slot cache) and epoch boundary caches when there is no payload in the current slot (and there is a block).
// The case where the block was also missing would have been dealt with by lateBlockTasks already.
// We call FCU only if we are proposing next slot, as the execution head is assumed to not have changed.
func (s *Service) latePayloadTasks(ctx context.Context) {
	currentSlot := s.CurrentSlot()
	if currentSlot != s.HeadSlot() {
		// We must've already sent an FCU and updated the caches in lateBlockTasks.
		return
	}
	r, err := s.HeadRoot(ctx)
	if err != nil {
		log.WithError(err).Error("Failed to get head root")
		return
	}
	hr := [32]byte(r)
	if s.payloadBeingSynced.isSyncing(hr) {
		return
	}
	if s.HasFullNode(hr) {
		return
	}
	st, err := s.HeadStateReadOnly(ctx)
	if err != nil {
		log.WithError(err).Error("Failed to get head state")
		return
	}
	if !s.inRegularSync() {
		return
	}
	attr := s.getPayloadAttributeGloas(ctx, st, currentSlot+1, r, r)
	if attr == nil || attr.IsEmpty() {
		return
	}
	beaconLatePayloadTaskTriggeredTotal.Inc()
	// Head is the empty block.
	bh, err := st.LatestBlockHash()
	if err != nil {
		log.WithError(err).Error("Could not get latest block hash to notify engine")
		return
	}
	pid, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attr)
	if err != nil {
		log.WithError(err).Error("Could not notify forkchoice update")
		return
	}
	if pid == nil {
		log.Warn("Received nil payload ID from forkchoice update")
		return
	}
	var pId [8]byte
	copy(pId[:], pid[:])
	s.cfg.PayloadIDCache.Set(currentSlot+1, hr, pId)
}
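The cached ID is keyed by the slot being built for and the current head root; a consumer for the next slot would look it up the same way the tests below do (a sketch, with headRoot assumed):

	pid, ok := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
	// ok is false when no forkchoiceUpdated with attributes was sent for that slot.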
@@ -21,6 +21,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	logTest "github.com/sirupsen/logrus/hooks/test"
)

@@ -445,6 +446,37 @@ func TestPostPayloadHeadUpdate_NotHead(t *testing.T) {
	require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, headRoot[:]))
}

func TestPostPayloadHeadUpdate_SetsHeadFull(t *testing.T) {
	s, _ := setupGloasService(t, &mockExecution.EngineClient{})
	ctx := t.Context()

	root := bytesutil.ToBytes32([]byte("root1"))
	blockHash := bytesutil.ToBytes32([]byte("hash1"))

	base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)
	signed, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	s.head = &head{root: root, block: signed, state: st, slot: 1}
	require.Equal(t, false, s.head.full)

	env := &ethpb.ExecutionPayloadEnvelope{
		BeaconBlockRoot: root[:],
		Payload:         &enginev1.ExecutionPayloadDeneb{BlockHash: blockHash[:], ParentHash: make([]byte, 32)},
		Slot:            1,
	}
	envelope, err := blocks.WrappedROExecutionPayloadEnvelope(env)
	require.NoError(t, err)

	require.NoError(t, s.postPayloadHeadUpdate(ctx, envelope, st, root, root[:]))

	s.headLock.RLock()
	require.Equal(t, true, s.head.full)
	s.headLock.RUnlock()
}

func TestGetLookupParentRoot_PreGloas(t *testing.T) {
	service, _ := minimalTestService(t)
@@ -465,6 +497,12 @@ func TestGetLookupParentRoot_PreGloas(t *testing.T) {
}

func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 0
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, req := minimalTestService(t)
	ctx := t.Context()

@@ -506,6 +544,12 @@ func TestGetLookupParentRoot_GloasBuildsOnEmpty(t *testing.T) {
}

func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 0
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, req := minimalTestService(t)
	ctx := t.Context()

@@ -546,6 +590,128 @@ func TestGetLookupParentRoot_GloasBuildsOnFull(t *testing.T) {
	require.Equal(t, parentNodeBlockHash, got)
}

func TestGetLookupParentRoot_GloasParentPreForkEpoch(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 2
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, req := minimalTestService(t)
	ctx := t.Context()

	parentRoot := [32]byte{1}
	parentNodeBlockHash := [32]byte{10}
	parentSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
	require.NoError(t, err)
	parentSlot = parentSlot - 1

	st, parentROBlock, err := prepareGloasForkchoiceState(
		ctx,
		parentSlot,
		parentRoot,
		params.BeaconConfig().ZeroHash,
		parentNodeBlockHash,
		params.BeaconConfig().ZeroHash,
		0,
		0,
	)
	require.NoError(t, err)
	require.NoError(t, req.fcs.InsertNode(ctx, st, parentROBlock))

	blockHash := [32]byte{20}
	bid := util.HydrateSignedExecutionPayloadBid(&ethpb.SignedExecutionPayloadBid{
		Message: &ethpb.ExecutionPayloadBid{
			BlockHash:       blockHash[:],
			ParentBlockHash: parentNodeBlockHash[:],
		},
	})

	blk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{
		Block: &ethpb.BeaconBlockGloas{
			Slot:       parentSlot + 1,
			ParentRoot: parentRoot[:],
			Body: &ethpb.BeaconBlockBodyGloas{
				SignedExecutionPayloadBid: bid,
			},
		},
	})
	wsb, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)
	roblock, err := blocks.NewROBlock(wsb)
	require.NoError(t, err)

	got, err := service.getLookupParentRoot(roblock)
	require.NoError(t, err)
	// Parent slot is pre-fork, so always return parentRoot.
	require.Equal(t, parentRoot, got)
}

func TestLatePayloadTasks_ReturnsEarlyWhenBlockLate(t *testing.T) {
	logHook := logTest.NewGlobal()
	service, tr := setupGloasService(t, &mockExecution.EngineClient{})

	blockHash := bytesutil.ToBytes32([]byte("hash1"))
	base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
	base.LatestBlockHash = blockHash[:]
	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	headRoot := bytesutil.ToBytes32([]byte("headroot"))
	service.head = &head{
		root:  headRoot,
		state: st,
		slot:  1,
	}
	// Set genesis time so CurrentSlot > HeadSlot.
	service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))

	service.latePayloadTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
	// No payload ID should have been cached.
	_, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
	require.Equal(t, false, has)
}

func TestLatePayloadTasks_SendsFCU(t *testing.T) {
	logHook := logTest.NewGlobal()
	resetCfg := features.InitWithReset(&features.Flags{
		PrepareAllPayloads: true,
	})
	defer resetCfg()

	pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
	service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})

	blockHash := bytesutil.ToBytes32([]byte("hash1"))
	base, blk := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
	base.LatestBlockHash = blockHash[:]
	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	signed, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	headRoot := bytesutil.ToBytes32([]byte("headroot"))
	service.head = &head{
		root:  headRoot,
		block: signed,
		state: st,
		slot:  1,
	}
	// CurrentSlot == HeadSlot == 1: place genesis 1.5 slots ago so we're solidly in slot 1.
	service.SetGenesisTime(time.Now().Add(-3 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second / 2))
	service.SetForkChoiceGenesisTime(service.genesisTime)

	service.latePayloadTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "Could not notify forkchoice update")
	require.LogsDoNotContain(t, logHook, "Could not get")
	// Payload ID should have been cached.
	cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
	require.Equal(t, true, has)
	require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
}

func TestLateBlockTasks_GloasFCU(t *testing.T) {
	logHook := logTest.NewGlobal()
	resetCfg := features.InitWithReset(&features.Flags{
@@ -575,4 +741,217 @@ func TestLateBlockTasks_GloasFCU(t *testing.T) {

	service.lateBlockTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")

	// Payload ID should have been cached by the Gloas FCU path.
	cachedPid, has := service.cfg.PayloadIDCache.PayloadID(service.CurrentSlot()+1, headRoot)
	require.Equal(t, true, has)
	require.Equal(t, primitives.PayloadID(pid[:]), cachedPid)
}
// TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead verifies that saveHead does not
// treat the head as "full" when the latest execution payload bid was issued in a pre-fork epoch.
// This guards against the Fulu->Gloas upgrade-seeded bid (bid.BlockHash == latestBlockHash,
// bid.Slot == 0) causing a spurious full=true head before any real Gloas bid has been processed.
func TestSaveHead_GloasForkBoundary_PreforkBidForcesEmptyHead(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 1
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, _ := setupGloasService(t, &mockExecution.EngineClient{})
	ctx := t.Context()

	blockRoot := bytesutil.ToBytes32([]byte("root1"))
	parentRoot := params.BeaconConfig().ZeroHash
	blockHash := bytesutil.ToBytes32([]byte("hash1"))

	// Create a Gloas state where IsParentBlockFull()==true (bid.BlockHash == LatestBlockHash)
	// but bid.Slot is 0 (epoch 0, pre-fork). This mimics the upgrade-seeded state.
	base, blk := testGloasState(t, 1, parentRoot, blockHash)
	base.LatestBlockHash = blockHash[:]
	// bid.Slot defaults to 0, which is before GloasForkEpoch=1.

	// Set a valid initial head so saveHead's headBlock() call does not panic.
	// We do NOT insert the old block into forkchoice because insertGloasBlock
	// would claim the tree root slot; the target block (parentRoot=ZeroHash) must
	// be the first node inserted so it can become the tree root.
	oldBlk := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
	oldSigned, err2 := blocks.NewSignedBeaconBlock(oldBlk)
	require.NoError(t, err2)
	oldSt, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
		Slot:                       0,
		RandaoMixes:                make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
		FinalizedCheckpoint:        &ethpb.Checkpoint{Root: make([]byte, 32)},
		LatestBlockHeader:          &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
		Eth1Data:                   &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
		LatestExecutionPayloadBid:  &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
		BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
			pp := make([]*ethpb.BuilderPendingPayment, 64)
			for i := range pp {
				pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
			}
			return pp
		}(),
		ExecutionPayloadAvailability: make([]byte, 1024),
		LatestBlockHash:              make([]byte, 32),
		PayloadExpectedWithdrawals:   make([]*enginev1.Withdrawal, 0),
		ProposerLookahead:            make([]uint64, 64),
	})
	require.NoError(t, err2)
	oldRoot := bytesutil.ToBytes32([]byte("oldroot1"))
	service.head = &head{root: oldRoot, block: oldSigned, state: oldSt, slot: 0}

	insertGloasBlock(t, service, base, blk, blockRoot)

	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	// Verify precondition: IsParentBlockFull() is true.
	full, err := st.IsParentBlockFull()
	require.NoError(t, err)
	require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")

	// Verify guard precondition: bid.Slot is pre-fork.
	bid, err := st.LatestExecutionPayloadBid()
	require.NoError(t, err)
	isPrefork := slots.ToEpoch(bid.Slot()) < params.BeaconConfig().GloasForkEpoch
	require.Equal(t, true, isPrefork, "precondition: bid.Slot must be pre-fork")

	ssigned, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	// saveHead should NOT mark the head as full because bid.Slot < GloasForkEpoch.
	require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))

	service.headLock.RLock()
	headFull := service.head.full
	service.headLock.RUnlock()
	require.Equal(t, false, headFull, "head must not be full for upgrade-seeded bid")
}
// TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead verifies that saveHead correctly
// marks the head as full when the latest bid is from a post-fork epoch.
func TestSaveHead_GloasForkBoundary_PostforkBidSetsFullHead(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 1
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	service, _ := setupGloasService(t, &mockExecution.EngineClient{})
	ctx := t.Context()

	forkSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
	require.NoError(t, err)

	blockRoot := bytesutil.ToBytes32([]byte("root1"))
	parentRoot := params.BeaconConfig().ZeroHash
	blockHash := bytesutil.ToBytes32([]byte("hash1"))

	// Set a valid initial head so saveHead's headBlock() call does not panic.
	// Do NOT use insertGloasBlock for the old block: the target block must be
	// the first node inserted so it can claim the tree root (parentRoot=ZeroHash).
	oldBlk2 := util.HydrateSignedBeaconBlockGloas(&ethpb.SignedBeaconBlockGloas{})
	oldSigned2, err2 := blocks.NewSignedBeaconBlock(oldBlk2)
	require.NoError(t, err2)
	oldSt2, err2 := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{
		Slot:                       0,
		RandaoMixes:                make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
		CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
		FinalizedCheckpoint:        &ethpb.Checkpoint{Root: make([]byte, 32)},
		LatestBlockHeader:          &ethpb.BeaconBlockHeader{ParentRoot: make([]byte, 32), StateRoot: make([]byte, 32), BodyRoot: make([]byte, 32)},
		Eth1Data:                   &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
		LatestExecutionPayloadBid:  &ethpb.ExecutionPayloadBid{BlockHash: make([]byte, 32), ParentBlockHash: make([]byte, 32), ParentBlockRoot: make([]byte, 32), PrevRandao: make([]byte, 32), FeeRecipient: make([]byte, 20), BlobKzgCommitments: [][]byte{make([]byte, 48)}},
		BuilderPendingPayments: func() []*ethpb.BuilderPendingPayment {
			pp := make([]*ethpb.BuilderPendingPayment, 64)
			for i := range pp {
				pp[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)}}
			}
			return pp
		}(),
		ExecutionPayloadAvailability: make([]byte, 1024),
		LatestBlockHash:              make([]byte, 32),
		PayloadExpectedWithdrawals:   make([]*enginev1.Withdrawal, 0),
		ProposerLookahead:            make([]uint64, 64),
	})
	require.NoError(t, err2)
	oldRoot2 := bytesutil.ToBytes32([]byte("oldroot2"))
	service.head = &head{root: oldRoot2, block: oldSigned2, state: oldSt2, slot: 0}

	base, blk := testGloasState(t, forkSlot+1, parentRoot, blockHash)
	base.LatestBlockHash = blockHash[:]
	// Set bid.Slot to a post-fork epoch slot.
	base.LatestExecutionPayloadBid.Slot = forkSlot + 1

	insertGloasBlock(t, service, base, blk, blockRoot)

	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	// Verify preconditions.
	full, err := st.IsParentBlockFull()
	require.NoError(t, err)
	require.Equal(t, true, full, "precondition: IsParentBlockFull must be true")

	bid, err := st.LatestExecutionPayloadBid()
	require.NoError(t, err)
	isPostfork := slots.ToEpoch(bid.Slot()) >= params.BeaconConfig().GloasForkEpoch
	require.Equal(t, true, isPostfork, "precondition: bid.Slot must be post-fork")

	ssigned, err := blocks.NewSignedBeaconBlock(blk)
	require.NoError(t, err)

	// saveHead SHOULD mark the head as full because bid.Slot >= GloasForkEpoch.
	require.NoError(t, service.saveHead(ctx, blockRoot, ssigned, st))

	service.headLock.RLock()
	headFull := service.head.full
	service.headLock.RUnlock()
	require.Equal(t, true, headFull, "head must be full for real post-fork bid")
}
// TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot verifies that lateBlockTasks
// uses headRoot (not LatestBlockHash) as the accessRoot when the bid is from a pre-fork epoch.
// Without this guard, the upgrade-seeded bid would cause lateBlockTasks to use the wrong
// access root for the next-slot cache.
func TestLateBlockTasks_GloasForkBoundary_PreforkBidUsesHeadRoot(t *testing.T) {
	logHook := logTest.NewGlobal()
	resetCfg := features.InitWithReset(&features.Flags{
		PrepareAllPayloads: true,
	})
	defer resetCfg()

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig().Copy()
	cfg.GloasForkEpoch = 1
	cfg.InitializeForkSchedule()
	params.OverrideBeaconConfig(cfg)

	pid := &enginev1.PayloadIDBytes{1, 2, 3, 4, 5, 6, 7, 8}
	service, tr := setupGloasService(t, &mockExecution.EngineClient{PayloadIDBytes: pid})

	blockHash := bytesutil.ToBytes32([]byte("hash1"))
	base, _ := testGloasState(t, 1, params.BeaconConfig().ZeroHash, blockHash)
	// Make IsParentBlockFull() true: bid.BlockHash == LatestBlockHash.
	base.LatestBlockHash = blockHash[:]
	// bid.Slot is 0 (pre-fork epoch): the epoch guard should prevent using LatestBlockHash as accessRoot.

	st, err := state_native.InitializeFromProtoUnsafeGloas(base)
	require.NoError(t, err)

	headRoot := bytesutil.ToBytes32([]byte("headroot"))
	service.head = &head{
		root:  headRoot,
		state: st,
		slot:  1,
	}

	// Trigger late block logic: CurrentSlot > HeadSlot.
	service.SetGenesisTime(time.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second))
	service.SetForkChoiceGenesisTime(service.genesisTime)

	service.lateBlockTasks(tr.ctx)
	require.LogsDoNotContain(t, logHook, "could not perform late block tasks")
}
@@ -50,6 +50,7 @@ type head struct {
	block      interfaces.ReadOnlySignedBeaconBlock // current head block.
	state      state.BeaconState                    // current head state.
	slot       primitives.Slot                      // the head block slot number.
	full       bool                                 // whether the head is a full (payload revealed) node rather than an empty one, after Gloas.
	optimistic bool                                 // optimistic status when the head was saved.
}
@@ -60,8 +61,24 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
	ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
	defer span.End()

	// Pre-Gloas we treat the head as empty because we still key states by block root.
	var full bool
	var err error
	if headState.Version() >= version.Gloas {
		gloasFirstSlot, err := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
		if err != nil {
			return errors.Wrap(err, "could not compute gloas first slot")
		}
		if headState.Slot() > gloasFirstSlot {
			full, err = headState.IsParentBlockFull()
			if err != nil {
				return errors.Wrap(err, "could not determine if head is full or not")
			}
		}
	}

	// Do nothing if head hasn't changed.
-	if !s.isNewHead(newHeadRoot) {
	if !s.isNewHead(newHeadRoot, full) {
		return nil
	}
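A compact restatement of the guard above (a sketch, not part of the diff): a head only counts as full once the state is past the first Gloas slot, because the upgrade-seeded bid at the fork boundary satisfies IsParentBlockFull without any payload having been revealed.

	// Sketch: full == (headState.Slot() > gloasFirstSlot) && headState.IsParentBlockFull()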
@@ -157,6 +174,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
		state:      headState,
		optimistic: isOptimistic,
		slot:       headBlock.Block().Slot(),
		full:       full,
	}
	if err := s.setHead(newHead); err != nil {
		return errors.Wrap(err, "could not set head")
@@ -217,6 +235,7 @@ func (s *Service) setHead(newHead *head) error {
		root:       newHead.root,
		block:      bCp,
		state:      newHead.state.Copy(),
		full:       newHead.full,
		optimistic: newHead.optimistic,
		slot:       newHead.slot,
	}
@@ -333,13 +352,16 @@ func (s *Service) notifyNewHeadEvent(
	if currentDutyDependentRoot == [32]byte{} {
		currentDutyDependentRoot = s.originBlockRoot
	}
-	previousDutyDependentRoot := currentDutyDependentRoot
	var previousDutyDependentRoot [32]byte
	if currEpoch > 0 {
		previousDutyDependentRoot, err = s.DependentRoot(currEpoch.Sub(1))
		if err != nil {
			return errors.Wrap(err, "could not get duty dependent root")
		}
	}
	if previousDutyDependentRoot == [32]byte{} {
		previousDutyDependentRoot = s.originBlockRoot
	}

	isOptimistic, err := s.IsOptimistic(ctx)
	if err != nil {
@@ -213,7 +213,7 @@ func Test_notifyNewHeadEvent(t *testing.T) {
			Block:                     newHeadRoot[:],
			State:                     newHeadStateRoot[:],
			EpochTransition:           true,
-			PreviousDutyDependentRoot: make([]byte, 32),
			PreviousDutyDependentRoot: srv.originBlockRoot[:],
			CurrentDutyDependentRoot:  srv.originBlockRoot[:],
		}
		require.DeepSSZEqual(t, wanted, eventHead)
@@ -243,11 +243,35 @@ func Test_notifyNewHeadEvent(t *testing.T) {
			Block:                     newHeadRoot[:],
			State:                     newHeadStateRoot[:],
			EpochTransition:           true,
-			PreviousDutyDependentRoot: params.BeaconConfig().ZeroHash[:],
			PreviousDutyDependentRoot: srv.originBlockRoot[:],
			CurrentDutyDependentRoot:  srv.originBlockRoot[:],
		}
		require.DeepSSZEqual(t, wanted, eventHead)
	})
	t.Run("previous dependent root zero hash falls back to origin", func(t *testing.T) {
		srv := testServiceWithDB(t)
		srv.SetGenesisTime(time.Now())
		notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
		srv.originBlockRoot = [32]byte{0xab}
		st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
		require.NoError(t, err)
		require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
		newHeadRoot := [32]byte{3}
		st, blk, err = prepareForkchoiceState(t.Context(), 32, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
		require.NoError(t, err)
		require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
		newHeadSlot := params.BeaconConfig().SlotsPerEpoch
		require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, []byte{2}, newHeadRoot[:]))
		events := notifier.ReceivedEvents()
		require.Equal(t, 1, len(events))

		eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
		require.Equal(t, true, ok)
		// DependentRoot(0) returns zero hash since the forkchoice tree is sparse.
		// The fix ensures it falls back to originBlockRoot instead of sending zeros.
		assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.PreviousDutyDependentRoot)
		assert.DeepEqual(t, srv.originBlockRoot[:], eventHead.CurrentDutyDependentRoot)
	})
}

func TestRetrieveHead_ReadOnly(t *testing.T) {
@@ -77,10 +77,10 @@ func VerifyBlobKZGProofBatch(blobs [][]byte, commitments [][]byte, proofs [][]by
|
||||
return fmt.Errorf("blobs len (%d) differs from expected (%d)", len(blobs[i]), len(ckzg4844.Blob{}))
|
||||
}
|
||||
if len(commitments[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Blob{}))
|
||||
return fmt.Errorf("commitments len (%d) differs from expected (%d)", len(commitments[i]), len(ckzg4844.Bytes48{}))
|
||||
}
|
||||
if len(proofs[i]) != len(ckzg4844.Bytes48{}) {
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Blob{}))
|
||||
return fmt.Errorf("proofs len (%d) differs from expected (%d)", len(proofs[i]), len(ckzg4844.Bytes48{}))
|
||||
}
|
||||
ckzgBlobs[i] = ckzg4844.Blob(blobs[i])
|
||||
ckzgCommitments[i] = ckzg4844.Bytes48(commitments[i])
|
||||
|
||||
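For context on the corrected messages: a KZG blob and a commitment or proof have very different fixed sizes, so reporting the blob length as the expected commitment/proof length was misleading. The sizes below come from the EIP-4844 cryptography spec; the constant names are illustrative only.

```go
// Fixed KZG-4844 sizes referenced by the length checks above.
const (
	bytesPerBlob       = 4096 * 32 // len(ckzg4844.Blob{})   = 131072 bytes
	bytesPerCommitment = 48        // len(ckzg4844.Bytes48{}) = compressed BLS12-381 G1 point
	bytesPerProof      = 48        // len(ckzg4844.Bytes48{}) = compressed BLS12-381 G1 point
)
```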
@@ -132,6 +132,15 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
|
||||
}
|
||||
if block.Version() < version.Gloas {
|
||||
moreFields["dataAvailabilityWaitedTime"] = daWaitedTime
|
||||
} else {
|
||||
signedBid, err := block.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get signed execution payload bid for logging")
|
||||
} else {
|
||||
moreFields["blockHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.BlockHash))
|
||||
moreFields["parentHash"] = fmt.Sprintf("%#x", bytesutil.Trunc(signedBid.Message.ParentBlockHash))
|
||||
moreFields["builderIndex"] = signedBid.Message.BuilderIndex
|
||||
}
|
||||
}
|
||||
|
||||
level := logs.PackageVerbosity("beacon-chain/blockchain")
|
||||
|
||||
@@ -234,6 +234,25 @@ var (
|
||||
Help: "The maximum number of blobs allowed in a block.",
|
||||
},
|
||||
)
|
||||
beaconExecutionPayloadEnvelopeValidTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_execution_payload_envelope_valid_total",
|
||||
Help: "Count the number of execution payload envelopes that were processed successfully.",
|
||||
})
|
||||
beaconExecutionPayloadEnvelopeInvalidTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_execution_payload_envelope_invalid_total",
|
||||
Help: "Count the number of execution payload envelopes that failed processing.",
|
||||
})
|
||||
beaconExecutionPayloadEnvelopeProcessingDurationSeconds = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_execution_payload_envelope_processing_duration_seconds",
|
||||
Help: "Captures end-to-end processing time for execution payload envelopes.",
|
||||
Buckets: prometheus.DefBuckets,
|
||||
},
|
||||
)
|
||||
beaconLatePayloadTaskTriggeredTotal = promauto.NewCounter(prometheus.CounterOpts{
|
||||
Name: "beacon_late_payload_task_triggered_total",
|
||||
Help: "Count the number of times late payload tasks fired.",
|
||||
})
|
||||
)
|
||||
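As a hedged illustration of how these new envelope metrics would typically be driven around processing: the wrapper and the processEnvelope call below are stand-ins, not functions from this change, and the envelope type name is an assumption.

```go
// recordEnvelopeProcessing shows the intended usage of the counters and the
// histogram above: count one outcome per envelope and observe end-to-end time.
func recordEnvelopeProcessing(ctx context.Context, env *ethpb.SignedExecutionPayloadEnvelope) error {
	start := time.Now()
	if err := processEnvelope(ctx, env); err != nil { // processEnvelope is hypothetical
		beaconExecutionPayloadEnvelopeInvalidTotal.Inc()
		return err
	}
	beaconExecutionPayloadEnvelopeValidTotal.Inc()
	beaconExecutionPayloadEnvelopeProcessingDurationSeconds.Observe(time.Since(start).Seconds())
	return nil
}
```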
|
||||
// reportSlotMetrics reports slot related metrics.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
@@ -107,6 +108,11 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
}
|
||||
if cfg.roblock.Version() < version.Gloas {
|
||||
s.sendFCU(cfg)
|
||||
} else if s.isNewHead(cfg.headRoot, false) { // We reach this only when the incoming block is head.
|
||||
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(ctx, cfg.postState, cfg.roblock)
|
||||
}
|
||||
|
||||
// Pre-Fulu the caches are updated when computing the payload attributes
|
||||
@@ -129,7 +135,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
var err error
|
||||
preStateVersion := st.Version()
|
||||
switch preStateVersion {
|
||||
case version.Phase0, version.Altair:
|
||||
case version.Phase0, version.Altair, version.Gloas:
|
||||
default:
|
||||
preStateHeader, err = st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
@@ -139,7 +145,112 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
|
||||
return preStateVersion, preStateHeader, nil
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
// applyPayloadIfNeeded applies the parent block's execution payload envelope to
|
||||
// preState when the current block's bid indicates it built on a full parent.
|
||||
func (s *Service) applyPayloadIfNeeded(ctx context.Context, b interfaces.ReadOnlyBeaconBlock, parentRoot [32]byte, preState state.BeaconState) error {
|
||||
if b.Version() < version.Gloas || parentRoot == [32]byte{} {
|
||||
return nil
|
||||
}
|
||||
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get parent block with root %#x", parentRoot)
|
||||
}
|
||||
if parentBlock.Version() < version.Gloas {
|
||||
return nil
|
||||
}
|
||||
sb, err := b.Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload bid for block")
|
||||
}
|
||||
if sb == nil || sb.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Slot())
|
||||
}
|
||||
parentBid, err := parentBlock.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if parentBid == nil || parentBid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if !bytes.Equal(sb.Message.ParentBlockHash, parentBid.Message.BlockHash) {
|
||||
return nil
|
||||
}
|
||||
signedEnvelope, err := s.cfg.BeaconDB.ExecutionPayloadEnvelope(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload envelope for parent block with root %#x", parentRoot)
|
||||
}
|
||||
if signedEnvelope == nil || signedEnvelope.Message == nil {
|
||||
return nil
|
||||
}
|
||||
envelope, err := consensusblocks.WrappedROBlindedExecutionPayloadEnvelope(signedEnvelope.Message)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not wrap blinded execution payload envelope for parent block with root %#x", parentRoot)
|
||||
}
|
||||
return gloas.ApplyBlindedExecutionPayloadEnvelopeForStateGen(ctx, preState, parentBlock.Block().StateRoot(), envelope)
|
||||
}
|
||||
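The parent-hash comparison in applyPayloadIfNeeded reappears in fillInForkChoiceMissingBlocks later in this change. A hedged sketch of the shared predicate, factored out purely for illustration; the helper name and the bid type name are assumptions, and the change itself keeps the comparison inline.

```go
// builtOnFullParent reports whether the child's bid commits to the parent's
// revealed execution payload, i.e. the child block was built on the "full"
// version of its parent rather than the empty one.
func builtOnFullParent(childBid, parentBid *ethpb.ExecutionPayloadBid) bool {
	if childBid == nil || parentBid == nil {
		return false
	}
	return bytes.Equal(childBid.ParentBlockHash, parentBid.BlockHash)
}
```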
|
||||
// getBatchPrestate returns the pre-state to apply to the first beacon block in the batch and reports whether the first envelope was applied to it beforehand.
|
||||
func (s *Service) getBatchPrestate(ctx context.Context, b consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope) (state.BeaconState, bool, error) {
|
||||
if len(envelopes) == 0 || b.Version() < version.Gloas {
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
full, err := consensusblocks.BlockBuiltOnEnvelope(envelopes[0], b)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not check if block builds on envelope")
|
||||
}
|
||||
if !full {
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
parentRoot := b.Block().ParentRoot()
|
||||
if s.cfg.BeaconDB.HasExecutionPayloadEnvelope(ctx, parentRoot) {
|
||||
// This path should have been filtered already in init sync.
|
||||
log.Debugf("Ignoring already processed envelope for blockroot %#x", parentRoot)
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, env.BlockHash())
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return blockPreState, false, nil
|
||||
}
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
// notify the engine of the new envelope
|
||||
blockPreState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get block pre state")
|
||||
}
|
||||
if _, err := s.notifyNewEnvelope(ctx, blockPreState, env); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
parentBlock, err := s.cfg.BeaconDB.Block(ctx, parentRoot)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not get parent block")
|
||||
}
|
||||
if err := gloas.ApplyBlindedExecutionPayloadEnvelopeForStateGen(ctx, blockPreState, parentBlock.Block().StateRoot(), env); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
return blockPreState, true, nil
|
||||
}
|
||||
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header interfaces.ExecutionData
|
||||
}
|
||||
|
||||
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
@@ -153,16 +264,35 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
b := blks[0].Block()
|
||||
|
||||
// Retrieve incoming block's pre state.
|
||||
if err := s.verifyBlkPreState(ctx, b.ParentRoot()); err != nil {
|
||||
parentRoot := b.ParentRoot()
|
||||
if err := s.verifyBlkPreState(ctx, parentRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
preState, err := s.cfg.StateGen.StateByRootInitialSync(ctx, b.ParentRoot())
|
||||
preState, applied, err := s.getBatchPrestate(ctx, blks[0], envelopes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return fmt.Errorf("nil pre state for slot %d", b.Slot())
|
||||
}
|
||||
var eidx int
|
||||
var br [32]byte
|
||||
sigSet := bls.NewSet()
|
||||
if applied {
|
||||
eidx = 1
|
||||
envSigSet, err := gloas.ExecutionPayloadEnvelopeSignatureBatch(preState, envelopes[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigSet.Join(envSigSet)
|
||||
}
|
||||
if eidx < len(envelopes) {
|
||||
env, err := envelopes[eidx].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
br = env.BeaconBlockRoot()
|
||||
}
|
||||
|
||||
// Fill in missing blocks
|
||||
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.FinalizedCheckpoint(), preState.CurrentJustifiedCheckpoint()); err != nil {
|
||||
@@ -171,11 +301,6 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
|
||||
jCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
fCheckpoints := make([]*ethpb.Checkpoint, len(blks))
|
||||
sigSet := bls.NewSet()
|
||||
type versionAndHeader struct {
|
||||
version int
|
||||
header interfaces.ExecutionData
|
||||
}
|
||||
preVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
postVersionAndHeaders := make([]*versionAndHeader, len(blks))
|
||||
var set *bls.SignatureBatch
|
||||
@@ -197,6 +322,23 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
if err != nil {
|
||||
return invalidBlock{error: err}
|
||||
}
|
||||
if b.Root() == br && eidx < len(envelopes) {
|
||||
envSigSet, err := gloas.ApplyExecutionPayloadNoVerifySig(ctx, preState, b.Block().StateRoot(), envelopes[eidx])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sigSet.Join(envSigSet)
|
||||
eidx++
|
||||
if eidx < len(envelopes) {
|
||||
nextEnv, err := envelopes[eidx].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
br = nextEnv.BeaconBlockRoot()
|
||||
} else {
|
||||
br = [32]byte{}
|
||||
}
|
||||
}
|
||||
// Save potential boundary states.
|
||||
if slots.IsEpochStart(preState.Slot()) {
|
||||
boundaries[b.Root()] = preState.Copy()
|
||||
@@ -228,56 +370,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return errors.New("batch block signature verification failed")
|
||||
}
|
||||
|
||||
// blocks have been verified, save them and call the engine
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
for i, b := range blks {
|
||||
root := b.Root()
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
// this call does not have the root in forkchoice yet.
|
||||
return s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header, b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
pendingNodes[i] = args
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: root[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
|
||||
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
pendingNodes, isValidPayload, err := s.notifyEngineAndSaveData(ctx, blks, envelopes, avs, preVersionAndHeaders, postVersionAndHeaders, jCheckpoints, fCheckpoints)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Save boundary states that will be useful for forkchoice
|
||||
for r, st := range boundaries {
|
||||
@@ -292,6 +387,15 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return err
|
||||
}
|
||||
// Insert all nodes to forkchoice
|
||||
if applied {
|
||||
env, err := envelopes[0].Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertPayload(env); err != nil {
|
||||
return errors.Wrap(err, "could not insert first payload in batch to forkchoice")
|
||||
}
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertChain(ctx, pendingNodes); err != nil {
|
||||
return errors.Wrap(err, "could not insert batch to forkchoice")
|
||||
}
|
||||
@@ -304,6 +408,102 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
|
||||
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
|
||||
}
|
||||
|
||||
func (s *Service) notifyEngineAndSaveData(
|
||||
ctx context.Context,
|
||||
blks []consensusblocks.ROBlock,
|
||||
envelopes []interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
avs das.AvailabilityChecker,
|
||||
preVersionAndHeaders []*versionAndHeader,
|
||||
postVersionAndHeaders []*versionAndHeader,
|
||||
jCheckpoints []*ethpb.Checkpoint,
|
||||
fCheckpoints []*ethpb.Checkpoint,
|
||||
) ([]*forkchoicetypes.BlockAndCheckpoints, bool, error) {
|
||||
span := trace.FromContext(ctx)
|
||||
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, len(blks))
|
||||
var isValidPayload bool
|
||||
var err error
|
||||
|
||||
envMap := make(map[[32]byte]int, len(envelopes))
|
||||
for i, e := range envelopes {
|
||||
env, err := e.Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
envMap[env.BeaconBlockRoot()] = i
|
||||
}
|
||||
|
||||
for i, b := range blks {
|
||||
root := b.Root()
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
|
||||
JustifiedCheckpoint: jCheckpoints[i],
|
||||
FinalizedCheckpoint: fCheckpoints[i]}
|
||||
if b.Version() < version.Gloas {
|
||||
isValidPayload, err = s.notifyNewPayload(ctx,
|
||||
postVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].header, b)
|
||||
if err != nil {
|
||||
return nil, false, s.handleInvalidExecutionError(ctx, err, root, b.Block().ParentRoot(), [32]byte(postVersionAndHeaders[i].header.ParentHash()))
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.validateMergeTransitionBlock(ctx, preVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header, b); err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
idx, ok := envMap[root]
|
||||
if ok {
|
||||
env, err := envelopes[idx].Envelope()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
isValidPayload, err = s.notifyNewEnvelopeFromBlock(ctx, b, env)
|
||||
if err != nil {
|
||||
return nil, false, errors.Wrap(err, "could not notify new envelope from block")
|
||||
}
|
||||
args.HasPayload = true
|
||||
bh := env.BlockHash()
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: bh[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := s.areSidecarsAvailable(ctx, avs, b); err != nil {
|
||||
return nil, false, errors.Wrapf(err, "could not validate sidecar availability for block %#x at slot %d", b.Root(), b.Block().Slot())
|
||||
}
|
||||
|
||||
pendingNodes[i] = args
|
||||
if err := s.saveInitSyncBlock(ctx, root, b); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
if err := s.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: b.Block().Slot(),
|
||||
Root: root[:],
|
||||
}); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
if i > 0 && jCheckpoints[i].Epoch > jCheckpoints[i-1].Epoch {
|
||||
if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
if i > 0 && fCheckpoints[i].Epoch > fCheckpoints[i-1].Epoch {
|
||||
if err := s.updateFinalized(ctx, fCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return pendingNodes, isValidPayload, nil
|
||||
}
|
||||
|
||||
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
|
||||
blockVersion := roBlock.Version()
|
||||
block := roBlock.Block()
|
||||
@@ -383,9 +583,49 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
|
||||
return nil
|
||||
}
|
||||
|
||||
// refreshCaches updates the next slot state cache and epoch boundary caches.
|
||||
// Before Fulu this is done synchronously; from Fulu onward it is deferred to a goroutine.
|
||||
func (s *Service) refreshCaches(ctx context.Context, currentSlot primitives.Slot, headRoot [32]byte, headState state.BeaconState, accessRoot [32]byte) {
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
if lastState.Version() < version.Fulu {
|
||||
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
|
||||
} else {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
s.updateCachesAndEpochBoundary(ctx, currentSlot, headState, accessRoot, lastRoot, lastState)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// updateCachesAndEpochBoundary updates the next slot state cache and handles
|
||||
// epoch boundary processing. If the lastRoot matches accessRoot, the cached
|
||||
// last state is reused; otherwise, the head state is advanced instead.
|
||||
func (s *Service) updateCachesAndEpochBoundary(ctx context.Context, currentSlot primitives.Slot, headState state.BeaconState, accessRoot [32]byte, lastRoot []byte, lastState state.BeaconState) {
|
||||
if bytes.Equal(lastRoot, accessRoot[:]) {
|
||||
// Happy case: the last advanced state is the head, so we keep it.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
} else {
|
||||
// The last advanced state was not the head, so we use the head state instead of advancing it.
|
||||
headState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, accessRoot[:], headState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, accessRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}
|
||||
|
||||
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
|
||||
// caches. The caller of this function must not hold a lock on the forkchoice store.
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
|
||||
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.ReadOnlyBeaconState, blockRoot []byte) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
|
||||
defer span.End()
|
||||
// return early if we are advancing to a past epoch
|
||||
@@ -976,37 +1216,20 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
headRoot := s.headRoot()
|
||||
headState := s.headState(ctx)
|
||||
s.headLock.RUnlock()
|
||||
lastRoot, lastState := transition.LastCachedState()
|
||||
if lastState == nil {
|
||||
lastRoot, lastState = headRoot[:], headState
|
||||
}
|
||||
// Before Fulu we need to process the next slot to find out if we are proposing.
|
||||
if lastState.Version() < version.Fulu {
|
||||
// Copy all the field tries in our cached state in the event of late
|
||||
// blocks.
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
|
||||
var accessRoot [32]byte
|
||||
isFull, err := headState.IsParentBlockFull()
|
||||
gloasFirstSlot, _ := slots.EpochStart(params.BeaconConfig().GloasForkEpoch)
|
||||
if err != nil || !isFull || headState.Slot() <= gloasFirstSlot {
|
||||
accessRoot = headRoot
|
||||
} else {
|
||||
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
|
||||
defer func() {
|
||||
go func() {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
|
||||
defer cancel()
|
||||
lastState.CopyAllTries()
|
||||
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
|
||||
log.WithError(err).Debug("Could not update next slot state cache")
|
||||
}
|
||||
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
|
||||
log.WithError(err).Error("Could not update epoch boundary caches")
|
||||
}
|
||||
}()
|
||||
}()
|
||||
accessRoot, err = headState.LatestBlockHash()
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash, using head root as access root")
|
||||
accessRoot = headRoot
|
||||
}
|
||||
}
|
||||
s.refreshCaches(ctx, currentSlot, headRoot, headState, accessRoot)
|
||||
// return early if we already started building a block for the current
|
||||
// head root
|
||||
_, has := s.cfg.PayloadIDCache.PayloadID(s.CurrentSlot()+1, headRoot)
|
||||
@@ -1014,7 +1237,7 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
|
||||
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:], accessRoot[:])
|
||||
// return early if we are not proposing next slot
|
||||
if attribute.IsEmpty() {
|
||||
return
|
||||
@@ -1026,32 +1249,35 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
|
||||
return
|
||||
}
|
||||
_, err = s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
|
||||
id, err := s.notifyForkchoiceUpdateGloas(ctx, bh, attribute)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
} else {
|
||||
s.headLock.RLock()
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
s.headLock.RUnlock()
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
|
||||
return
|
||||
if id != nil {
|
||||
s.cfg.PayloadIDCache.Set(s.CurrentSlot()+1, headRoot, [8]byte(*id))
|
||||
}
|
||||
return
|
||||
}
|
||||
s.headLock.RLock()
|
||||
headBlock, err := s.headBlock()
|
||||
if err != nil {
|
||||
s.headLock.RUnlock()
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
|
||||
return
|
||||
}
|
||||
s.headLock.RUnlock()
|
||||
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: headBlock,
|
||||
attributes: attribute,
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: headRoot,
|
||||
headBlock: headBlock,
|
||||
attributes: attribute,
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
@@ -22,6 +23,7 @@ import (
|
||||
mathutil "github.com/OffchainLabs/prysm/v7/math"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
@@ -44,7 +46,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig) (*fcuConfig, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
|
||||
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:], cfg.headRoot[:])
|
||||
return fcuArgs, nil
|
||||
}
|
||||
|
||||
@@ -64,26 +66,32 @@ func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig) (*fcuConfig,
|
||||
// block is not the head of the chain. It requires the caller holds a lock on
|
||||
// Forkchoice.
|
||||
func (s *Service) logNonCanonicalBlockReceived(blockRoot [32]byte, headRoot [32]byte) {
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.Weight(blockRoot)
|
||||
receivedWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(blockRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", blockRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
headWeight, err := s.cfg.ForkChoiceStore.Weight(headRoot)
|
||||
headWeight, err := s.cfg.ForkChoiceStore.ConsensusNodeWeight(headRoot)
|
||||
if err != nil {
|
||||
log.WithField("root", fmt.Sprintf("%#x", headRoot)).Warn("Could not determine node weight")
|
||||
}
|
||||
log.WithFields(logrus.Fields{
|
||||
fields := logrus.Fields{
|
||||
"receivedRoot": fmt.Sprintf("%#x", blockRoot),
|
||||
"receivedWeight": receivedWeight,
|
||||
"headRoot": fmt.Sprintf("%#x", headRoot),
|
||||
"headWeight": headWeight,
|
||||
}).Debug("Head block is not the received block")
|
||||
}
|
||||
headEmpty, headFull, err := s.cfg.ForkChoiceStore.PayloadWeights(headRoot)
|
||||
if err == nil {
|
||||
fields["headEmptyWeight"] = headEmpty
|
||||
fields["headFullWeight"] = headFull
|
||||
}
|
||||
log.WithFields(fields).Debug("Head block is not the received block")
|
||||
}
|
||||
|
||||
// fcuArgsNonCanonicalBlock returns the arguments to the FCU call when the
|
||||
// incoming block is non-canonical, that is, based on the head root.
|
||||
func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig) (*fcuConfig, error) {
|
||||
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot)
|
||||
headState, headBlock, err := s.getStateAndBlock(cfg.ctx, cfg.headRoot, cfg.headRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -193,10 +201,32 @@ func reportProcessingTime(startTime time.Time) {
|
||||
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
|
||||
}
|
||||
|
||||
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// GetPrestateToPropose returns the pre-state for a proposer to base its block on.
|
||||
// It is similar to GetBlockPreState but skips verifications that are unnecessary when proposing.
|
||||
func (s *Service) GetPrestateToPropose(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.GetPreStateToPropose")
|
||||
defer span.End()
|
||||
|
||||
accessRoot, err := s.getLookupParentRoot(b)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get lookup parent root")
|
||||
}
|
||||
|
||||
bl := b.Block()
|
||||
preState, err := s.cfg.StateGen.StateByRoot(ctx, accessRoot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not get pre state for slot %d", bl.Slot())
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, errors.Wrapf(err, "nil pre state for slot %d", bl.Slot())
|
||||
}
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
// GetBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
|
||||
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
|
||||
// is in the correct time window.
|
||||
func (s *Service) getBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
func (s *Service) GetBlockPreState(ctx context.Context, b consensus_blocks.ROBlock) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.getBlockPreState")
|
||||
defer span.End()
|
||||
|
||||
@@ -359,6 +389,7 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
return err
|
||||
}
|
||||
root := signed.Block().ParentRoot()
|
||||
child := signed
|
||||
// As long as parent node is not in fork choice store, and parent node is in DB.
|
||||
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
|
||||
b, err := s.getBlock(ctx, root)
|
||||
@@ -372,10 +403,33 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hasPayload := false
|
||||
if roblock.Version() >= version.Gloas {
|
||||
sbid, err := child.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", child.Block().Slot())
|
||||
}
|
||||
if sbid == nil || sbid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", child.Block().Slot())
|
||||
}
|
||||
parentBid, err := b.Block().Body().SignedExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not get execution payload bid for block at slot %d", b.Block().Slot())
|
||||
}
|
||||
if parentBid == nil || parentBid.Message == nil {
|
||||
return fmt.Errorf("missing execution payload bid for block at slot %d", b.Block().Slot())
|
||||
}
|
||||
if bytes.Equal(sbid.Message.ParentBlockHash, parentBid.Message.BlockHash) {
|
||||
hasPayload = true
|
||||
}
|
||||
}
|
||||
root = b.Block().ParentRoot()
|
||||
child = b
|
||||
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: jCheckpoint,
|
||||
FinalizedCheckpoint: fCheckpoint}
|
||||
FinalizedCheckpoint: fCheckpoint,
|
||||
HasPayload: hasPayload,
|
||||
}
|
||||
pendingNodes = append(pendingNodes, args)
|
||||
}
|
||||
if len(pendingNodes) == 0 {
|
||||
|
||||
@@ -164,7 +164,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
err := service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{})
|
||||
err := service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{})
|
||||
require.NoError(t, err)
|
||||
jcp := service.CurrentJustifiedCheckpt()
|
||||
jroot := bytesutil.ToBytes32(jcp.Root)
|
||||
@@ -194,7 +194,7 @@ func TestStore_OnBlockBatch_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.saveInitSyncBlock(ctx, rwsb.Root(), wsb))
|
||||
blks = append(blks, rwsb)
|
||||
}
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, &das.MockAvailabilityStore{}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, blks, nil, &das.MockAvailabilityStore{}))
|
||||
}
|
||||
|
||||
func TestCachedPreState_CanGetFromStateSummary(t *testing.T) {
|
||||
@@ -734,7 +734,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -786,7 +786,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -850,7 +850,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1325,7 +1325,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
@@ -1339,7 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
@@ -1353,7 +1353,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
@@ -1367,7 +1367,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
go func() {
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
@@ -1445,7 +1445,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1467,7 +1467,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1490,7 +1490,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1515,7 +1515,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1552,7 +1552,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -1581,7 +1581,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1650,7 +1650,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1673,7 +1673,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1695,7 +1695,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1726,7 +1726,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1756,7 +1756,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -1796,7 +1796,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1823,7 +1823,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1853,7 +1853,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1915,7 +1915,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1937,7 +1937,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1959,7 +1959,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -1992,7 +1992,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
currStoreFinalizedEpoch := service.FinalizedCheckpt().Epoch
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2024,7 +2024,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, rowsb)
|
||||
preState, err = service.GetBlockPreState(ctx, rowsb)
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
@@ -2074,7 +2074,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
rwsb, err := consensusblocks.NewROBlock(wsb)
|
||||
require.NoError(t, err)
|
||||
// We use onBlockBatch here because the valid chain is missing in forkchoice
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, &das.MockAvailabilityStore{}))
|
||||
require.NoError(t, service.onBlockBatch(ctx, []consensusblocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{}))
|
||||
// Check that the head is now VALID and the node is not optimistic
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
headRoot, err = service.HeadRoot(ctx)
|
||||
@@ -2116,7 +2116,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2184,7 +2184,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2457,7 +2457,7 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2517,7 +2517,7 @@ func TestRollbackBlock_SavePostStateInfo_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2575,7 +2575,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, roblock)
|
||||
preState, err := service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -2592,7 +2592,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, roblock)
|
||||
preState, err = service.GetBlockPreState(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
@@ -3639,3 +3639,61 @@ func TestHandleBlockPayloadAttestations(t *testing.T) {
|
||||
require.NoError(t, s.handleBlockPayloadAttestations(ctx, wsb.Block(), headState))
|
||||
})
|
||||
}
|
||||
|
||||
func TestUpdateCachesAndEpochBoundary_MatchingRoots(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
|
||||
service.updateCachesAndEpochBoundary(t.Context(), 1, st, accessRoot, accessRoot[:], st)
|
||||
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
func TestUpdateCachesAndEpochBoundary_DifferentRoots(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
headState, _ := util.DeterministicGenesisState(t, 1)
|
||||
lastState, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
lastRoot := [32]byte{'b'}
|
||||
|
||||
service.updateCachesAndEpochBoundary(t.Context(), 1, headState, accessRoot, lastRoot[:], lastState)
|
||||
|
||||
// Cache should be keyed by accessRoot, not lastRoot.
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
|
||||
cached = transition.NextSlotState(lastRoot[:], 1)
|
||||
require.Equal(t, true, cached == nil)
|
||||
}
|
||||
|
||||
func TestRefreshCaches_NoCachedState(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
headRoot := [32]byte{'h'}
|
||||
|
||||
service.refreshCaches(t.Context(), 1, headRoot, st, headRoot)
|
||||
|
||||
cached := transition.NextSlotState(headRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
func TestRefreshCaches_CachedStateMatchesAccessRoot(t *testing.T) {
|
||||
service := testServiceNoDB(t)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
accessRoot := [32]byte{'a'}
|
||||
headRoot := [32]byte{'h'}
|
||||
|
||||
// Pre-populate the cache with accessRoot.
|
||||
require.NoError(t, transition.UpdateNextSlotCache(t.Context(), accessRoot[:], st))
|
||||
|
||||
service.refreshCaches(t.Context(), 1, headRoot, st, accessRoot)
|
||||
|
||||
cached := transition.NextSlotState(accessRoot[:], 1)
|
||||
require.NotNil(t, cached)
|
||||
require.Equal(t, primitives.Slot(1), cached.Slot())
|
||||
}
|
||||
|
||||
@@ -134,38 +134,64 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)

 	start = time.Now()
 	// return early if we haven't changed head
-	newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx)
+	newHeadRoot, newHeadBlockHash, full, err := s.cfg.ForkChoiceStore.FullHead(ctx)
 	if err != nil {
 		log.WithError(err).Error("Could not compute head from new attestations")
 		return
 	}
-	if !s.isNewHead(newHeadRoot) {
+	if !s.isNewHead(newHeadRoot, full) {
 		return
 	}
 	log.WithField("newHeadRoot", fmt.Sprintf("%#x", newHeadRoot)).Debug("Head changed due to attestations")
-	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot)
+	var accessRoot [32]byte
+	postGloas := slots.ToEpoch(proposingSlot) >= params.BeaconConfig().GloasForkEpoch
+	if full && postGloas {
+		accessRoot = newHeadBlockHash
+	} else {
+		accessRoot = newHeadRoot
+	}
+	headState, headBlock, err := s.getStateAndBlock(ctx, newHeadRoot, accessRoot)
 	if err != nil {
-		log.WithError(err).Error("Could not get head block")
+		log.WithError(err).Error("Could not get head block and state")
 		return
 	}
 	newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
-	fcuArgs := &fcuConfig{
-		headState:     headState,
-		headRoot:      newHeadRoot,
-		headBlock:     headBlock,
-		proposingSlot: proposingSlot,
-	}
 	if s.inRegularSync() {
-		fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
-		if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
+		attr := s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:], accessRoot[:])
+		if attr != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
 			return
 		}
-		go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
+		if postGloas {
+			go func() {
+				pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, newHeadBlockHash, attr)
+				if err != nil {
+					log.WithError(err).Error("Could not update forkchoice with engine")
+				}
+				if pid == nil {
+					if attr != nil {
+						log.Warn("Engine did not return a payload ID for the fork choice update with attributes")
+					}
+					return
+				}
+				var pId [8]byte
+				copy(pId[:], pid[:])
+				s.cfg.PayloadIDCache.Set(proposingSlot, newHeadRoot, pId)
+			}()
+		} else {
+			fcuArgs := &fcuConfig{
+				headState:     headState,
+				headRoot:      newHeadRoot,
+				headBlock:     headBlock,
+				proposingSlot: proposingSlot,
+				attributes:    attr,
+			}
+			go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
+		}
 	}
-	if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
+	if err := s.saveHead(s.ctx, newHeadRoot, headBlock, headState); err != nil {
 		log.WithError(err).Error("Could not save head")
 	}
-	s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
+	s.pruneAttsFromPool(s.ctx, headState, headBlock)
 }

 // This processes fork choice attestations from the pool to account for validator votes and fork choice.
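The access-root selection is the heart of this hunk. A minimal restatement of the rule, assuming the FullHead semantics shown above (the helper name is hypothetical, for illustration only):

// accessRootFor restates the branch above: once Gloas is active and the head
// node is full, per-slot caches are keyed by the head payload's block hash
// rather than the beacon block root, since the post-payload state is tracked
// under the payload hash.
func accessRootFor(postGloas, full bool, headRoot, headBlockHash [32]byte) [32]byte {
	if full && postGloas {
		return headBlockHash
	}
	return headRoot
}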
@@ -112,7 +112,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {

 	roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
 	require.NoError(t, err)
-	preState, err := service.getBlockPreState(ctx, roblock)
+	preState, err := service.GetBlockPreState(ctx, roblock)
 	require.NoError(t, err)
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
@@ -174,7 +174,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {

 	roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
 	require.NoError(t, err)
-	preState, err := service.getBlockPreState(ctx, roblock)
+	preState, err := service.GetBlockPreState(ctx, roblock)
 	require.NoError(t, err)
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)

@@ -41,10 +41,12 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
 // BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
 type BlockReceiver interface {
 	ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
-	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
+	ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error
 	HasBlock(ctx context.Context, root [32]byte) bool
 	RecentBlockSlot(root [32]byte) (primitives.Slot, error)
 	BlockBeingSynced([32]byte) bool
+	GetBlockPreState(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
+	GetPrestateToPropose(ctx context.Context, b blocks.ROBlock) (state.BeaconState, error)
 }

 // BlobReceiver interface defines the methods of chain service for receiving new
@@ -100,7 +102,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
 		return errors.Wrap(err, "new ro block with root")
 	}

-	preState, err := s.getBlockPreState(ctx, roblock)
+	preState, err := s.GetBlockPreState(ctx, roblock)
 	if err != nil {
 		return errors.Wrap(err, "could not get block's prestate")
 	}
@@ -151,7 +153,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig

 	// Have we been finalizing? Should we start saving hot states to db?
 	if err := s.checkSaveHotStateDB(ctx); err != nil {
-		return errors.Wrap(err, "check save hot state db")
+		log.WithError(err).Error("Could not check save hot state DB")
 	}

 	// We apply the same heuristic to some of our more important caches.
@@ -364,12 +366,14 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
 			}
 		}()
 	}

+	go s.checkpointStateCache.EvictUpTo(finalized.Epoch)
 }

 // ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
 // the state, performing batch verification of all collected signatures and then performing the appropriate
 // actions for a block post-transition.
-func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
+func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, envelopes []interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityChecker) error {
 	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
 	defer span.End()

@@ -377,7 +381,7 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
 	defer s.cfg.ForkChoiceStore.Unlock()

 	// Apply state transition on the incoming newly received block batches, one by one.
-	if err := s.onBlockBatch(ctx, blocks, avs); err != nil {
+	if err := s.onBlockBatch(ctx, blocks, envelopes, avs); err != nil {
 		err := errors.Wrap(err, "could not process block in batch")
 		tracing.AnnotateError(span, err)
 		return err
@@ -417,6 +421,15 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
 	if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil {
 		return err
 	}
+	for _, e := range envelopes {
+		protoEnv, ok := e.Proto().(*ethpb.SignedExecutionPayloadEnvelope)
+		if !ok {
+			return errors.New("could not type assert signed envelope to proto")
+		}
+		if err := s.cfg.BeaconDB.SaveExecutionPayloadEnvelope(ctx, protoEnv); err != nil {
+			return errors.Wrap(err, "could not save execution payload envelope")
+		}
+	}
 	finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
 	if finalized == nil {
 		return errNilFinalizedInStore

@@ -281,7 +281,7 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
 			require.NoError(t, err)
 			rwsb, err := blocks.NewROBlock(wsb)
 			require.NoError(t, err)
-			err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, &das.MockAvailabilityStore{})
+			err = s.ReceiveBlockBatch(ctx, []blocks.ROBlock{rwsb}, nil, &das.MockAvailabilityStore{})
 			if tt.wantedErr != "" {
 				assert.ErrorContains(t, tt.wantedErr, err)
 			} else {

@@ -4,11 +4,15 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"time"

+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
+	statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
+	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/execution"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -29,9 +33,18 @@ type ExecutionPayloadEnvelopeReceiver interface {
 }

 // ReceiveExecutionPayloadEnvelope processes a signed execution payload envelope for the Gloas fork.
-func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
+func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) (err error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveExecutionPayloadEnvelope")
 	defer span.End()
+	start := time.Now()
+	defer func() {
+		beaconExecutionPayloadEnvelopeProcessingDurationSeconds.Observe(time.Since(start).Seconds())
+		if err != nil {
+			beaconExecutionPayloadEnvelopeInvalidTotal.Inc()
+			return
+		}
+		beaconExecutionPayloadEnvelopeValidTotal.Inc()
+	}()

 	envelope, err := signed.Envelope()
 	if err != nil {
@@ -102,9 +115,25 @@ func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed in
 		return err
 	}

+	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
+		Type: statefeed.PayloadProcessed,
+		Data: &statefeed.PayloadProcessedData{
+			Slot:      envelope.Slot(),
+			BlockRoot: root,
+		},
+	})
+
+	execution, err := envelope.Execution()
+	if err != nil {
+		log.WithError(err).Error("Could not get execution payload from envelope for logging")
+		return nil
+	}
+
 	log.WithFields(logrus.Fields{
-		"slot":      envelope.Slot(),
-		"blockRoot": fmt.Sprintf("%#x", root),
+		"slot":       envelope.Slot(),
+		"blockRoot":  fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
+		"blockHash":  fmt.Sprintf("%#x", bytesutil.Trunc(execution.BlockHash())),
+		"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(execution.ParentHash())),
 	}).Info("Processed execution payload envelope")
 	return nil
 }
@@ -121,13 +150,21 @@ func (s *Service) postPayloadHeadUpdate(ctx context.Context, envelope interfaces

 	s.headLock.Lock()
 	s.head.state = st
+	s.head.full = true
 	s.headLock.Unlock()

-	if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
-		log.WithError(err).Error("Could not update next slot cache")
-	}
+	go func() {
+		ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
+		defer cancel()
+		if err := transition.UpdateNextSlotCache(ctx, blockHash[:], st); err != nil {
+			log.WithError(err).Error("Could not update next slot cache")
+		}
+		if err := s.handleEpochBoundary(ctx, envelope.Slot(), st, blockHash[:]); err != nil {
+			log.WithError(err).Error("Could not handle epoch boundary")
+		}
+	}()

-	attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot)
+	attr := s.getPayloadAttribute(ctx, st, envelope.Slot()+1, headRoot, blockHash[:])
 	if s.inRegularSync() {
 		go func() {
 			pid, err := s.notifyForkchoiceUpdateGloas(s.ctx, blockHash, attr)
@@ -166,6 +203,50 @@ func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, envelope inter
 	return preState, nil
 }

+func (s *Service) callNewPayload(
+	ctx context.Context,
+	payload interfaces.ExecutionData,
+	versionedHashes []common.Hash,
+	parentRoot common.Hash,
+	requests *enginev1.ExecutionRequests,
+	slot primitives.Slot,
+) (bool, error) {
+	_, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
+	if err == nil {
+		return true, nil
+	}
+	if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
+		log.WithFields(logrus.Fields{
+			"slot":             slot,
+			"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
+		}).Info("Called new payload with optimistic envelope")
+		return false, nil
+	}
+	if errors.Is(err, execution.ErrInvalidPayloadStatus) {
+		return false, invalidBlock{error: ErrInvalidPayload}
+	}
+	return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
+}
+
+func (s *Service) notifyNewEnvelopeFromBlock(ctx context.Context, b blocks.ROBlock, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
+	ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelopeFromBlock")
+	defer span.End()
+
+	payload, err := envelope.Execution()
+	if err != nil {
+		return false, errors.Wrap(err, "could not get execution payload from envelope")
+	}
+	sbid, err := b.Block().Body().SignedExecutionPayloadBid()
+	if err != nil {
+		return false, errors.Wrap(err, "could not get signed execution payload bid from block")
+	}
+	versionedHashes := make([]common.Hash, len(sbid.Message.BlobKzgCommitments))
+	for i, c := range sbid.Message.BlobKzgCommitments {
+		versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
+	}
+	return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(b.Block().ParentRoot()), envelope.ExecutionRequests(), envelope.Slot())
+}
+
 // The returned boolean indicates whether the payload was valid or if it was accepted as syncing (optimistic).
 func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
 	ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewEnvelope")
@@ -175,7 +256,6 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
 	if err != nil {
 		return false, errors.Wrap(err, "could not get execution payload from envelope")
 	}
-
 	latestBid, err := st.LatestExecutionPayloadBid()
 	if err != nil {
 		return false, errors.Wrap(err, "could not get latest execution payload bid")
@@ -185,25 +265,7 @@ func (s *Service) notifyNewEnvelope(ctx context.Context, st state.BeaconState, e
 	for i, c := range commitments {
 		versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(c)
 	}
-
-	parentRoot := common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot))
-	requests := envelope.ExecutionRequests()
-
-	_, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &parentRoot, requests)
-	if err == nil {
-		return true, nil
-	}
-	if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
-		log.WithFields(logrus.Fields{
-			"slot":             envelope.Slot(),
-			"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
-		}).Info("Called new payload with optimistic envelope")
-		return false, nil
-	}
-	if errors.Is(err, execution.ErrInvalidPayloadStatus) {
-		return false, invalidBlock{error: ErrInvalidPayload}
-	}
-	return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
+	return s.callNewPayload(ctx, payload, versionedHashes, common.Hash(bytesutil.ToBytes32(st.LatestBlockHeader().ParentRoot)), envelope.ExecutionRequests(), envelope.Slot())
 }

 func (s *Service) validateExecutionOnEnvelope(ctx context.Context, st state.BeaconState, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {

@@ -2,7 +2,6 @@ package blockchain

 import (
 	"context"
-	"slices"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -29,16 +28,12 @@ func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb
 	if err != nil {
 		return err
 	}
-	ptc, err := gloas.PayloadCommittee(ctx, st, a.Data.Slot)
+	idx, err := gloas.PayloadCommitteeIndex(ctx, st, a.Data.Slot, a.ValidatorIndex)
 	if err != nil {
 		return err
 	}
-	idx := slices.Index(ptc, a.ValidatorIndex)
-	if idx == -1 {
-		return errors.New("validator not in PTC")
-	}
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
-	s.cfg.ForkChoiceStore.SetPTCVote(root, uint64(idx), a.Data.PayloadPresent, a.Data.BlobDataAvailable)
+	s.cfg.ForkChoiceStore.SetPTCVote(root, idx, a.Data.PayloadPresent, a.Data.BlobDataAvailable)
 	return nil
 }
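The old lookup-then-search pattern above is folded into a single helper. Only the call site appears in this diff, so the following is a hypothetical sketch of what gloas.PayloadCommitteeIndex plausibly does, reconstructed from the deleted lines:

// Sketch only: assumes the new helper wraps the old PayloadCommittee lookup
// plus slices.Index search, returning the validator's PTC position as the
// uint64 that SetPTCVote now takes directly.
func payloadCommitteeIndex(ctx context.Context, st state.BeaconState, slot primitives.Slot, vi primitives.ValidatorIndex) (uint64, error) {
	ptc, err := gloas.PayloadCommittee(ctx, st, slot)
	if err != nil {
		return 0, err
	}
	i := slices.Index(ptc, vi)
	if i == -1 {
		return 0, errors.New("validator not in PTC")
	}
	return uint64(i), nil
}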
@@ -36,6 +36,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	prysmTime "github.com/OffchainLabs/prysm/v7/time"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
@@ -213,6 +214,7 @@ func (s *Service) Start() {
 	}
 	s.spawnProcessAttestationsRoutine()
 	go s.runLateBlockTasks()
+	go s.runLatePayloadTasks()
 }

 // Stop the blockchain service's main event loop and associated goroutines.
@@ -343,7 +345,7 @@ func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) erro
 			return errors.Wrap(err, "could not get head state")
 		}
 	}
-	if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
+	if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false, false}); err != nil {
 		return errors.Wrap(err, "could not set head")
 	}
 	log.WithFields(logrus.Fields{
@@ -419,6 +421,12 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
 	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
 		log.WithError(err).Fatal("Could not process genesis block for fork choice")
 	}
+	// In Gloas, blocks start as empty (pending) nodes and become full when the
+	// execution payload envelope arrives. The genesis block has no separate
+	// payload delivery, so mark it as full immediately.
+	if genesisState.Version() >= version.Gloas {
+		s.cfg.ForkChoiceStore.MarkFullNode(genesisBlkRoot)
+	}
 	s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
 	// Set genesis as fully validated
 	if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
@@ -432,6 +440,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
 		genesisState,
 		genesisBlk.Block().Slot(),
 		false,
+		false,
 	}); err != nil {
 		log.WithError(err).Fatal("Could not set head")
 	}

@@ -12,6 +12,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
+	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/OffchainLabs/prysm/v7/time/slots"
 	"github.com/pkg/errors"
 )
@@ -77,8 +78,12 @@ func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
 		log.WithError(err).Error("Could not build forkchoice chain, starting with finalized block as head")
 		return nil
 	}
+	resolveChainPayloadStatus(chain)
 	s.cfg.ForkChoiceStore.Lock()
 	defer s.cfg.ForkChoiceStore.Unlock()
+	if err := s.markFinalizedRootFull(chain, fRoot); err != nil {
+		log.WithError(err).Error("Could not mark finalized root as full in forkchoice")
+	}
 	return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
 }

@@ -145,6 +150,68 @@ func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
 	return nil
 }

+// resolveChainPayloadStatus determines which blocks in the chain had their
+// execution payloads delivered by checking if consecutive blocks' bids indicate
+// payload delivery. For each pair of blocks (chain[i], chain[i+1]), if the next
+// block's bid parentBlockHash equals the current block's bid blockHash, the
+// current block's payload was delivered.
+func resolveChainPayloadStatus(chain []*forkchoicetypes.BlockAndCheckpoints) {
+	for i := 0; i < len(chain)-1; i++ {
+		curr := chain[i].Block.Block()
+		next := chain[i+1].Block.Block()
+		if curr.Version() < version.Gloas || next.Version() < version.Gloas {
+			continue
+		}
+		currBid, err := curr.Body().SignedExecutionPayloadBid()
+		if err != nil || currBid == nil || currBid.Message == nil {
+			continue
+		}
+		nextBid, err := next.Body().SignedExecutionPayloadBid()
+		if err != nil || nextBid == nil || nextBid.Message == nil {
+			continue
+		}
+		if bytes.Equal(nextBid.Message.ParentBlockHash, currBid.Message.BlockHash) {
+			chain[i].HasPayload = true
+		}
+	}
+}
+
+// markFinalizedRootFull checks whether the finalized root block's execution
+// payload was delivered by inspecting the first block in the chain. If the first
+// block's bid parentBlockHash equals the finalized block's bid blockHash, the
+// finalized block's payload was delivered and a full node must be created in
+// forkchoice. The caller must hold the forkchoice lock.
+func (s *Service) markFinalizedRootFull(chain []*forkchoicetypes.BlockAndCheckpoints, fRoot [32]byte) error {
+	if len(chain) == 0 {
+		return nil
+	}
+	firstBlock := chain[0].Block.Block()
+	if firstBlock.Version() < version.Gloas {
+		return nil
+	}
+	firstBid, err := firstBlock.Body().SignedExecutionPayloadBid()
+	if err != nil || firstBid == nil || firstBid.Message == nil {
+		return nil
+	}
+	fBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
+	if err != nil {
+		return errors.Wrap(err, "could not get finalized block")
+	}
+	if fBlock.Block().Version() < version.Gloas {
+		return nil
+	}
+	fBid, err := fBlock.Block().Body().SignedExecutionPayloadBid()
+	if err != nil || fBid == nil || fBid.Message == nil {
+		return nil
+	}
+	if !bytes.Equal(firstBid.Message.ParentBlockHash, fBid.Message.BlockHash) {
+		return nil
+	}
+	// The finalized block's payload was delivered. Create the full node.
+	s.cfg.ForkChoiceStore.MarkFullNode(fRoot)
+	return nil
+}
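Both helpers reduce to a single predicate: a block's payload counts as delivered when a descendant's bid commits to it. A minimal sketch of that check, using the field names from the code above:

// payloadDelivered mirrors the chaining check above: the child's bid building
// on the parent's payload hash is the on-chain evidence that the parent's
// payload was revealed.
func payloadDelivered(parentBid, childBid *ethpb.ExecutionPayloadBid) bool {
	if parentBid == nil || childBid == nil {
		return false
	}
	return bytes.Equal(childBid.ParentBlockHash, parentBid.BlockHash)
}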
 func (s *Service) setupForkchoiceCheckpoints() error {
 	justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
 	if err != nil {
@@ -166,11 +233,11 @@ func (s *Service) setupForkchoiceCheckpoints() error {
 	defer s.cfg.ForkChoiceStore.Unlock()
 	if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
 		Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
+		log.WithError(err).Error("Could not update forkchoice's justified checkpoint, trying to update finalized checkpoint anyway")
 	}
 	if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
 		Root: fRoot}); err != nil {
-		return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
+		log.WithError(err).Error("Could not update forkchoice's finalized checkpoint")
 	}
 	s.cfg.ForkChoiceStore.SetGenesisTime(s.genesisTime)
 	return nil

@@ -106,7 +106,7 @@ func Test_setupForkchoiceTree_Head(t *testing.T) {
 	require.NoError(t, err)
 	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
 	require.NoError(t, err)
-	preState, err := service.getBlockPreState(ctx, roblock)
+	preState, err := service.GetBlockPreState(ctx, roblock)
 	require.NoError(t, err)
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)

@@ -94,6 +94,11 @@ func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []bl
 	return nil
 }

+func (mb *mockBroadcaster) BroadcastForEpoch(_ context.Context, _ proto.Message, _ primitives.Epoch) error {
+	mb.broadcastCalled = true
+	return nil
+}
+
 func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
 }

@@ -77,6 +77,14 @@ type ChainService struct {
 	DataColumns              []blocks.VerifiedRODataColumn
 	TargetRoot               [32]byte
 	MockHeadSlot             *primitives.Slot
+	DependentRootCB          func([32]byte, primitives.Epoch) ([32]byte, error)
+	MockCanonicalRoots       map[primitives.Slot][32]byte
+	MockCanonicalFull        map[primitives.Slot]bool
+	MockPayloadContentLookup map[[32]byte][32]byte
+	MockPayloadContentIsFull map[[32]byte]bool
+	ParentPayloadReadyVal    *bool
+	ForkchoiceRoots          map[[32]byte]bool
+	ForkchoiceBlockHashes    map[[32]byte][32]byte
 }

 func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
@@ -274,7 +282,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
 }

 // ReceiveBlockBatch processes blocks in batches from initial-sync.
-func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
+func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ []interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityChecker) error {
 	if s.State == nil {
 		return ErrNilState
 	}
@@ -334,6 +342,16 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
 	return nil
 }

+// GetBlockPreState mocks the same method in the chain service.
+func (s *ChainService) GetBlockPreState(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
+	return s.State, nil
+}
+
+// GetPrestateToPropose mocks the same method in the chain service.
+func (s *ChainService) GetPrestateToPropose(_ context.Context, _ blocks.ROBlock) (state.BeaconState, error) {
+	return s.State.Copy(), nil
+}
+
 // HeadSlot mocks HeadSlot method in chain service.
 func (s *ChainService) HeadSlot() primitives.Slot {
 	if s.MockHeadSlot != nil {
@@ -569,10 +587,23 @@ func (s *ChainService) IsOptimistic(_ context.Context) (bool, error) {
 }

 // InForkchoice mocks the same method in the chain service
-func (s *ChainService) InForkchoice(_ [32]byte) bool {
+func (s *ChainService) InForkchoice(root [32]byte) bool {
+	if s.ForkchoiceRoots != nil {
+		return s.ForkchoiceRoots[root]
+	}
 	return !s.NotFinalized
 }

+// BlockHash mocks the execution payload block hash lookup for a beacon block root.
+func (s *ChainService) BlockHash(root [32]byte) ([32]byte, error) {
+	if s.ForkchoiceBlockHashes != nil {
+		if blockHash, ok := s.ForkchoiceBlockHashes[root]; ok {
+			return blockHash, nil
+		}
+	}
+	return [32]byte{}, errors.New("block hash not found")
+}
+
 // IsOptimisticForRoot mocks the same method in the chain service.
 func (s *ChainService) IsOptimisticForRoot(_ context.Context, root [32]byte) (bool, error) {
 	s.OptimisticCheckRootReceived = root
@@ -630,7 +661,7 @@ func prepareForkchoiceState(
 	}

 	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
-	st, err := state_native.InitializeFromProtoBellatrix(base)
+	st, err := state_native.InitializeFromProtoUnsafeBellatrix(base)
 	if err != nil {
 		return nil, blocks.ROBlock{}, err
 	}
@@ -689,7 +720,60 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
 	if s.ForkChoiceStore != nil {
 		return s.ForkChoiceStore.HighestReceivedBlockSlot()
 	}
-	return 0
+	if s.Slot != nil {
+		return *s.Slot
+	}
+	return s.BlockSlot
 }

+// HighestReceivedBlockRoot mocks the same method in the chain service
+func (s *ChainService) HighestReceivedBlockRoot() [32]byte {
+	if s.ForkChoiceStore != nil {
+		return s.ForkChoiceStore.HighestReceivedBlockRoot()
+	}
+	if s.Slot != nil && s.MockCanonicalRoots != nil {
+		if root, ok := s.MockCanonicalRoots[*s.Slot]; ok {
+			return root
+		}
+	}
+	if len(s.Root) == 32 {
+		return bytesutil.ToBytes32(s.Root)
+	}
+	return [32]byte{}
+}
+
+// HasFullNode mocks the same method in the chain service
+func (s *ChainService) HasFullNode(root [32]byte) bool {
+	if s.ForkChoiceStore != nil {
+		return s.ForkChoiceStore.HasFullNode(root)
+	}
+	if s.Slot != nil && s.MockCanonicalRoots != nil && s.MockCanonicalFull != nil {
+		if r, ok := s.MockCanonicalRoots[*s.Slot]; ok && r == root {
+			return s.MockCanonicalFull[*s.Slot]
+		}
+	}
+	if s.ForkchoiceRoots != nil {
+		return s.ForkchoiceRoots[root]
+	}
+	return false
+}
+
+// ShouldIgnoreData returns true if the data for the given parent root and slot should be ignored.
+func (s *ChainService) ShouldIgnoreData(_ [32]byte, _ primitives.Slot) bool {
+	return false
+}
+
+// PayloadContentLookup mocks the same method in the chain service.
+func (s *ChainService) PayloadContentLookup(root [32]byte) ([32]byte, bool) {
+	if s.ForkChoiceStore != nil {
+		return s.ForkChoiceStore.PayloadContentLookup(root)
+	}
+	if s.MockPayloadContentLookup != nil {
+		if value, ok := s.MockPayloadContentLookup[root]; ok {
+			return value, s.MockPayloadContentIsFull[root]
+		}
+	}
+	return root, false
+}
+
 // InsertNode mocks the same method in the chain service
@@ -775,8 +859,19 @@ func (c *ChainService) ReceiveExecutionPayloadEnvelope(_ context.Context, _ inte
 	return nil
 }

+// ParentPayloadReady mocks the same method in the chain service.
+func (s *ChainService) ParentPayloadReady(_ interfaces.ReadOnlyBeaconBlock) bool {
+	if s.ParentPayloadReadyVal != nil {
+		return *s.ParentPayloadReadyVal
+	}
+	return true
+}
+
 // DependentRootForEpoch mocks the same method in the chain service
-func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
+func (c *ChainService) DependentRootForEpoch(root [32]byte, epoch primitives.Epoch) ([32]byte, error) {
+	if c.DependentRootCB != nil {
+		return c.DependentRootCB(root, epoch)
+	}
 	return c.TargetRoot, nil
 }

@@ -785,6 +880,17 @@ func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]b
 	return c.TargetRoot, nil
 }

+func (c *ChainService) CanonicalNodeAtSlot(slot primitives.Slot) ([32]byte, bool) {
+	var root [32]byte
+	if c.MockCanonicalRoots != nil {
+		root = c.MockCanonicalRoots[slot]
+	}
+	if c.MockCanonicalFull != nil {
+		return root, c.MockCanonicalFull[slot]
+	}
+	return root, false
+}
+
 // MockSyncChecker is a mock implementation of blockchain.Checker.
 // We can't make an assertion here that this is true because that would create a circular dependency.
 type MockSyncChecker struct {

beacon-chain/cache/BUILD.bazel (5 changes)
@@ -15,6 +15,7 @@ go_library(
         "common.go",
         "doc.go",
         "error.go",
+        "highest_execution_payload_bid.go",
         "interfaces.go",
         "log.go",
         "payload_attestation.go",
@@ -22,6 +23,7 @@ go_library(
         "proposer_indices.go",
         "proposer_indices_disabled.go",  # keep
         "proposer_indices_type.go",
+        "proposer_preferences.go",
        "registration.go",
         "skip_slot_cache.go",
         "subnet_ids.go",
@@ -55,6 +57,7 @@ go_library(
         "//proto/prysm/v1alpha1:go_default_library",
         "//proto/prysm/v1alpha1/attestation:go_default_library",
         "//runtime/version:go_default_library",
+        "//time/slots:go_default_library",
         "@com_github_ethereum_go_ethereum//common:go_default_library",
         "@com_github_hashicorp_golang_lru//:go_default_library",
         "@com_github_patrickmn_go_cache//:go_default_library",
@@ -77,10 +80,12 @@ go_test(
         "checkpoint_state_test.go",
         "committee_fuzz_test.go",
         "committee_test.go",
+        "highest_execution_payload_bid_test.go",
         "payload_attestation_test.go",
         "payload_id_test.go",
         "private_access_test.go",
         "proposer_indices_test.go",
+        "proposer_preferences_test.go",
         "registration_test.go",
         "skip_slot_cache_test.go",
         "subnet_ids_test.go",

beacon-chain/cache/attestation_data.go (9 changes)
@@ -9,10 +9,11 @@ import (
 )

 type AttestationConsensusData struct {
-	Slot     primitives.Slot
-	HeadRoot []byte
-	Target   forkchoicetypes.Checkpoint
-	Source   forkchoicetypes.Checkpoint
+	Slot          primitives.Slot
+	HeadRoot      []byte
+	Target        forkchoicetypes.Checkpoint
+	Source        forkchoicetypes.Checkpoint
+	IsPayloadFull bool
 }

 // AttestationDataCache stores cached results of AttestationData requests.

beacon-chain/cache/checkpoint_state.go (51 changes)
@@ -3,8 +3,10 @@ package cache
 import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/crypto/hash"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
+	"github.com/OffchainLabs/prysm/v7/time/slots"
 	lru "github.com/hashicorp/golang-lru"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -25,6 +27,14 @@ var (
 		Name: "check_point_state_cache_hit",
 		Help: "The number of check point state requests that are present in the cache.",
 	})
+	checkpointStateSize = promauto.NewGauge(prometheus.GaugeOpts{
+		Name: "check_point_state_cache_size",
+		Help: "The number of entries in the check point state cache.",
+	})
+	checkpointStateEvicted = promauto.NewCounter(prometheus.CounterOpts{
+		Name: "check_point_state_cache_evicted_total",
+		Help: "The number of entries evicted from the check point state cache.",
+	})
 )

 // CheckpointStateCache is a struct with 1 queue for looking up state by checkpoint.
@@ -49,14 +59,14 @@ func (c *CheckpointStateCache) StateByCheckpoint(cp *ethpb.Checkpoint) (state.Be

 	item, exists := c.cache.Get(h)

-	if exists && item != nil {
-		checkpointStateHit.Inc()
-		// Copy here is unnecessary since the return will only be used to verify attestation signature.
-		return item.(state.BeaconState), nil
+	if !exists || item == nil {
+		checkpointStateMiss.Inc()
+		return nil, nil
 	}

-	checkpointStateMiss.Inc()
-	return nil, nil
+	checkpointStateHit.Inc()
+	// Copy here is unnecessary since the return will only be used to verify attestation signature.
+	return item.(state.BeaconState), nil
 }

 // AddCheckpointState adds CheckpointState object to the cache. This method also trims the least
@@ -66,6 +76,35 @@ func (c *CheckpointStateCache) AddCheckpointState(cp *ethpb.Checkpoint, s state.
 	if err != nil {
 		return err
 	}

 	c.cache.Add(h, s)
+	checkpointStateSize.Set(float64(c.cache.Len()))
 	return nil
 }
+
+// EvictUpTo removes all entries from the cache whose state epoch is at
+// or before the given epoch. Returns the number of evicted entries.
+func (c *CheckpointStateCache) EvictUpTo(epoch primitives.Epoch) int {
+	evicted := 0
+	for _, key := range c.cache.Keys() {
+		// Peek is used here to avoid updating the recency of the entry,
+		// as we are only checking for eviction.
+		v, ok := c.cache.Peek(key)
+		if !ok {
+			continue
+		}
+
+		st := v.(state.ReadOnlyBeaconState)
+		if slots.ToEpoch(st.Slot()) <= epoch {
+			c.cache.Remove(key)
+			evicted++
+		}
+	}
+
+	if evicted > 0 {
+		checkpointStateSize.Set(float64(c.cache.Len()))
+		checkpointStateEvicted.Add(float64(evicted))
+	}
+
+	return evicted
+}
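For context, the call site added earlier in this diff ties eviction to finalization, so checkpoint states never outlive the finalized epoch:

	// From executePostFinalizationTasks above: evict asynchronously once a new
	// checkpoint finalizes, since pre-finalization states are no longer needed
	// for attestation signature verification.
	go s.checkpointStateCache.EvictUpTo(finalized.Epoch)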
beacon-chain/cache/checkpoint_state_test.go (73 changes)
@@ -72,3 +72,76 @@ func TestCheckpointStateCache_MaxSize(t *testing.T) {

 	assert.Equal(t, cache.MaxCheckpointStateSize(), len(c.Cache().Keys()))
 }
+
+func TestCheckpointStateCache_EvictFinalized_FinalizedEntry(t *testing.T) {
+	c := cache.NewCheckpointStateCache()
+
+	cp := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
+	st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
+	require.NoError(t, err)
+	require.NoError(t, c.AddCheckpointState(cp, st))
+
+	evicted := c.EvictUpTo(1)
+	assert.Equal(t, 1, evicted, "expected finalized entry to be evicted")
+
+	s, err := c.StateByCheckpoint(cp)
+	require.NoError(t, err)
+	assert.Equal(t, state.BeaconState(nil), s, "expected cache to be empty after eviction")
+}
+
+func TestCheckpointStateCache_EvictFinalized_NotFinalizedEntry(t *testing.T) {
+	c := cache.NewCheckpointStateCache()
+
+	cp := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'A'}, 32)}
+	st, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
+	require.NoError(t, err)
+	require.NoError(t, c.AddCheckpointState(cp, st))
+
+	evicted := c.EvictUpTo(3)
+	assert.Equal(t, 0, evicted, "expected non-finalized entry NOT to be evicted")
+
+	s, err := c.StateByCheckpoint(cp)
+	require.NoError(t, err)
+	assert.NotNil(t, s, "expected entry to still be in cache")
+}
+
+func TestCheckpointStateCache_EvictFinalized_Mixed(t *testing.T) {
+	c := cache.NewCheckpointStateCache()
+
+	cp1 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'A'}, 32)}
+	st1, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 32})
+	require.NoError(t, err)
+
+	cp2 := &ethpb.Checkpoint{Epoch: 2, Root: bytesutil.PadTo([]byte{'B'}, 32)}
+	st2, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 64})
+	require.NoError(t, err)
+
+	cp5 := &ethpb.Checkpoint{Epoch: 5, Root: bytesutil.PadTo([]byte{'C'}, 32)}
+	st5, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Slot: 160})
+	require.NoError(t, err)
+
+	require.NoError(t, c.AddCheckpointState(cp1, st1))
+	require.NoError(t, c.AddCheckpointState(cp2, st2))
+	require.NoError(t, c.AddCheckpointState(cp5, st5))
+
+	evicted := c.EvictUpTo(3)
+	assert.Equal(t, 2, evicted, "expected epochs 1 and 2 to be evicted")
+
+	s, err := c.StateByCheckpoint(cp1)
+	require.NoError(t, err)
+	assert.Equal(t, state.BeaconState(nil), s, "expected cp1 to be evicted")
+
+	s, err = c.StateByCheckpoint(cp2)
+	require.NoError(t, err)
+	assert.Equal(t, state.BeaconState(nil), s, "expected cp2 to be evicted")
+
+	s, err = c.StateByCheckpoint(cp5)
+	require.NoError(t, err)
+	assert.NotNil(t, s, "expected cp5 to still be in cache")
+}
+
+func TestCheckpointStateCache_EvictFinalized_EmptyCache(t *testing.T) {
+	c := cache.NewCheckpointStateCache()
+	evicted := c.EvictUpTo(0)
+	assert.Equal(t, 0, evicted, "expected no eviction from empty cache")
+}

beacon-chain/cache/highest_execution_payload_bid.go (new file, 76 lines)
package cache

import (
	"sync"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

type executionPayloadBidKey struct {
	slot       primitives.Slot
	parentHash [32]byte
	parentRoot [32]byte
}

// HighestExecutionPayloadBidCache stores the highest bid for each
// (slot, parent_block_hash, parent_block_root) tuple.
type HighestExecutionPayloadBidCache struct {
	bids map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid
	lock sync.RWMutex
}

// NewHighestExecutionPayloadBidCache initializes a highest-bid cache.
func NewHighestExecutionPayloadBidCache() *HighestExecutionPayloadBidCache {
	return &HighestExecutionPayloadBidCache{
		bids: make(map[executionPayloadBidKey]*ethpb.SignedExecutionPayloadBid),
	}
}

// Get returns the highest cached bid for the given tuple.
func (c *HighestExecutionPayloadBidCache) Get(
	slot primitives.Slot,
	parentHash [32]byte,
	parentRoot [32]byte,
) (*ethpb.SignedExecutionPayloadBid, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	bid, ok := c.bids[executionPayloadBidKey{
		slot:       slot,
		parentHash: parentHash,
		parentRoot: parentRoot,
	}]
	return bid, ok
}

// SetIfHigher inserts the bid if absent, or replaces the cached bid only if
// the incoming value is strictly greater.
func (c *HighestExecutionPayloadBidCache) SetIfHigher(bid *ethpb.SignedExecutionPayloadBid) bool {
	c.lock.Lock()
	defer c.lock.Unlock()

	key := executionPayloadBidKey{
		slot:       bid.Message.Slot,
		parentHash: [32]byte(bid.Message.ParentBlockHash),
		parentRoot: [32]byte(bid.Message.ParentBlockRoot),
	}
	cached, ok := c.bids[key]
	if !ok || bid.Message.Value > cached.Message.Value {
		c.bids[key] = bid
		return true
	}
	return false
}

// PruneBefore removes all cached bids for slots before the provided slot.
func (c *HighestExecutionPayloadBidCache) PruneBefore(slot primitives.Slot) {
	c.lock.Lock()
	defer c.lock.Unlock()

	for key := range c.bids {
		if key.slot < slot {
			delete(c.bids, key)
		}
	}
}
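A brief usage sketch of the cache above. The gossip-handling context and variable names are assumed for illustration; the tests that follow exercise the same surface:

// Sketch, assuming already-validated bids arriving from gossip.
c := cache.NewHighestExecutionPayloadBidCache()
if c.SetIfHigher(signedBid) {
	// signedBid is now the best known bid for its (slot, parent hash, parent root) tuple.
}
if best, ok := c.Get(slot, parentHash, parentRoot); ok {
	_ = best // candidate bid for block production
}
c.PruneBefore(currentSlot) // drop bids for slots that can no longer be proposed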
beacon-chain/cache/highest_execution_payload_bid_test.go (new file, 105 lines)
package cache

import (
	"bytes"
	"testing"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestHighestExecutionPayloadBidCache_GetSetIfHigher(t *testing.T) {
	c := NewHighestExecutionPayloadBidCache()
	bid := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)

	inserted := c.SetIfHigher(bid)
	require.Equal(t, true, inserted)

	got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
	require.Equal(t, true, ok)
	require.DeepEqual(t, bid, got)
}

func TestHighestExecutionPayloadBidCache_SetIfHigher_ReplacesOnlyOnHigherValue(t *testing.T) {
	c := NewHighestExecutionPayloadBidCache()
	low := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
	same := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
	high := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 101)

	require.Equal(t, true, c.SetIfHigher(low))
	require.Equal(t, false, c.SetIfHigher(same))

	got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
	require.Equal(t, true, ok)
	require.DeepEqual(t, low, got)

	require.Equal(t, true, c.SetIfHigher(high))
	got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x02})
	require.Equal(t, true, ok)
	require.DeepEqual(t, high, got)
}

func TestHighestExecutionPayloadBidCache_SetIfHigher_KeepsDistinctTuples(t *testing.T) {
	c := NewHighestExecutionPayloadBidCache()
	first := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x02}, 100)
	second := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x02}, 50)
	third := testSignedExecutionPayloadBid(10, [32]byte{0x01}, [32]byte{0x04}, 75)

	require.Equal(t, true, c.SetIfHigher(first))
	require.Equal(t, true, c.SetIfHigher(second))
	require.Equal(t, true, c.SetIfHigher(third))

	got, ok := c.Get(10, [32]byte{0x01}, [32]byte{0x02})
	require.Equal(t, true, ok)
	require.DeepEqual(t, first, got)

	got, ok = c.Get(10, [32]byte{0x03}, [32]byte{0x02})
	require.Equal(t, true, ok)
	require.DeepEqual(t, second, got)

	got, ok = c.Get(10, [32]byte{0x01}, [32]byte{0x04})
	require.Equal(t, true, ok)
	require.DeepEqual(t, third, got)
}

func TestHighestExecutionPayloadBidCache_PruneBefore(t *testing.T) {
	c := NewHighestExecutionPayloadBidCache()
	oldBid := testSignedExecutionPayloadBid(9, [32]byte{0x01}, [32]byte{0x02}, 100)
	currentBid := testSignedExecutionPayloadBid(10, [32]byte{0x03}, [32]byte{0x04}, 101)

	require.Equal(t, true, c.SetIfHigher(oldBid))
	require.Equal(t, true, c.SetIfHigher(currentBid))

	c.PruneBefore(10)

	_, ok := c.Get(9, [32]byte{0x01}, [32]byte{0x02})
	require.Equal(t, false, ok)

	got, ok := c.Get(10, [32]byte{0x03}, [32]byte{0x04})
	require.Equal(t, true, ok)
	require.DeepEqual(t, currentBid, got)
}

func testSignedExecutionPayloadBid(
	slot primitives.Slot,
	parentHash [32]byte,
	parentRoot [32]byte,
	value uint64,
) *ethpb.SignedExecutionPayloadBid {
	return &ethpb.SignedExecutionPayloadBid{
		Message: &ethpb.ExecutionPayloadBid{
			Slot:             slot,
			ParentBlockHash:  bytes.Clone(parentHash[:]),
			ParentBlockRoot:  bytes.Clone(parentRoot[:]),
			BlockHash:        bytes.Repeat([]byte{0x03}, 32),
			PrevRandao:       bytes.Repeat([]byte{0x04}, 32),
			FeeRecipient:     bytes.Repeat([]byte{0x05}, 20),
			GasLimit:         30_000_000,
			BuilderIndex:     1,
			Value:            primitives.Gwei(value),
			ExecutionPayment: 10,
		},
		Signature: bytes.Repeat([]byte{0x06}, 96),
	}
}

beacon-chain/cache/proposer_preferences.go (new file, 87 lines)
package cache

import (
	"sync"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// ProposerPreference stores the proposer fee recipient and gas limit for a slot.
type ProposerPreference struct {
	FeeRecipient []byte
	GasLimit     uint64
}

// ProposerPreferencesCache stores proposer preferences by slot.
type ProposerPreferencesCache struct {
	slotToPreferences map[primitives.Slot]ProposerPreference
	lock              sync.RWMutex
}

// NewProposerPreferencesCache initializes a proposer preferences cache.
func NewProposerPreferencesCache() *ProposerPreferencesCache {
	return &ProposerPreferencesCache{
		slotToPreferences: make(map[primitives.Slot]ProposerPreference),
	}
}

// Add stores proposer preferences for a slot. If the slot already exists, the
// existing value is kept and false is returned.
func (c *ProposerPreferencesCache) Add(slot primitives.Slot, feeRecipient []byte, gasLimit uint64) bool {
	c.lock.Lock()
	defer c.lock.Unlock()

	if _, ok := c.slotToPreferences[slot]; ok {
		return false
	}

	// FeeRecipient comes from validated SSZ-decoded proposer preferences, so
	// retaining the slice reference here is intentional.
	c.slotToPreferences[slot] = ProposerPreference{
		FeeRecipient: feeRecipient,
		GasLimit:     gasLimit,
	}
	return true
}

// Get returns proposer preferences for a slot.
func (c *ProposerPreferencesCache) Get(slot primitives.Slot) (ProposerPreference, bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()

	pref, ok := c.slotToPreferences[slot]
	if !ok {
		return ProposerPreference{}, false
	}

	return pref, true
}

// Has returns true if proposer preferences for the slot already exist.
func (c *ProposerPreferencesCache) Has(slot primitives.Slot) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()

	_, ok := c.slotToPreferences[slot]
	return ok
}

// PruneBefore removes all proposer preferences for slots before the provided slot.
func (c *ProposerPreferencesCache) PruneBefore(slot primitives.Slot) {
	c.lock.Lock()
	defer c.lock.Unlock()

	for cachedSlot := range c.slotToPreferences {
		if cachedSlot < slot {
			delete(c.slotToPreferences, cachedSlot)
		}
	}
}

// Clear removes all cached proposer preferences.
func (c *ProposerPreferencesCache) Clear() {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.slotToPreferences = make(map[primitives.Slot]ProposerPreference)
}
beacon-chain/cache/proposer_preferences_test.go
vendored
Normal file
63
beacon-chain/cache/proposer_preferences_test.go
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
func TestProposerPreferencesCache_AddGetHas(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(123)
|
||||
feeRecipient := []byte{1, 2, 3, 4}
|
||||
|
||||
require.Equal(t, false, c.Has(slot))
|
||||
added := c.Add(slot, feeRecipient, 42)
|
||||
require.Equal(t, true, added)
|
||||
require.Equal(t, true, c.Has(slot))
|
||||
|
||||
pref, ok := c.Get(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, feeRecipient, pref.FeeRecipient)
|
||||
require.Equal(t, uint64(42), pref.GasLimit)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_AddDuplicateSlot(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(456)
|
||||
|
||||
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
|
||||
require.Equal(t, false, c.Add(slot, []byte{2}, 20))
|
||||
|
||||
pref, ok := c.Get(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.DeepEqual(t, []byte{1}, pref.FeeRecipient)
|
||||
require.Equal(t, uint64(10), pref.GasLimit)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_Clear(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
slot := primitives.Slot(789)
|
||||
|
||||
require.Equal(t, true, c.Add(slot, []byte{1}, 10))
|
||||
c.Clear()
|
||||
|
||||
require.Equal(t, false, c.Has(slot))
|
||||
_, ok := c.Get(slot)
|
||||
require.Equal(t, false, ok)
|
||||
}
|
||||
|
||||
func TestProposerPreferencesCache_PruneBefore(t *testing.T) {
|
||||
c := NewProposerPreferencesCache()
|
||||
|
||||
require.Equal(t, true, c.Add(10, []byte{1}, 10))
|
||||
require.Equal(t, true, c.Add(11, []byte{2}, 11))
|
||||
require.Equal(t, true, c.Add(12, []byte{3}, 12))
|
||||
|
||||
c.PruneBefore(11)
|
||||
|
||||
require.Equal(t, false, c.Has(10))
|
||||
require.Equal(t, true, c.Has(11))
|
||||
require.Equal(t, true, c.Has(12))
|
||||
}
|
||||
2
beacon-chain/cache/sync_committee.go
vendored
2
beacon-chain/cache/sync_committee.go
vendored
@@ -172,7 +172,7 @@ func (s *SyncCommitteeCache) idxPositionInCommittee(
|
||||
// UpdatePositionsInCommittee updates caching of validators position in sync committee in respect to
|
||||
// current epoch and next epoch. This should be called when `current_sync_committee` and `next_sync_committee`
|
||||
// change and that happens every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`.
|
||||
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.BeaconState) error {
|
||||
func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, st state.ReadOnlyBeaconState) error {
|
||||
// since we call UpdatePositionsInCommittee asynchronously, keep track of the cache value
|
||||
// seen at the beginning of the routine and compare at the end before updating. If the underlying value has been
|
||||
// cycled (new address), don't update it.
|
||||
|
||||
@@ -32,7 +32,7 @@ func (s *FakeSyncCommitteeCache) NextPeriodIndexPosition(root [32]byte, valIdx p
|
||||
}
|
||||
|
||||
// UpdatePositionsInCommittee -- fake.
|
||||
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.BeaconState) error {
|
||||
func (s *FakeSyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoot [32]byte, state state.ReadOnlyBeaconState) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -60,6 +60,7 @@ go_test(
|
||||
"block_operations_fuzz_test.go",
|
||||
"block_regression_test.go",
|
||||
"eth1_data_test.go",
|
||||
"exit_builder_test.go",
|
||||
"exit_test.go",
|
||||
"exports_test.go",
|
||||
"genesis_test.go",
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
@@ -62,6 +63,16 @@ func ProcessVoluntaryExits(
|
||||
if exit == nil || exit.Exit == nil {
|
||||
return nil, errors.New("nil voluntary exit in block body")
|
||||
}
|
||||
// [New in Gloas:EIP7732] Builder exits are identified by the builder index flag.
|
||||
if beaconState.Version() >= version.Gloas && exit.Exit.ValidatorIndex.IsBuilderIndex() {
|
||||
if err := verifyBuilderExitAndSignature(beaconState, exit); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not verify builder exit %d", idx)
|
||||
}
|
||||
if err := gloas.InitiateBuilderExit(beaconState, exit.Exit.ValidatorIndex.ToBuilderIndex()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
val, err := beaconState.ValidatorAtIndexReadOnly(exit.Exit.ValidatorIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -102,19 +113,24 @@ func ProcessVoluntaryExits(
|
||||
// initiate_validator_exit(state, voluntary_exit.validator_index)
|
||||
func VerifyExitAndSignature(
|
||||
validator state.ReadOnlyValidator,
|
||||
state state.ReadOnlyBeaconState,
|
||||
st state.ReadOnlyBeaconState,
|
||||
signed *ethpb.SignedVoluntaryExit,
|
||||
) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
|
||||
fork := state.Fork()
|
||||
genesisRoot := state.GenesisValidatorsRoot()
|
||||
// [New in Gloas:EIP7732] Builder exits are verified separately.
|
||||
if st.Version() >= version.Gloas && signed.Exit.ValidatorIndex.IsBuilderIndex() {
|
||||
return verifyBuilderExitAndSignature(st, signed)
|
||||
}
|
||||
|
||||
fork := st.Fork()
|
||||
genesisRoot := st.GenesisValidatorsRoot()
|
||||
|
||||
// EIP-7044: Beginning in Deneb, fix the fork version to Capella.
|
||||
// This allows for signed validator exits to be valid forever.
|
||||
if state.Version() >= version.Deneb {
|
||||
if st.Version() >= version.Deneb {
|
||||
fork = ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
@@ -123,7 +139,7 @@ func VerifyExitAndSignature(
|
||||
}
|
||||
|
||||
exit := signed.Exit
|
||||
if err := verifyExitConditions(state, validator, exit); err != nil {
|
||||
if err := verifyExitConditions(st, validator, exit); err != nil {
|
||||
return err
|
||||
}
|
||||
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
|
||||
@@ -198,3 +214,57 @@ func verifyExitConditions(st state.ReadOnlyBeaconState, validator state.ReadOnly
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyBuilderExitAndSignature validates a builder voluntary exit.
|
||||
// [New in Gloas:EIP7732]
|
||||
func verifyBuilderExitAndSignature(st state.ReadOnlyBeaconState, signed *ethpb.SignedVoluntaryExit) error {
|
||||
if signed == nil || signed.Exit == nil {
|
||||
return errors.New("nil exit")
|
||||
}
|
||||
exit := signed.Exit
|
||||
builderIndex := exit.ValidatorIndex.ToBuilderIndex()
|
||||
|
||||
// Exits must specify an epoch when they become valid; they are not valid before then.
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
if currentEpoch < exit.Epoch {
|
||||
return fmt.Errorf("expected current epoch >= exit epoch, received %d < %d", currentEpoch, exit.Epoch)
|
||||
}
|
||||
|
||||
// Verify the builder is active.
|
||||
active, err := st.IsActiveBuilder(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if builder is active")
|
||||
}
|
||||
if !active {
|
||||
return fmt.Errorf("builder %d is not active", builderIndex)
|
||||
}
|
||||
|
||||
// Only exit builder if it has no pending balance to withdraw.
|
||||
pendingBalance, err := st.BuilderPendingBalanceToWithdraw(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get builder pending balance to withdraw")
|
||||
}
|
||||
if pendingBalance != 0 {
|
||||
return fmt.Errorf("builder %d has pending balance to withdraw: %d", builderIndex, pendingBalance)
|
||||
}
|
||||
|
||||
// Verify signature using builder pubkey with Capella fork version (EIP-7044).
|
||||
pubkey, err := st.BuilderPubkey(builderIndex)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get builder pubkey")
|
||||
}
|
||||
fork := ðpb.Fork{
|
||||
PreviousVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
CurrentVersion: params.BeaconConfig().CapellaForkVersion,
|
||||
Epoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
}
|
||||
genesisRoot := st.GenesisValidatorsRoot()
|
||||
domain, err := signing.Domain(fork, exit.Epoch, params.BeaconConfig().DomainVoluntaryExit, genesisRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := signing.VerifySigningRoot(exit, pubkey[:], signed.Signature, domain); err != nil {
|
||||
return signing.ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
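The routing above keys off the builder-index flag carried inside VoluntaryExit.ValidatorIndex. Below is a minimal sketch of the expected round-trip between the two index spaces, using only the primitives methods that appear in this change; the exact flag encoding is not shown here and is assumed to be lossless.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

func main() {
	b := primitives.BuilderIndex(7)
	// A builder exit carries the flagged index in the validator-index field.
	flagged := b.ToValidatorIndex()
	fmt.Println(flagged.IsBuilderIndex())      // expected: true
	fmt.Println(flagged.ToBuilderIndex() == b) // expected: true, assuming a lossless flag encoding
}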
|
||||
|
||||
beacon-chain/core/blocks/exit_builder_test.go (new file, 307 lines)
@@ -0,0 +1,307 @@
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
)
|
||||
|
||||
// setGloasTestConfig sets fork epochs so Gloas is active at epoch 5.
|
||||
func setGloasTestConfig(t *testing.T) {
|
||||
t.Helper()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
cfg.GloasForkEpoch = 5
|
||||
params.SetActiveTestCleanup(t, cfg)
|
||||
}
|
||||
|
||||
// newGloasStateWithBuilder creates a minimal Gloas beacon state with one active builder
|
||||
// and returns the state along with the builder's BLS private key.
|
||||
func newGloasStateWithBuilder(t *testing.T, builderIndex primitives.BuilderIndex, epoch primitives.Epoch) (state.BeaconState, bls.SecretKey) {
|
||||
t.Helper()
|
||||
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
builder := ðpb.Builder{
|
||||
Pubkey: priv.PublicKey().Marshal(),
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
DepositEpoch: 0,
|
||||
Balance: 32_000_000_000,
|
||||
ExecutionAddress: make([]byte, 20),
|
||||
}
|
||||
|
||||
builders := make([]*ethpb.Builder, int(builderIndex)+1)
|
||||
for i := range builders {
|
||||
if primitives.BuilderIndex(i) == builderIndex {
|
||||
builders[i] = builder
|
||||
} else {
|
||||
builders[i] = ðpb.Builder{
|
||||
Pubkey: make([]byte, 48),
|
||||
WithdrawableEpoch: cfg.FarFutureEpoch,
|
||||
DepositEpoch: 0,
|
||||
ExecutionAddress: make([]byte, 20),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stProto := ðpb.BeaconStateGloas{
|
||||
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: cfg.FuluForkVersion,
|
||||
CurrentVersion: cfg.GloasForkVersion,
|
||||
Epoch: cfg.GloasForkEpoch,
|
||||
},
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{
|
||||
Epoch: epoch - 1,
|
||||
Root: make([]byte, 32),
|
||||
},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Builders: builders,
|
||||
Validators: []*ethpb.Validator{
|
||||
{
|
||||
ExitEpoch: cfg.FarFutureEpoch,
|
||||
ActivationEpoch: 0,
|
||||
PublicKey: make([]byte, 48),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{32_000_000_000},
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
|
||||
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
|
||||
ExecutionPayloadAvailability: make([]byte, cfg.SlotsPerHistoricalRoot/8),
|
||||
}
|
||||
|
||||
for i := range stProto.BlockRoots {
|
||||
stProto.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.StateRoots {
|
||||
stProto.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.RandaoMixes {
|
||||
stProto.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeGloas(stProto)
|
||||
require.NoError(t, err)
|
||||
return st, priv
|
||||
}
|
||||
|
||||
func signBuilderExit(t *testing.T, st state.ReadOnlyBeaconState, exit *ethpb.VoluntaryExit, priv bls.SecretKey) *ethpb.SignedVoluntaryExit {
|
||||
t.Helper()
|
||||
|
||||
sb, err := signing.ComputeDomainAndSign(st, exit.Epoch, exit, params.BeaconConfig().DomainVoluntaryExit, priv)
|
||||
require.NoError(t, err)
|
||||
sig, err := bls.SignatureFromBytes(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return ðpb.SignedVoluntaryExit{
|
||||
Exit: exit,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderExit_HappyPath(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderNotActive(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
// Make builder not active by setting withdrawable epoch (already initiated exit).
|
||||
builder, err := st.Builder(builderIndex)
|
||||
require.NoError(t, err)
|
||||
builder.WithdrawableEpoch = 5
|
||||
require.NoError(t, st.UpdateBuilderAtIndex(builderIndex, builder))
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err = blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "is not active", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderPendingWithdrawal(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
// Give the builder a pending withdrawal.
|
||||
require.NoError(t, st.AppendBuilderPendingWithdrawals([]*ethpb.BuilderPendingWithdrawal{
|
||||
{
|
||||
BuilderIndex: builderIndex,
|
||||
Amount: 1000,
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}))
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "pending balance to withdraw", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderBadSignature(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, _ := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
wrongKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, wrongKey)
|
||||
|
||||
err = blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "signature did not verify", err)
|
||||
}
|
||||
|
||||
func TestVerifyExitAndSignature_BuilderExitInFuture(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch + 1, // Future epoch.
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
err := blocks.VerifyExitAndSignature(nil, st, signed)
|
||||
assert.ErrorContains(t, "expected current epoch >= exit epoch", err)
|
||||
}
|
||||
|
||||
func TestProcessVoluntaryExits_BuilderExit(t *testing.T) {
|
||||
setGloasTestConfig(t)
|
||||
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
epoch := primitives.Epoch(10)
|
||||
st, priv := newGloasStateWithBuilder(t, builderIndex, epoch)
|
||||
|
||||
exit := ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
}
|
||||
signed := signBuilderExit(t, st, exit, priv)
|
||||
|
||||
newState, err := blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify builder's withdrawable epoch was set.
|
||||
builder, err := newState.Builder(builderIndex)
|
||||
require.NoError(t, err)
|
||||
cfg := params.BeaconConfig()
|
||||
expectedWithdrawableEpoch := epoch + cfg.MinBuilderWithdrawabilityDelay
|
||||
assert.Equal(t, expectedWithdrawableEpoch, builder.WithdrawableEpoch)
|
||||
}
|
||||
|
||||
func TestProcessVoluntaryExits_BuilderExitPreGloas(t *testing.T) {
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.CapellaForkEpoch = 1
|
||||
cfg.DenebForkEpoch = 2
|
||||
cfg.ElectraForkEpoch = 3
|
||||
cfg.FuluForkEpoch = 4
|
||||
cfg.GloasForkEpoch = 100 // Gloas not yet active.
|
||||
params.SetActiveTestCleanup(t, cfg)
|
||||
|
||||
epoch := primitives.Epoch(10)
|
||||
builderIndex := primitives.BuilderIndex(0)
|
||||
|
||||
stProto := ðpb.BeaconStateFulu{
|
||||
Slot: cfg.SlotsPerEpoch * primitives.Slot(epoch),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: cfg.DenebForkVersion,
|
||||
CurrentVersion: cfg.FuluForkVersion,
|
||||
Epoch: cfg.FuluForkEpoch,
|
||||
},
|
||||
GenesisValidatorsRoot: make([]byte, 32),
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Validators: []*ethpb.Validator{
|
||||
{ExitEpoch: cfg.FarFutureEpoch, ActivationEpoch: 0, PublicKey: make([]byte, 48)},
|
||||
},
|
||||
Balances: []uint64{32_000_000_000},
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, cfg.EpochsPerHistoricalVector),
|
||||
Slashings: make([]uint64, cfg.EpochsPerSlashingsVector),
|
||||
}
|
||||
for i := range stProto.BlockRoots {
|
||||
stProto.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.StateRoots {
|
||||
stProto.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range stProto.RandaoMixes {
|
||||
stProto.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoUnsafeFulu(stProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedVoluntaryExit{
|
||||
Exit: ðpb.VoluntaryExit{
|
||||
ValidatorIndex: builderIndex.ToValidatorIndex(),
|
||||
Epoch: epoch,
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
// On a pre-Gloas state, builder-flagged exits are not routed to the builder path.
|
||||
// ProcessVoluntaryExits treats the builder-flagged index as a regular validator index,
|
||||
// which fails because no such validator exists.
|
||||
_, err = blocks.ProcessVoluntaryExits(t.Context(), st, []*ethpb.SignedVoluntaryExit{signed}, validators.ExitInformation(st))
|
||||
require.ErrorContains(t, "out of bounds", err)
|
||||
}
|
||||
@@ -2,4 +2,5 @@ package blocks
|
||||
|
||||
var ProcessBLSToExecutionChange = processBLSToExecutionChange
|
||||
var ErrInvalidBLSPrefix = errInvalidBLSPrefix
|
||||
var ErrInvalidWithdrawalCredentials = errInvalidWithdrawalCredentials
|
||||
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
|
||||
|
||||
@@ -192,11 +192,47 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
Block: electraGenesisBlock(root),
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateGloas:
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockGloas{
|
||||
Block: gloasGenesisBlock(root),
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
default:
|
||||
return nil, ErrUnrecognizedState
|
||||
}
|
||||
}
|
||||
|
||||
func gloasGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockGloas {
|
||||
return ðpb.BeaconBlockGloas{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyGloas{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
SignedExecutionPayloadBid: ðpb.SignedExecutionPayloadBid{
|
||||
Message: ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
|
||||
return ðpb.BeaconBlockElectra{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
|
||||
@@ -147,7 +147,7 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, signed)
|
||||
// The state should return an empty validator, even when the validator object in the registry is
|
||||
// nil. This error should return when the withdrawal credentials are invalid or too short.
|
||||
require.ErrorIs(t, err, blocks.ErrInvalidBLSPrefix)
|
||||
require.ErrorIs(t, err, blocks.ErrInvalidWithdrawalCredentials)
|
||||
})
|
||||
t.Run("non-existent validator", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
|
||||
@@ -20,37 +20,46 @@ import (
|
||||
)
|
||||
|
||||
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 1000)
|
||||
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
validators[i].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[i].WithdrawalCredentials = wc
|
||||
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
|
||||
}
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
const (
|
||||
depositCount = uint64(2)
|
||||
amountETH = uint64(32)
|
||||
slot = 0
|
||||
activeBalanceGwei = 10_000
|
||||
)
|
||||
|
||||
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
|
||||
state := stateWithActiveBalanceETH(t, 0)
|
||||
|
||||
secretKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
val := st.Validators()
|
||||
seenPubkeys := make(map[string]struct{})
|
||||
for i := 0; i < len(val); i += 1 {
|
||||
if len(val[i].PublicKey) == 0 {
|
||||
continue
|
||||
}
|
||||
_, ok := seenPubkeys[string(val[i].PublicKey)]
|
||||
if ok {
|
||||
t.Fatalf("duplicated pubkeys")
|
||||
} else {
|
||||
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
|
||||
}
|
||||
withdrawalCredentialsBytes := make([]byte, 32)
|
||||
withdrawalCredentialsBytes[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
withdrawalCredentials := bytesutil.ToBytes32(withdrawalCredentialsBytes)
|
||||
|
||||
validators := state.Validators()
|
||||
require.Equal(t, 0, len(validators))
|
||||
|
||||
deposits := make([]*eth.PendingDeposit, 0, depositCount)
|
||||
for range depositCount {
|
||||
deposit := stateTesting.GeneratePendingDeposit(t, secretKey, amountETH, withdrawalCredentials, slot)
|
||||
deposits = append(deposits, deposit)
|
||||
}
|
||||
|
||||
err = state.SetPendingDeposits(deposits)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = electra.ProcessPendingDeposits(t.Context(), state, activeBalanceGwei)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The first deposit should create a new validator,
|
||||
// and the second deposit should top up the same validator
|
||||
// We should have 1 validator with balance of 64 ETH.
|
||||
validators = state.Validators()
|
||||
require.Equal(t, 1, len(validators))
|
||||
|
||||
balance, err := state.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, depositCount*amountETH, balance)
|
||||
}
|
||||
|
||||
func TestProcessPendingDeposits(t *testing.T) {
|
||||
|
||||
@@ -39,9 +39,6 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
|
||||
if val.IsNil() {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
return nil, fmt.Errorf("validator index exceeds validator length in state %d >= %d", idx, len(st.Balances()))
|
||||
}
|
||||
|
||||
@@ -16,9 +16,6 @@ import (
|
||||
func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{}, // No withdrawal credentials
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF}, // Has withdrawal credentials
|
||||
},
|
||||
@@ -27,22 +24,19 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
params.BeaconConfig().MinActivationBalance + 100_000, // Has excess balance
|
||||
},
|
||||
})
|
||||
// Test that a validator with no withdrawal credentials cannot be switched to compounding.
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, "validator has no withdrawal credentials", electra.SwitchToCompoundingValidator(s, 0))
|
||||
|
||||
// Test that a validator with withdrawal credentials can be switched to compounding.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
v, err := s.ValidatorAtIndex(1)
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 0))
|
||||
v, err := s.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.HasPrefix(v.WithdrawalCredentials, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}), "withdrawal credentials were not updated")
|
||||
// val_1 Balance is not changed
|
||||
b, err := s.BalanceAtIndex(1)
|
||||
// val_0 Balance is not changed
|
||||
b, err := s.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
|
||||
pbd, err := s.PendingDeposits()
|
||||
@@ -50,8 +44,8 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
|
||||
|
||||
// Test that a validator with excess balance can be switched to compounding, excess balance is queued.
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 2))
|
||||
b, err = s.BalanceAtIndex(2)
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(s, 1))
|
||||
b, err = s.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
|
||||
pbd, err = s.PendingDeposits()
|
||||
|
||||
@@ -46,6 +46,9 @@ const (
|
||||
|
||||
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
|
||||
DataColumnReceived = 12
|
||||
|
||||
// PayloadAttestationMessageReceived is sent after a payload attestation message is received from gossip or rpc.
|
||||
PayloadAttestationMessageReceived = 13
|
||||
)
|
||||
|
||||
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
|
||||
@@ -114,3 +117,8 @@ type DataColumnReceivedData struct {
|
||||
BlockRoot [32]byte
|
||||
KzgCommitments [][]byte
|
||||
}
|
||||
|
||||
// PayloadAttestationMessageReceivedData is the data sent with PayloadAttestationMessageReceived events.
|
||||
type PayloadAttestationMessageReceivedData struct {
|
||||
Message *ethpb.PayloadAttestationMessage
|
||||
}
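A hedged sketch of how a handler might publish the new event on the operation feed. The opfeed.Notifier interface and the import paths are assumptions based on how the other events in this package are typically emitted, not part of this change.

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed"
	opfeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// notifyPayloadAttestation publishes a received payload attestation message on
// the operation feed so subscribers (for example the events API) can observe it.
func notifyPayloadAttestation(n opfeed.Notifier, msg *ethpb.PayloadAttestationMessage) {
	n.OperationFeed().Send(&feed.Event{
		Type: opfeed.PayloadAttestationMessageReceived,
		Data: &opfeed.PayloadAttestationMessageReceivedData{Message: msg},
	})
}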
|
||||
|
||||
@@ -33,6 +33,8 @@ const (
|
||||
LightClientOptimisticUpdate
|
||||
// PayloadAttributes events are fired upon a missed slot or new head.
|
||||
PayloadAttributes
|
||||
// PayloadProcessed is sent after a payload envelope has been processed.
|
||||
PayloadProcessed
|
||||
)
|
||||
|
||||
// BlockProcessedData is the data sent with BlockProcessed events.
|
||||
@@ -72,3 +74,9 @@ type InitializedData struct {
|
||||
// GenesisValidatorsRoot represents state.validators.HashTreeRoot().
|
||||
GenesisValidatorsRoot []byte
|
||||
}
|
||||
|
||||
// PayloadProcessedData is the data sent with PayloadProcessed events.
|
||||
type PayloadProcessedData struct {
|
||||
Slot primitives.Slot
|
||||
BlockRoot [32]byte
|
||||
}
|
||||
|
||||
@@ -5,8 +5,10 @@ go_library(
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"bid.go",
|
||||
"builder_exit.go",
|
||||
"deposit_request.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"payload.go",
|
||||
"payload_attestation.go",
|
||||
"pending_payment.go",
|
||||
@@ -38,6 +40,8 @@ go_library(
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -110,7 +110,7 @@ func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyB
|
||||
return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
|
||||
}
|
||||
|
||||
if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
|
||||
if err := ValidatePayloadBidSignature(st, wrappedBid); err != nil {
|
||||
return errors.Wrap(err, "bid signature validation failed")
|
||||
}
|
||||
}
|
||||
@@ -179,10 +179,10 @@ func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayl
|
||||
return nil
|
||||
}
|
||||
|
||||
// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
|
||||
// ValidatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
|
||||
// It validates that the signature was created by the builder specified in the bid
|
||||
// using the appropriate domain for the beacon builder.
|
||||
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
|
||||
func ValidatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
|
||||
bid, err := signedBid.Bid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get bid")
|
||||
|
||||
beacon-chain/core/gloas/builder_exit.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// InitiateBuilderExit initiates the exit of a builder by setting its withdrawable epoch.
|
||||
//
|
||||
// <spec fn="initiate_builder_exit" fork="gloas" hash="3da938d5">
|
||||
// def initiate_builder_exit(state: BeaconState, builder_index: BuilderIndex) -> None:
|
||||
// """
|
||||
// Initiate the exit of the builder with index ``index``.
|
||||
// """
|
||||
// # Return if builder already initiated exit
|
||||
// builder = state.builders[builder_index]
|
||||
// if builder.withdrawable_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
//
|
||||
// # Set builder exit epoch
|
||||
// builder.withdrawable_epoch = get_current_epoch(state) + MIN_BUILDER_WITHDRAWABILITY_DELAY
|
||||
// </spec>
|
||||
func InitiateBuilderExit(s state.BeaconState, builderIndex primitives.BuilderIndex) error {
|
||||
builder, err := s.Builder(builderIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Return if builder already initiated exit.
|
||||
if builder.WithdrawableEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return nil
|
||||
}
|
||||
currentEpoch := slots.ToEpoch(s.Slot())
|
||||
builder.WithdrawableEpoch = currentEpoch + params.BeaconConfig().MinBuilderWithdrawabilityDelay
|
||||
return s.UpdateBuilderAtIndex(builderIndex, builder)
|
||||
}
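The early return makes the operation idempotent: once a withdrawable epoch is set, repeated exit requests leave it untouched. A rough test-style sketch of that property, assuming a state constructor like the newGloasStateWithBuilder helper from the exit tests above is available in the test package.

func TestInitiateBuilderExit_Idempotent(t *testing.T) {
	st, _ := newGloasStateWithBuilder(t, 0, 10) // assumed helper: one active builder, state at epoch 10

	require.NoError(t, gloas.InitiateBuilderExit(st, 0))
	first, err := st.Builder(0)
	require.NoError(t, err)

	// A second exit request must not move the already-set withdrawable epoch.
	require.NoError(t, gloas.InitiateBuilderExit(st, 0))
	second, err := st.Builder(0)
	require.NoError(t, err)
	require.Equal(t, first.WithdrawableEpoch, second.WithdrawableEpoch)
}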
|
||||
@@ -65,26 +65,37 @@ func processDepositRequests(ctx context.Context, beaconState state.BeaconState,
|
||||
// )
|
||||
// </spec>
|
||||
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) error {
|
||||
var err error
|
||||
defer func() {
|
||||
if err == nil {
|
||||
builderDepositsProcessedTotal.Inc()
|
||||
}
|
||||
}()
|
||||
|
||||
if request == nil {
|
||||
return errors.New("nil deposit request")
|
||||
err = errors.New("nil deposit request")
|
||||
return err
|
||||
}
|
||||
|
||||
applied, err := applyBuilderDepositRequest(beaconState, request)
|
||||
var applied bool
|
||||
applied, err = applyBuilderDepositRequest(beaconState, request)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not apply builder deposit")
|
||||
err = errors.Wrap(err, "could not apply builder deposit")
|
||||
return err
|
||||
}
|
||||
if applied {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := beaconState.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
if err = beaconState.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: request.Pubkey,
|
||||
WithdrawalCredentials: request.WithdrawalCredentials,
|
||||
Amount: request.Amount,
|
||||
Signature: request.Signature,
|
||||
Slot: beaconState.Slot(),
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not append deposit request")
|
||||
err = errors.Wrap(err, "could not append deposit request")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
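The rewrite above funnels every return through a single error variable so the deferred hook bumps the counter exactly once, and only on the success path, no matter which return statement is taken. The same pattern in isolation, with a plain integer standing in for the Prometheus counter:

package main

import "fmt"

var processedTotal int // stands in for the promauto counter used above

func process(fail bool) (err error) {
	// Increment the counter only when every step below succeeded.
	defer func() {
		if err == nil {
			processedTotal++
		}
	}()
	if fail {
		return fmt.Errorf("step failed")
	}
	return nil
}

func main() {
	_ = process(true)  // counter unchanged
	_ = process(false) // counter incremented
	fmt.Println(processedTotal) // 1
}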
|
||||
|
||||
beacon-chain/core/gloas/metrics.go (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
package gloas
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var (
|
||||
builderPendingPaymentsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_pending_payments_processed_total",
|
||||
Help: "The number of builder pending payments promoted into the builder pending withdrawal queue.",
|
||||
},
|
||||
)
|
||||
builderDepositsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_deposits_processed_total",
|
||||
Help: "The number of builder-related deposit requests processed.",
|
||||
},
|
||||
)
|
||||
builderExitsProcessedTotal = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "builder_exits_processed_total",
|
||||
Help: "The number of processed builder exits.",
|
||||
},
|
||||
)
|
||||
)
|
||||
@@ -225,7 +225,20 @@ func ApplyExecutionPayload(
|
||||
return errors.Errorf("payload timestamp does not match expected timestamp: payload=%d, expected=%d", payload.Timestamp(), uint64(t.Unix()))
|
||||
}
|
||||
|
||||
if err := processExecutionRequests(ctx, st, envelope.ExecutionRequests()); err != nil {
|
||||
if err := ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), [32]byte(payload.BlockHash())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ApplyExecutionPayloadStateMutations(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
executionRequests *enginev1.ExecutionRequests,
|
||||
blockHash [32]byte,
|
||||
) error {
|
||||
if err := processExecutionRequests(ctx, st, executionRequests); err != nil {
|
||||
return errors.Wrap(err, "could not process execution requests")
|
||||
}
|
||||
|
||||
@@ -237,13 +250,66 @@ func ApplyExecutionPayload(
|
||||
return errors.Wrap(err, "could not set execution payload availability")
|
||||
}
|
||||
|
||||
if err := st.SetLatestBlockHash([32]byte(payload.BlockHash())); err != nil {
|
||||
if err := st.SetLatestBlockHash(blockHash); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block hash")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ApplyBlindedExecutionPayloadEnvelopeForStateGen applies the post-bid state mutations from a
|
||||
// blinded execution payload envelope for replay/state-generation paths.
|
||||
// It patches the latest block header with the previous state root, validates minimal consistency
|
||||
// with the committed bid, and then applies the state mutations.
|
||||
// A nil envelope is a no-op (the payload was not delivered for that slot).
|
||||
func ApplyBlindedExecutionPayloadEnvelopeForStateGen(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
previousStateRoot [32]byte,
|
||||
envelope interfaces.ROBlindedExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if envelope == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
latestHeader.StateRoot = previousStateRoot[:]
|
||||
if err := st.SetLatestBlockHeader(latestHeader); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block header")
|
||||
}
|
||||
|
||||
if envelope.Slot() != st.Slot() {
|
||||
return errors.Errorf("blinded envelope slot does not match state slot: envelope=%d, state=%d", envelope.Slot(), st.Slot())
|
||||
}
|
||||
|
||||
latestBid, err := st.LatestExecutionPayloadBid()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get latest execution payload bid")
|
||||
}
|
||||
if latestBid == nil {
|
||||
return errors.New("latest execution payload bid is nil")
|
||||
}
|
||||
if envelope.BuilderIndex() != latestBid.BuilderIndex() {
|
||||
return errors.Errorf(
|
||||
"blinded envelope builder index does not match committed bid builder index: envelope=%d, bid=%d",
|
||||
envelope.BuilderIndex(),
|
||||
latestBid.BuilderIndex(),
|
||||
)
|
||||
}
|
||||
|
||||
bidBlockHash := latestBid.BlockHash()
|
||||
envelopeBlockHash := envelope.BlockHash()
|
||||
if bidBlockHash != envelopeBlockHash {
|
||||
return errors.Errorf(
|
||||
"blinded envelope block hash does not match committed bid block hash: envelope=%#x, bid=%#x",
|
||||
envelopeBlockHash,
|
||||
bidBlockHash,
|
||||
)
|
||||
}
|
||||
|
||||
return ApplyExecutionPayloadStateMutations(ctx, st, envelope.ExecutionRequests(), envelopeBlockHash)
|
||||
}
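A hedged sketch of how a state-regeneration path might drive this helper, written as if it lived in this package. loadEnvelope is hypothetical and stands for however the caller fetches the stored blinded envelope (nil when the payload was never delivered); the previous state root is assumed to be the root of the post-block state taken before the envelope mutations are applied.

func replayEnvelope(
	ctx context.Context,
	st state.BeaconState,
	loadEnvelope func(primitives.Slot) interfaces.ROBlindedExecutionPayloadEnvelope, // hypothetical source
) error {
	// Root of the post-block state, computed before any envelope mutations.
	prevRoot, err := st.HashTreeRoot(ctx)
	if err != nil {
		return errors.Wrap(err, "could not compute pre-envelope state root")
	}
	env := loadEnvelope(st.Slot()) // nil is a no-op for slots whose payload was not delivered
	return ApplyBlindedExecutionPayloadEnvelopeForStateGen(ctx, st, prevRoot, env)
}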
|
||||
|
||||
func envelopePublicKey(st state.BeaconState, builderIdx primitives.BuilderIndex) (bls.PublicKey, error) {
|
||||
if builderIdx == params.BeaconConfig().BuilderIndexSelfBuild {
|
||||
return proposerPublicKey(st)
|
||||
@@ -301,6 +367,89 @@ func processExecutionRequests(ctx context.Context, st state.BeaconState, rqs *en
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExecutionPayloadEnvelopeSignatureBatch extracts the BLS signature from a signed execution payload
|
||||
// envelope as a SignatureBatch for deferred batch verification.
|
||||
func ExecutionPayloadEnvelopeSignatureBatch(
|
||||
st state.BeaconState,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get envelope: %w", err)
|
||||
}
|
||||
|
||||
builderIdx := envelope.BuilderIndex()
|
||||
publicKey, err := envelopePublicKey(st, builderIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(envelope.Slot())
|
||||
domain, err := signing.Domain(
|
||||
st.Fork(),
|
||||
currentEpoch,
|
||||
params.BeaconConfig().DomainBeaconBuilder,
|
||||
st.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to compute signing domain: %w", err)
|
||||
}
|
||||
|
||||
signingRoot, err := signedEnvelope.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to compute signing root: %w", err)
|
||||
}
|
||||
|
||||
signatureBytes := signedEnvelope.Signature()
|
||||
return &bls.SignatureBatch{
|
||||
Signatures: [][]byte{signatureBytes[:]},
|
||||
PublicKeys: []bls.PublicKey{publicKey},
|
||||
Messages: [][32]byte{signingRoot},
|
||||
Descriptions: []string{"execution payload envelope signature"},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ApplyExecutionPayloadNoVerifySig applies the execution payload envelope to the state without
|
||||
// verifying the envelope signature (returned as a SignatureBatch for deferred batch verification).
|
||||
// The caller provides previousStateRoot instead of recomputing it. After applying the payload,
|
||||
// it verifies the post-envelope state root matches the envelope's declared state root.
|
||||
func ApplyExecutionPayloadNoVerifySig(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
previousStateRoot [32]byte,
|
||||
signedEnvelope interfaces.ROSignedExecutionPayloadEnvelope,
|
||||
) (*bls.SignatureBatch, error) {
|
||||
sigBatch, err := ExecutionPayloadEnvelopeSignatureBatch(st, signedEnvelope)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not extract envelope signature batch")
|
||||
}
|
||||
|
||||
envelope, err := signedEnvelope.Envelope()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get envelope from signed envelope")
|
||||
}
|
||||
|
||||
latestHeader := st.LatestBlockHeader()
|
||||
latestHeader.StateRoot = previousStateRoot[:]
|
||||
if err := st.SetLatestBlockHeader(latestHeader); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set latest block header")
|
||||
}
|
||||
|
||||
if err := ApplyExecutionPayload(ctx, st, envelope); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r, err := st.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute post-envelope state root")
|
||||
}
|
||||
if r != envelope.StateRoot() {
|
||||
return nil, fmt.Errorf("envelope state root mismatch: expected %#x, got %#x", envelope.StateRoot(), r)
|
||||
}
|
||||
|
||||
return sigBatch, nil
|
||||
}
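A hedged sketch of the intended call pattern: apply first, verify the returned batch later, typically after joining it with the block's other signature sets. SignatureBatch.Verify is assumed to behave as it does elsewhere in Prysm's crypto/bls package; this is illustrative rather than the definitive processing path.

func applyThenVerify(
	ctx context.Context,
	st state.BeaconState,
	prevRoot [32]byte,
	signed interfaces.ROSignedExecutionPayloadEnvelope,
) error {
	batch, err := ApplyExecutionPayloadNoVerifySig(ctx, st, prevRoot, signed)
	if err != nil {
		return err
	}
	// In a full block-processing path this batch would usually be joined with
	// other signature sets before a single batched verification.
	ok, err := batch.Verify() // assumed SignatureBatch helper
	if err != nil {
		return err
	}
	if !ok {
		return signing.ErrSigFailedToVerify
	}
	return nil
}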
|
||||
|
||||
// VerifyExecutionPayloadEnvelopeSignature verifies the BLS signature on a signed execution payload envelope.
|
||||
// <spec fn="verify_execution_payload_envelope_signature" fork="gloas" style="full" hash="49483ae2">
|
||||
// def verify_execution_payload_envelope_signature(
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
stderrors "errors"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
@@ -23,6 +24,8 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var ErrValidatorNotInPTC = stderrors.New("validator not in PTC")
|
||||
|
||||
// ProcessPayloadAttestations validates payload attestations in a block body.
|
||||
//
|
||||
// <spec fn="process_payload_attestation" fork="gloas" hash="f46bf0b0">
|
||||
@@ -156,6 +159,24 @@ func PayloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
|
||||
return selected, nil
|
||||
}
|
||||
|
||||
// PayloadCommitteeIndex returns the validator's index position in the payload committee for a slot.
|
||||
func PayloadCommitteeIndex(
|
||||
ctx context.Context,
|
||||
st state.ReadOnlyBeaconState,
|
||||
slot primitives.Slot,
|
||||
validatorIndex primitives.ValidatorIndex,
|
||||
) (uint64, error) {
|
||||
ptc, err := PayloadCommittee(ctx, st, slot)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
idx := slices.Index(ptc, validatorIndex)
|
||||
if idx == -1 {
|
||||
return 0, fmt.Errorf("%w: validator=%d slot=%d", ErrValidatorNotInPTC, validatorIndex, slot)
|
||||
}
|
||||
return uint64(idx), nil
|
||||
}
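Exporting ErrValidatorNotInPTC lets callers distinguish "no PTC duty this slot" from genuine failures. A hedged sketch of a duty lookup built on it, written as if in this package and reusing its imports:

func ptcPosition(
	ctx context.Context,
	st state.ReadOnlyBeaconState,
	slot primitives.Slot,
	val primitives.ValidatorIndex,
) (uint64, bool, error) {
	idx, err := PayloadCommitteeIndex(ctx, st, slot, val)
	if errors.Is(err, ErrValidatorNotInPTC) {
		return 0, false, nil // not selected for PTC duty this slot
	}
	if err != nil {
		return 0, false, err
	}
	return idx, true, nil
}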
|
||||
|
||||
// ptcSeed computes the seed for the payload timeliness committee.
|
||||
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
|
||||
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
|
||||
@@ -254,12 +275,12 @@ func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex
|
||||
offset := (round % 16) * 2
|
||||
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
|
||||
|
||||
val, err := st.ValidatorAtIndex(idx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(idx)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "validator %d", idx)
|
||||
}
|
||||
|
||||
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
||||
return val.EffectiveBalance()*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
|
||||
}
|
||||
|
||||
// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
|
||||
|
||||
@@ -211,7 +211,8 @@ func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
|
||||
errIndex: 0,
|
||||
}
|
||||
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
|
||||
require.ErrorContains(t, "failed to verify indexed form", err)
|
||||
require.ErrorContains(t, "failed to convert to indexed form", err)
|
||||
require.ErrorContains(t, "failed to sample beacon committee 0", err)
|
||||
require.ErrorContains(t, "validator 0", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -242,6 +242,23 @@ func TestProcessExecutionPayload_Success(t *testing.T) {
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestApplyExecutionPayloadStateMutations_UpdatesAvailabilityAndLatestHash(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
newHash := [32]byte{}
|
||||
newHash[0] = 0x99
|
||||
|
||||
require.NoError(t, ApplyExecutionPayloadStateMutations(t.Context(), fixture.state, fixture.envelope.ExecutionRequests, newHash))
|
||||
|
||||
latestHash, err := fixture.state.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, newHash, latestHash)
|
||||
|
||||
available, err := fixture.state.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
}
|
||||
|
||||
func TestProcessExecutionPayload_PrevRandaoMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, func(_ *enginev1.ExecutionPayloadDeneb, bid *ethpb.ExecutionPayloadBid, _ *ethpb.ExecutionPayloadEnvelope) {
|
||||
bid.PrevRandao = bytes.Repeat([]byte{0xFF}, 32)
|
||||
@@ -265,6 +282,95 @@ func TestQueueBuilderPayment_ZeroAmountClearsSlot(t *testing.T) {
|
||||
require.Equal(t, primitives.Gwei(0), payment.Withdrawal.Amount)
|
||||
}
|
||||
|
||||
func TestApplyBlindedExecutionPayloadEnvelopeForStateGen_NilEnvelope(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
require.NoError(t, ApplyBlindedExecutionPayloadEnvelopeForStateGen(t.Context(), fixture.state, [32]byte{}, nil))
|
||||
}
|
||||
|
||||
func TestApplyBlindedExecutionPayloadEnvelopeForStateGen_Success(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
st := fixture.state
|
||||
|
||||
blockHash := [32]byte(fixture.payload.BlockHash)
|
||||
stateRoot := [32]byte{0xAA}
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: fixture.envelope.BuilderIndex,
|
||||
BlockHash: blockHash[:],
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
ExecutionRequests: fixture.envelope.ExecutionRequests,
|
||||
},
|
||||
}
|
||||
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, ApplyBlindedExecutionPayloadEnvelopeForStateGen(t.Context(), st, stateRoot, wrappedEnv))
|
||||
|
||||
latestHash, err := st.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blockHash, latestHash)
|
||||
|
||||
available, err := st.ExecutionPayloadAvailability(fixture.slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1), available)
|
||||
|
||||
header := st.LatestBlockHeader()
|
||||
require.DeepEqual(t, stateRoot[:], header.StateRoot)
|
||||
}
|
||||
|
||||
func TestApplyBlindedExecutionPayloadEnvelopeForStateGen_SlotMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot + 1,
|
||||
BlockHash: make([]byte, 32),
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ApplyBlindedExecutionPayloadEnvelopeForStateGen(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "blinded envelope slot does not match state slot", err)
|
||||
}
|
||||
|
||||
func TestApplyBlindedExecutionPayloadEnvelopeForStateGen_BuilderIndexMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
blockHash := [32]byte(fixture.payload.BlockHash)
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: 999,
|
||||
BlockHash: blockHash[:],
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ApplyBlindedExecutionPayloadEnvelopeForStateGen(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "builder index does not match", err)
|
||||
}
|
||||
|
||||
func TestApplyBlindedExecutionPayloadEnvelopeForStateGen_BlockHashMismatch(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
wrongHash := bytes.Repeat([]byte{0xFF}, 32)
|
||||
envelope := ðpb.SignedBlindedExecutionPayloadEnvelope{
|
||||
Message: ðpb.BlindedExecutionPayloadEnvelope{
|
||||
Slot: fixture.slot,
|
||||
BuilderIndex: fixture.envelope.BuilderIndex,
|
||||
BlockHash: wrongHash,
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
}
|
||||
wrappedEnv, err := blocks.WrappedROBlindedExecutionPayloadEnvelope(envelope.Message)
|
||||
require.NoError(t, err)
|
||||
err = ApplyBlindedExecutionPayloadEnvelopeForStateGen(t.Context(), fixture.state, [32]byte{}, wrappedEnv)
|
||||
require.ErrorContains(t, "block hash does not match", err)
|
||||
}
|
||||
|
||||
func TestVerifyExecutionPayloadEnvelopeSignature(t *testing.T) {
|
||||
fixture := buildPayloadFixture(t, nil)
|
||||
|
||||
|
||||
@@ -52,6 +52,7 @@ func ProcessBuilderPendingPayments(state state.BeaconState) error {
|
||||
if err := state.RotateBuilderPendingPayments(); err != nil {
|
||||
return errors.Wrap(err, "could not rotate builder pending payments")
|
||||
}
|
||||
builderPendingPaymentsProcessedTotal.Add(float64(len(withdrawals)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -121,7 +121,7 @@ func IsNextPeriodSyncCommittee(
|
||||
// CurrentPeriodSyncSubcommitteeIndices returns the subcommittee indices of the
|
||||
// current period sync committee for input validator.
|
||||
func CurrentPeriodSyncSubcommitteeIndices(
|
||||
st state.BeaconState, valIdx primitives.ValidatorIndex,
|
||||
st state.ReadOnlyBeaconState, valIdx primitives.ValidatorIndex,
|
||||
) ([]primitives.CommitteeIndex, error) {
|
||||
root, err := SyncPeriodBoundaryRoot(st)
|
||||
if err != nil {
|
||||
|
||||
@@ -63,11 +63,33 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// while we are verifying all the KZG proofs from multiple sidecars in a batch.
|
||||
// This is done to improve performance since the internal KZG library is way more
|
||||
// efficient when verifying in batch.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar_kzg_proofs
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/p2p-interface.md#modified-verify_data_column_sidecar_kzg_proofs
|
||||
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
commitmentsBySidecar := make([][][]byte, len(sidecars))
|
||||
for i := range sidecars {
|
||||
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
|
||||
}
|
||||
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
|
||||
}
|
||||
|
||||
// VerifyDataColumnsSidecarKZGProofsWithCommitments verifies KZG proofs using
|
||||
// explicitly provided commitments instead of the sidecar's own. This is used
|
||||
// by Gloas, which validates against bid.blob_kzg_commitments.
|
||||
func VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
|
||||
return verifyDataColumnsSidecarKZGProofs(sidecars, commitmentsBySidecar)
|
||||
}
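A hedged sketch of the Gloas-side call: every sidecar of a block is checked against the same commitment list taken from the committed bid rather than from the sidecar itself. The peerdas import path is assumed from the test file below.

package example

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// verifyAgainstBid verifies data column sidecars against the bid's blob KZG
// commitments, as Gloas requires, instead of trusting sidecar-carried commitments.
func verifyAgainstBid(sidecars []blocks.RODataColumn, bidCommitments [][]byte) error {
	commitmentsBySidecar := make([][][]byte, len(sidecars))
	for i := range sidecars {
		commitmentsBySidecar[i] = bidCommitments // all sidecars of one block share the bid commitments
	}
	return peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, commitmentsBySidecar)
}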
|
||||
|
||||
func verifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn, commitmentsBySidecar [][][]byte) error {
|
||||
if len(sidecars) != len(commitmentsBySidecar) {
|
||||
return ErrMismatchLength
|
||||
}
|
||||
|
||||
// Compute the total count.
|
||||
count := 0
|
||||
for _, sidecar := range sidecars {
|
||||
for i, sidecar := range sidecars {
|
||||
if len(sidecar.Column) != len(commitmentsBySidecar[i]) {
|
||||
return ErrMismatchLength
|
||||
}
|
||||
count += len(sidecar.Column)
|
||||
}
|
||||
|
||||
@@ -76,7 +98,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
cells := make([]kzg.Cell, 0, count)
|
||||
proofs := make([]kzg.Bytes48, 0, count)
|
||||
|
||||
for _, sidecar := range sidecars {
|
||||
for sidecarIndex, sidecar := range sidecars {
|
||||
for i := range sidecar.Column {
|
||||
var (
|
||||
commitment kzg.Bytes48
|
||||
@@ -84,7 +106,7 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {
|
||||
proof kzg.Bytes48
|
||||
)
|
||||
|
||||
commitmentBytes := sidecar.KzgCommitments[i]
|
||||
commitmentBytes := commitmentsBySidecar[sidecarIndex][i]
|
||||
cellBytes := sidecar.Column[i]
|
||||
proofBytes := sidecar.KzgProofs[i]
|
||||
|
||||
|
||||
@@ -89,6 +89,12 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
sidecars := generateRandomSidecars(t, seed, blobCount)
|
||||
err := peerdas.VerifyDataColumnsSidecarKZGProofsWithCommitments(sidecars, sidecarCommitments(sidecars))
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
|
||||
@@ -348,6 +354,14 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch4(b *testing
|
||||
}
|
||||
}
|
||||
|
||||
func sidecarCommitments(sidecars []blocks.RODataColumn) [][][]byte {
|
||||
commitmentsBySidecar := make([][][]byte, len(sidecars))
|
||||
for i := range sidecars {
|
||||
commitmentsBySidecar[i] = sidecars[i].KzgCommitments
|
||||
}
|
||||
return commitmentsBySidecar
|
||||
}
|
||||
|
||||
func createTestSidecar(t *testing.T, index uint64, column, kzgCommitments, kzgProofs [][]byte) blocks.RODataColumn {
|
||||
pbSignedBeaconBlock := util.NewBeaconBlockDeneb()
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbSignedBeaconBlock)
|
||||
|
||||
@@ -14,9 +14,34 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ProcessSlotsForBlock advances the given state to the slot of the given block.
|
||||
// This function assumes that the parent state is the latest state that has been processed before the given block.
|
||||
// In particular, all that is needed to get the block's prestate is to advance slots and apply any epoch transitions.
|
||||
func ProcessSlotsForBlock(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
b interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
accessRoot := b.ParentRoot()
|
||||
if st.Version() < version.Gloas {
|
||||
return ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot[:], b.Slot())
|
||||
}
|
||||
full, err := st.IsParentBlockFull()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not determine if parent block is full")
|
||||
}
|
||||
if full {
|
||||
accessRoot, err = st.LatestBlockHash()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get latest block hash")
|
||||
}
|
||||
}
|
||||
return ProcessSlotsUsingNextSlotCache(ctx, st, accessRoot[:], b.Slot())
|
||||
}
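A hedged sketch of the caller side, written as if in this package: the parent state is advanced to the block's slot before any per-block processing. Whatever the caller does afterwards is out of scope here.

func prestateFor(ctx context.Context, parent state.BeaconState, blk interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	// Pre-Gloas the next-slot cache is keyed by the parent block root; post-Gloas
	// the key switches to the latest block hash whenever the parent block was full.
	st, err := ProcessSlotsForBlock(ctx, parent, blk)
	if err != nil {
		return nil, errors.Wrap(err, "could not advance parent state to block slot")
	}
	return st, nil
}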
|
||||
|
||||
// ProcessOperations
|
||||
//
|
||||
// Spec definition:
|
||||
|
||||
@@ -120,7 +120,8 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
|
||||
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
|
||||
|
||||
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
|
||||
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
|
||||
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
|
||||
}
|
||||
@@ -224,7 +225,7 @@ func OptimizedGenesisBeaconStateBellatrix(genesisTime uint64, preState state.Bea
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
ist, err := state_native.InitializeFromProtoBellatrix(st)
|
||||
ist, err := state_native.InitializeFromProtoUnsafeBellatrix(st)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -276,5 +277,5 @@ func EmptyGenesisStateBellatrix() (state.BeaconState, error) {
|
||||
},
|
||||
}
|
||||
|
||||
return state_native.InitializeFromProtoBellatrix(st)
|
||||
return state_native.InitializeFromProtoUnsafeBellatrix(st)
|
||||
}
|
||||
|
||||
@@ -147,7 +147,8 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
|
||||
slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector)
|
||||
|
||||
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators())
|
||||
compactValidators := stateutil.CompactValidatorsFromProto(preState.Validators())
|
||||
genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(compactValidators)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err)
|
||||
}
|
||||
@@ -217,7 +218,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState,
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
return state_native.InitializeFromProtoPhase0(st)
|
||||
return state_native.InitializeFromProtoUnsafePhase0(st)
|
||||
}
|
||||
|
||||
// EmptyGenesisState returns an empty beacon state object.
|
||||
@@ -259,7 +260,7 @@ func EmptyGenesisState() (state.BeaconState, error) {
|
||||
Eth1DataVotes: []*ethpb.Eth1Data{},
|
||||
Eth1DepositIndex: 0,
|
||||
}
|
||||
return state_native.InitializeFromProtoPhase0(st)
|
||||
return state_native.InitializeFromProtoUnsafePhase0(st)
|
||||
}
|
||||
|
||||
// IsValidGenesisState gets called whenever there's a deposit event,
|
||||
|
||||
@@ -55,7 +55,7 @@ func NextSlotState(root []byte, wantedSlot types.Slot) state.BeaconState {
|
||||
// UpdateNextSlotCache updates the `nextSlotCache`. It saves the input state after advancing the state slot by 1
|
||||
// by calling `ProcessSlots`, it also saves the input root for later look up.
|
||||
// This is useful to call after successfully processing a block.
|
||||
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.BeaconState) error {
|
||||
func UpdateNextSlotCache(ctx context.Context, root []byte, state state.ReadOnlyBeaconState) error {
|
||||
// Advancing one slot by using a copied state.
|
||||
copied := state.Copy()
|
||||
copied, err := ProcessSlots(ctx, copied, copied.Slot()+1)
|
||||
|
||||
@@ -159,6 +159,15 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
|
||||
return state, nil
|
||||
}
|
||||
|
||||
// ProcessSlotsIfNeeded takes a ReadOnlyBeaconState and advances slots only when the target slot is ahead of the state; it returns a ReadOnlyBeaconState.
|
||||
func ProcessSlotsIfNeeded(ctx context.Context, state state.ReadOnlyBeaconState, accessRoot []byte, slot primitives.Slot) (state.ReadOnlyBeaconState, error) {
|
||||
if slot <= state.Slot() {
|
||||
return state, nil
|
||||
}
|
||||
copied := state.Copy()
|
||||
return ProcessSlotsUsingNextSlotCache(ctx, copied, accessRoot, slot)
|
||||
}
|
||||
|
||||
// ProcessSlotsUsingNextSlotCache processes slots by using next slot cache for higher efficiency.
|
||||
func ProcessSlotsUsingNextSlotCache(
|
||||
ctx context.Context,
|
||||
|
||||
@@ -10,7 +10,9 @@ import (
 	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
 	"github.com/OffchainLabs/prysm/v7/config/params"
+	consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	engine "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v7/runtime/version"
 	"github.com/stretchr/testify/require"
@@ -73,12 +75,12 @@ func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) stat
 		ExecutionPayloadAvailability: availability,
 		BuilderPendingPayments:       make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
 		LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
-			ParentBlockHash:    make([]byte, 32),
-			ParentBlockRoot:    make([]byte, 32),
-			BlockHash:          make([]byte, 32),
-			PrevRandao:         make([]byte, 32),
-			FeeRecipient:       make([]byte, 20),
-			BlobKzgCommitments: [][]byte{make([]byte, 48)},
+			ParentBlockHash:    make([]byte, 32),
+			ParentBlockRoot:    make([]byte, 32),
+			BlockHash:          make([]byte, 32),
+			PrevRandao:         make([]byte, 32),
+			FeeRecipient:       make([]byte, 20),
+			BlobKzgCommitments: [][]byte{make([]byte, 48)},
 		},
 		Eth1Data: &ethpb.Eth1Data{
 			DepositRoot: make([]byte, 32),
@@ -139,3 +141,217 @@ func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
 		BodyRoot: make([]byte, 32),
 	}
 }

+// newGloasForkBoundaryState returns a Gloas BeaconState where IsParentBlockFull()==true
+// because bid.BlockHash == latestBlockHash. The parentBlockRoot parameter controls
+// whether the bid looks like an upgrade seed (all zeros) or a real committed bid (non-zero).
+func newGloasForkBoundaryState(
+	t *testing.T,
+	slot primitives.Slot,
+	blockHash [32]byte,
+	parentBlockRoot [32]byte,
+) state.BeaconState {
+	t.Helper()
+	cfg := params.BeaconConfig()
+	availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
+	protoState := &ethpb.BeaconStateGloas{
+		Slot:                         slot,
+		LatestBlockHeader:            testBeaconBlockHeader(),
+		BlockRoots:                   make([][]byte, cfg.SlotsPerHistoricalRoot),
+		StateRoots:                   make([][]byte, cfg.SlotsPerHistoricalRoot),
+		RandaoMixes:                  make([][]byte, fieldparams.RandaoMixesLength),
+		ExecutionPayloadAvailability: availability,
+		BuilderPendingPayments:       make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
+		// bid.BlockHash == LatestBlockHash so that IsParentBlockFull() returns true.
+		LatestBlockHash: blockHash[:],
+		LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
+			ParentBlockHash:    make([]byte, 32),
+			ParentBlockRoot:    parentBlockRoot[:],
+			BlockHash:          blockHash[:],
+			PrevRandao:         make([]byte, 32),
+			FeeRecipient:       make([]byte, 20),
+			BlobKzgCommitments: [][]byte{make([]byte, 48)},
+		},
+		Eth1Data: &ethpb.Eth1Data{
+			DepositRoot: make([]byte, 32),
+			BlockHash:   make([]byte, 32),
+		},
+		PreviousEpochParticipation:  []byte{},
+		CurrentEpochParticipation:   []byte{},
+		JustificationBits:           []byte{0},
+		PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
+		CurrentJustifiedCheckpoint:  &ethpb.Checkpoint{Root: make([]byte, 32)},
+		FinalizedCheckpoint:         &ethpb.Checkpoint{Root: make([]byte, 32)},
+		PayloadExpectedWithdrawals:  make([]*engine.Withdrawal, 0),
+		ProposerLookahead:           make([]uint64, 0),
+		Builders:                    make([]*ethpb.Builder, 0),
+	}
+	for i := range protoState.BlockRoots {
+		protoState.BlockRoots[i] = make([]byte, 32)
+	}
+	for i := range protoState.StateRoots {
+		protoState.StateRoots[i] = make([]byte, 32)
+	}
+	for i := range protoState.RandaoMixes {
+		protoState.RandaoMixes[i] = make([]byte, 32)
+	}
+	for i := range protoState.BuilderPendingPayments {
+		protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
+			Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
+		}
+	}
+	pubkeys := make([][]byte, cfg.SyncCommitteeSize)
+	for i := range pubkeys {
+		pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
+	}
+	aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
+	protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
+	protoState.NextSyncCommittee = &ethpb.SyncCommittee{Pubkeys: pubkeys, AggregatePubkey: aggPubkey}
+	st, err := state_native.InitializeFromProtoGloas(protoState)
+	require.NoError(t, err)
+	return st
+}
+
+// newGloasTestBlock returns an ROBlock at the given slot with the given parentRoot.
+func newGloasTestBlock(t *testing.T, slot primitives.Slot, parentRoot [32]byte) consensusblocks.ROBlock {
+	t.Helper()
+	blkProto := &ethpb.SignedBeaconBlockGloas{
+		Block: &ethpb.BeaconBlockGloas{
+			Slot:       slot,
+			ParentRoot: parentRoot[:],
+			StateRoot:  make([]byte, 32),
+			Body: &ethpb.BeaconBlockBodyGloas{
+				RandaoReveal: make([]byte, fieldparams.BLSSignatureLength),
+				Graffiti:     make([]byte, 32),
+				Eth1Data:     &ethpb.Eth1Data{DepositRoot: make([]byte, 32), BlockHash: make([]byte, 32)},
+				SyncAggregate: &ethpb.SyncAggregate{
+					SyncCommitteeBits:      make([]byte, fieldparams.SyncAggregateSyncCommitteeBytesLength),
+					SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
+				},
+				SignedExecutionPayloadBid: &ethpb.SignedExecutionPayloadBid{
+					Message: &ethpb.ExecutionPayloadBid{
+						Slot:               slot,
+						ParentBlockHash:    make([]byte, 32),
+						ParentBlockRoot:    make([]byte, 32),
+						BlockHash:          make([]byte, 32),
+						PrevRandao:         make([]byte, 32),
+						FeeRecipient:       make([]byte, 20),
+						BlobKzgCommitments: [][]byte{},
+					},
+					Signature: make([]byte, fieldparams.BLSSignatureLength),
+				},
+				PayloadAttestations: []*ethpb.PayloadAttestation{},
+			},
+		},
+		Signature: make([]byte, fieldparams.BLSSignatureLength),
+	}
+	wsb, err := consensusblocks.NewSignedBeaconBlock(blkProto)
+	require.NoError(t, err)
+	rob, err := consensusblocks.NewROBlock(wsb)
+	require.NoError(t, err)
+	return rob
+}
+
+// TestProcessSlotsForBlock_UpgradeSeededBid verifies that ProcessSlotsForBlock uses
+// b.ParentRoot() as the NSC access key when the state has an upgrade-seeded bid
+// (bid.ParentBlockRoot == zero). This guards against the Fulu->Gloas fork-boundary
+// false positive where UpgradeToGloas seeds bid.BlockHash == latestBlockHash while
+// leaving bid.ParentBlockRoot as all zeros.
+func TestProcessSlotsForBlock_UpgradeSeededBid(t *testing.T) {
+	ctx := context.Background()
+	parentRoot := [32]byte{0x01, 0x02, 0x03}
+	blockHash := [32]byte{0xAA, 0xBB, 0xCC}
+	targetSlot := primitives.Slot(9)
+
+	// Build a Gloas state at slot 8 with IsParentBlockFull()==true but
+	// bid.ParentBlockRoot==zero (upgrade-seeded: not a real committed bid).
+	st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, [32]byte{})
+	require.Equal(t, version.Gloas, st.Version())
+
+	// Verify preconditions.
+	full, err := st.IsParentBlockFull()
+	require.NoError(t, err)
+	require.True(t, full, "precondition: IsParentBlockFull must be true")
+
+	bid, err := st.LatestExecutionPayloadBid()
+	require.NoError(t, err)
+	require.Equal(t, [32]byte{}, bid.ParentBlockRoot(), "upgrade-seeded bid must have zero ParentBlockRoot")
+
+	// Prime the NSC with parentRoot as the access key.
+	// With the guard in place (realBid==false), ProcessSlotsForBlock will use
+	// b.ParentRoot() as the NSC key and find this cached entry.
+	require.NoError(t, UpdateNextSlotCache(ctx, parentRoot[:], st))
+
+	blk := newGloasTestBlock(t, targetSlot, parentRoot)
+
+	out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
+	require.NoError(t, err)
+	require.Equal(t, targetSlot, out.Slot())
+
+	// Verify that the NSC entry primed under parentRoot is still present,
+	// confirming it was used (read) rather than bypassed.
+	cached := NextSlotState(parentRoot[:], targetSlot)
+	require.NotNil(t, cached, "NSC entry under parentRoot should still be present after use")
+}
+
+// TestProcessSlotsForBlock_RealBid verifies that ProcessSlotsForBlock uses
+// LatestBlockHash as the NSC access key when the state has a real committed bid
+// (bid.ParentBlockRoot != zero). This is the normal post-fork case.
+func TestProcessSlotsForBlock_RealBid(t *testing.T) {
+	ctx := context.Background()
+	parentRoot := [32]byte{0x01, 0x02, 0x03}
+	blockHash := [32]byte{0xAA, 0xBB, 0xCC}
+	realParentBlockRoot := [32]byte{0xDE, 0xAD, 0xBE, 0xEF}
+	targetSlot := primitives.Slot(9)
+
+	// Build a Gloas state at slot 8 with IsParentBlockFull()==true and
+	// bid.ParentBlockRoot!=zero (a real committed bid).
+	st := newGloasForkBoundaryState(t, targetSlot-1, blockHash, realParentBlockRoot)
+	require.Equal(t, version.Gloas, st.Version())
+
+	// Verify preconditions.
+	full, err := st.IsParentBlockFull()
+	require.NoError(t, err)
+	require.True(t, full, "precondition: IsParentBlockFull must be true")
+
+	bid, err := st.LatestExecutionPayloadBid()
+	require.NoError(t, err)
+	require.NotEqual(t, [32]byte{}, bid.ParentBlockRoot(), "real bid must have non-zero ParentBlockRoot")
+
+	// Prime the NSC with the EL block hash as the access key.
+	// With the guard in place (realBid==true), ProcessSlotsForBlock will use
+	// LatestBlockHash as the NSC key and find this cached entry.
+	require.NoError(t, UpdateNextSlotCache(ctx, blockHash[:], st))
+
+	blk := newGloasTestBlock(t, targetSlot, parentRoot)
+
+	out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
+	require.NoError(t, err)
+	require.Equal(t, targetSlot, out.Slot())
+
+	// Verify that the NSC entry primed under blockHash is still present,
+	// confirming it was used (read) rather than bypassed.
+	cached := NextSlotState(blockHash[:], targetSlot)
+	require.NotNil(t, cached, "NSC entry under blockHash should still be present after use")
+}
+
+// TestProcessSlotsForBlock_PreGloas verifies that ProcessSlotsForBlock uses
+// b.ParentRoot() as the access key on pre-Gloas (Fulu) states, unchanged by the fix.
+func TestProcessSlotsForBlock_PreGloas(t *testing.T) {
+	ctx := context.Background()
+	parentRoot := [32]byte{0x01, 0x02, 0x03}
+	targetSlot := primitives.Slot(5)
+
+	// newGloasState creates a Gloas-versioned state; we need a Fulu/pre-Gloas state.
+	// Use newGloasState as a base and just verify the slot advancement works.
+	// Note: version.Gloas is the version created by newGloasState; for pre-Gloas
+	// the function takes the version < Gloas path. We build a minimal Gloas state
+	// to test, but note ProcessSlotsForBlock has an explicit version check at top.
+	st := newGloasState(t, targetSlot-1, bytes.Repeat([]byte{0}, int(params.BeaconConfig().SlotsPerHistoricalRoot/8)))
+
+	blk := newGloasTestBlock(t, targetSlot, parentRoot)
+
+	out, err := ProcessSlotsForBlock(ctx, st, blk.Block())
+	require.NoError(t, err)
+	require.Equal(t, targetSlot, out.Slot())
+}

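Pieced together from the comments in the three tests above, the access-key guard they exercise looks roughly like the sketch below. This is a reconstruction rather than the verbatim body of ProcessSlotsForBlock, and the LatestBlockHash accessor name is an assumption:

// nscKeyForBlock sketches how ProcessSlotsForBlock is expected to choose the
// next-slot-cache access key. Pre-Gloas, and for an upgrade-seeded bid with a
// zero ParentBlockRoot, the key is the block's parent root; only a real
// committed bid with a full parent block switches the key to the latest
// execution block hash.
func nscKeyForBlock(st state.ReadOnlyBeaconState, b interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
    key := b.ParentRoot()
    if st.Version() < version.Gloas {
        return key, nil
    }
    full, err := st.IsParentBlockFull()
    if err != nil {
        return [32]byte{}, err
    }
    bid, err := st.LatestExecutionPayloadBid()
    if err != nil {
        return [32]byte{}, err
    }
    realBid := bid.ParentBlockRoot() != [32]byte{}
    if full && realBid {
        hash, err := st.LatestBlockHash() // accessor name assumed
        if err != nil {
            return [32]byte{}, err
        }
        key = hash
    }
    return key, nil
}
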
@@ -63,8 +63,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
 	interop.WriteBlockToDisk(signed, false /* Has the block failed */)
 	interop.WriteStateToDisk(st)

-	parentRoot := signed.Block().ParentRoot()
-	st, err = ProcessSlotsUsingNextSlotCache(ctx, st, parentRoot[:], signed.Block().Slot())
+	st, err = ProcessSlotsForBlock(ctx, st, signed.Block())
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "could not process slots")
 	}
@@ -135,8 +134,7 @@ func CalculateStateRoot(

 	// Execute per slots transition.
 	var err error
-	parentRoot := signed.Block().ParentRoot()
-	state, err = ProcessSlotsUsingNextSlotCache(ctx, state, parentRoot[:], signed.Block().Slot())
+	state, err = ProcessSlotsForBlock(ctx, state, signed.Block())
 	if err != nil {
 		return [32]byte{}, errors.Wrap(err, "could not process slots")
 	}

@@ -34,6 +34,7 @@ type ReadOnlyDatabase interface {
 	IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
 	FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
 	HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
+	LowestRootsAtOrAboveSlot(ctx context.Context, slot primitives.Slot) (primitives.Slot, [][32]byte, error)
 	EarliestSlot(ctx context.Context) (primitives.Slot, error)
 	// State related methods.
 	State(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error)
@@ -68,6 +69,7 @@ type ReadOnlyDatabase interface {

 	// Execution payload envelope operations (Gloas+).
 	ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error)
+	ExecutionPayloadEnvelopeByBlockHash(ctx context.Context, blockHash [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error)
 	HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool

 	// P2P Metadata operations.

@@ -107,6 +107,7 @@ go_test(
        "migration_block_slot_index_test.go",
        "migration_state_validators_test.go",
        "p2p_test.go",
+       "state_diff_helpers_test.go",
        "state_diff_test.go",
        "state_summary_test.go",
        "state_test.go",
@@ -145,7 +146,6 @@ go_test(
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
-       "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@io_bazel_rules_go//go/tools/bazel:go_default_library",
        "@io_etcd_go_bbolt//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",

@@ -680,12 +680,12 @@ func (s *Store) SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveHeadBlockRoot")
 	defer span.End()
 	hasStateSummary := s.HasStateSummary(ctx, blockRoot)
-	return s.db.Update(func(tx *bolt.Tx) error {
-		hasStateInDB := tx.Bucket(stateBucket).Get(blockRoot[:]) != nil
-		if !(hasStateInDB || hasStateSummary) {
-			return errors.New("no state or state summary found with head block root")
-		}
+	hasStateInDB := s.HasState(ctx, blockRoot)
+	if !(hasStateInDB || hasStateSummary) {
+		return errors.New("no state or state summary found with head block root")
+	}

+	return s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(blocksBucket)
 		return bucket.Put(headBlockRootKey, blockRoot[:])
 	})

@@ -808,6 +808,44 @@ func (s *Store) HighestRootsBelowSlot(ctx context.Context, slot primitives.Slot)
 	return fs, roots, nil
 }

+// LowestRootsAtOrAboveSlot returns roots from the database slot index at or above the input slot.
+// The returned slot is the slot where the roots were found. This is the mirror of HighestRootsBelowSlot.
+// If no block exists at or above the given slot, the recorded head block root is returned as a
+// fallback; the root slice is empty only when no head root has been recorded either.
+func (s *Store) LowestRootsAtOrAboveSlot(ctx context.Context, slot primitives.Slot) (fs primitives.Slot, roots [][32]byte, err error) {
+	ctx, span := trace.StartSpan(ctx, "BeaconDB.LowestRootsAtOrAboveSlot")
+	defer span.End()
+
+	sk := bytesutil.Uint64ToBytesBigEndian(uint64(slot))
+	err = s.db.View(func(tx *bolt.Tx) error {
+		bkt := tx.Bucket(blockSlotIndicesBucket)
+		c := bkt.Cursor()
+		// Seek positions the cursor at the smallest key >= sk.
+		// If no key >= sk exists, sl is nil and we return empty.
+		for sl, r := c.Seek(sk); sl != nil; sl, r = c.Next() {
+			if ctx.Err() != nil {
+				return ctx.Err()
+			}
+			if r == nil {
+				continue
+			}
+			fs = bytesutil.BytesToSlotBigEndian(sl)
+			roots, err = splitRoots(r)
+			if err != nil {
+				return errors.Wrapf(err, "error parsing packed roots %#x", r)
+			}
+			return nil
+		}
+		// No block found at or above slot: fall back to the head block root.
+		headRoot := tx.Bucket(blocksBucket).Get(headBlockRootKey)
+		if headRoot == nil {
+			return nil
+		}
+		roots = [][32]byte{bytesutil.ToBytes32(headRoot)}
+		return nil
+	})
+	return fs, roots, err
+}
+
 // FeeRecipientByValidatorID returns the fee recipient for a validator id.
 // `ErrNotFoundFeeRecipient` is returned if the validator id is not found.
 func (s *Store) FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error) {

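A brief usage sketch of the new accessor; the helper and its error are illustrative. Given the head-root fallback above, an empty slice means neither an indexed block nor a recorded head root exists at or above the slot:

// firstBlockAtOrAfter is a hypothetical helper built on LowestRootsAtOrAboveSlot.
func firstBlockAtOrAfter(ctx context.Context, store *Store, target primitives.Slot) ([32]byte, error) {
    slot, roots, err := store.LowestRootsAtOrAboveSlot(ctx, target)
    if err != nil {
        return [32]byte{}, err
    }
    if len(roots) == 0 {
        return [32]byte{}, fmt.Errorf("no block indexed at or above slot %d", target)
    }
    // Several roots are possible when forked blocks share the found slot.
    _ = slot
    return roots[0], nil
}
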
@@ -3,6 +3,7 @@ package kv

 import (
 	"context"
 	"fmt"
+	"math"
 	"testing"
 	"time"

@@ -1460,3 +1461,74 @@ func TestStore_EarliestSlot(t *testing.T) {
 		assert.Equal(t, nextEpochSlot, slot)
 	})
 }

+func TestStore_LowestRootsAtOrAboveSlot(t *testing.T) {
+	for _, tt := range blockTests {
+		t.Run(tt.name, func(t *testing.T) {
+			db := setupDB(t)
+			ctx := t.Context()
+
+			block1, err := tt.newBlock(primitives.Slot(10), nil)
+			require.NoError(t, err)
+			block2, err := tt.newBlock(primitives.Slot(50), nil)
+			require.NoError(t, err)
+			block3, err := tt.newBlock(primitives.Slot(100), nil)
+			require.NoError(t, err)
+
+			require.NoError(t, db.SaveBlock(ctx, block1))
+			require.NoError(t, db.SaveBlock(ctx, block2))
+			require.NoError(t, db.SaveBlock(ctx, block3))
+
+			// Before first block: slot 5 → block at slot 10.
+			foundSlot, roots, err := db.LowestRootsAtOrAboveSlot(ctx, 5)
+			require.NoError(t, err)
+			require.Equal(t, 1, len(roots))
+			assert.Equal(t, primitives.Slot(10), foundSlot)
+
+			// Exact match: slot 10 → block at slot 10.
+			foundSlot, roots, err = db.LowestRootsAtOrAboveSlot(ctx, 10)
+			require.NoError(t, err)
+			require.Equal(t, 1, len(roots))
+			assert.Equal(t, primitives.Slot(10), foundSlot)
+
+			// Gap: slot 11 → block at slot 50 (slots 11-49 missing).
+			foundSlot, roots, err = db.LowestRootsAtOrAboveSlot(ctx, 11)
+			require.NoError(t, err)
+			require.Equal(t, 1, len(roots))
+			assert.Equal(t, primitives.Slot(50), foundSlot)
+
+			// Past last block: slot 101 → nothing.
+			_, roots, err = db.LowestRootsAtOrAboveSlot(ctx, 101)
+			require.NoError(t, err)
+			assert.Equal(t, 0, len(roots))
+
+			// Max slot: should return empty.
+			_, roots, err = db.LowestRootsAtOrAboveSlot(ctx, math.MaxUint64)
+			require.NoError(t, err)
+			assert.Equal(t, 0, len(roots))
+		})
+	}
+}
+
+func TestStore_LowestRootsAtOrAboveSlot_MultipleBlocksSameSlot(t *testing.T) {
+	for _, tt := range blockTests {
+		t.Run(tt.name, func(t *testing.T) {
+			db := setupDB(t)
+			ctx := t.Context()
+
+			// Save two different blocks at the same slot with different parent roots.
+			block1, err := tt.newBlock(primitives.Slot(20), bytesutil.PadTo([]byte{1}, 32))
+			require.NoError(t, err)
+			block2, err := tt.newBlock(primitives.Slot(20), bytesutil.PadTo([]byte{2}, 32))
+			require.NoError(t, err)
+
+			require.NoError(t, db.SaveBlock(ctx, block1))
+			require.NoError(t, db.SaveBlock(ctx, block2))
+
+			foundSlot, roots, err := db.LowestRootsAtOrAboveSlot(ctx, 20)
+			require.NoError(t, err)
+			assert.Equal(t, primitives.Slot(20), foundSlot)
+			assert.Equal(t, 2, len(roots))
+		})
+	}
+}

@@ -70,9 +70,9 @@ func (s *Store) SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.C
 		return err
 	}
 	hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
+	hasStateInDB := s.HasState(ctx, bytesutil.ToBytes32(checkpoint.Root))
 	err = s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(checkpointBucket)
-		hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
 		if !(hasStateInDB || hasStateSummary) {
 			log.Warnf("Recovering state summary for finalized root: %#x", bytesutil.Trunc(checkpoint.Root))
 			if err := recoverStateSummary(ctx, tx, checkpoint.Root); err != nil {
@@ -99,9 +99,9 @@ func (s *Store) saveCheckpoint(ctx context.Context, key []byte, checkpoint *ethp
 		return err
 	}
 	hasStateSummary := s.HasStateSummary(ctx, bytesutil.ToBytes32(checkpoint.Root))
+	hasStateInDB := s.HasState(ctx, bytesutil.ToBytes32(checkpoint.Root))
 	err = s.db.Update(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket(checkpointBucket)
-		hasStateInDB := tx.Bucket(stateBucket).Get(checkpoint.Root) != nil
 		if !(hasStateInDB || hasStateSummary) {
 			log.WithField("root", fmt.Sprintf("%#x", bytesutil.Trunc(checkpoint.Root))).Warn("Recovering state summary")
 			if err := recoverStateSummary(ctx, tx, checkpoint.Root); err != nil {

@@ -26,6 +26,15 @@ var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence numb
 // but the database was created without state-diff support.
 var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")

+// ErrStateDiffCorrupted is returned when state-diff metadata or data is missing or invalid.
+var ErrStateDiffCorrupted = errors.New("state-diff database corrupted")
+
+// ErrStateDiffExponentMismatch is returned when configured exponents differ from stored metadata.
+var ErrStateDiffExponentMismatch = errors.New("state-diff exponents mismatch")
+
+// ErrStateDiffMissingSnapshot is returned when the offset snapshot is missing.
+var ErrStateDiffMissingSnapshot = errors.New("state-diff offset snapshot missing")
+
 var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
 var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
 var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")

@@ -15,6 +15,8 @@ import (
 // beacon block root. The envelope is stored in blinded form: the full execution payload is replaced
 // with its block hash. The full payload can later be retrieved from the EL via
 // engine_getPayloadBodiesByHash.
+// A secondary index from BlockHash → BeaconBlockRoot is maintained so that
+// envelopes can be looked up by execution block hash.
 func (s *Store) SaveExecutionPayloadEnvelope(ctx context.Context, env *ethpb.SignedExecutionPayloadEnvelope) error {
 	_, span := trace.StartSpan(ctx, "BeaconDB.SaveExecutionPayloadEnvelope")
 	defer span.End()
@@ -24,6 +26,7 @@ func (s *Store) SaveExecutionPayloadEnvelope(ctx context.Context, env *ethpb.Sig
 	}

 	blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
+	blockHash := bytesutil.ToBytes32(env.Message.Payload.BlockHash)
 	blinded := blindEnvelope(env)

 	enc, err := encodeBlindedEnvelope(blinded)
@@ -32,8 +35,10 @@ func (s *Store) SaveExecutionPayloadEnvelope(ctx context.Context, env *ethpb.Sig
 	}

 	return s.db.Update(func(tx *bolt.Tx) error {
-		bkt := tx.Bucket(executionPayloadEnvelopesBucket)
-		return bkt.Put(blockRoot[:], enc)
+		if err := tx.Bucket(executionPayloadEnvelopesBucket).Put(blockRoot[:], enc); err != nil {
+			return err
+		}
+		return tx.Bucket(executionPayloadEnvelopeBlockHashBucket).Put(blockHash[:], blockRoot[:])
 	})
 }

@@ -56,6 +61,30 @@ func (s *Store) ExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte
 	return decodeBlindedEnvelope(enc)
 }

+// ExecutionPayloadEnvelopeByBlockHash retrieves the blinded signed execution payload envelope
+// by execution block hash. It uses the secondary BlockHash → BeaconBlockRoot index and then
+// fetches the envelope from the primary bucket.
+func (s *Store) ExecutionPayloadEnvelopeByBlockHash(ctx context.Context, blockHash [32]byte) (*ethpb.SignedBlindedExecutionPayloadEnvelope, error) {
+	_, span := trace.StartSpan(ctx, "BeaconDB.ExecutionPayloadEnvelopeByBlockHash")
+	defer span.End()
+
+	var enc []byte
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		blockRoot := tx.Bucket(executionPayloadEnvelopeBlockHashBucket).Get(blockHash[:])
+		if blockRoot == nil {
+			return nil
+		}
+		enc = tx.Bucket(executionPayloadEnvelopesBucket).Get(blockRoot)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	if enc == nil {
+		return nil, errors.Wrap(ErrNotFound, "execution payload envelope not found for block hash")
+	}
+	return decodeBlindedEnvelope(enc)
+}
+
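A round-trip sketch of the two lookup paths; the helper is illustrative. Saving writes the blinded envelope under the beacon block root and records the BlockHash → root mapping, so either key resolves to the same record:

// envelopeRoundTrip is a hypothetical check of the dual-index behavior.
func envelopeRoundTrip(ctx context.Context, s *Store, env *ethpb.SignedExecutionPayloadEnvelope) error {
    if err := s.SaveExecutionPayloadEnvelope(ctx, env); err != nil {
        return err
    }
    byRoot, err := s.ExecutionPayloadEnvelope(ctx, bytesutil.ToBytes32(env.Message.BeaconBlockRoot))
    if err != nil {
        return err
    }
    byHash, err := s.ExecutionPayloadEnvelopeByBlockHash(ctx, bytesutil.ToBytes32(env.Message.Payload.BlockHash))
    if err != nil {
        return err
    }
    if byRoot.Message.Slot != byHash.Message.Slot {
        return errors.New("expected both lookups to resolve to the same blinded envelope")
    }
    return nil
}
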
 // HasExecutionPayloadEnvelope checks whether an execution payload envelope exists for the given beacon block root.
 func (s *Store) HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) bool {
 	_, span := trace.StartSpan(ctx, "BeaconDB.HasExecutionPayloadEnvelope")
@@ -72,13 +101,25 @@ func (s *Store) HasExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]b
 	return exists
 }

-// DeleteExecutionPayloadEnvelope removes a signed execution payload envelope by beacon block root.
+// DeleteExecutionPayloadEnvelope removes a signed execution payload envelope by beacon block root
+// and cleans up the BlockHash index entry.
 func (s *Store) DeleteExecutionPayloadEnvelope(ctx context.Context, blockRoot [32]byte) error {
 	_, span := trace.StartSpan(ctx, "BeaconDB.DeleteExecutionPayloadEnvelope")
 	defer span.End()

 	return s.db.Update(func(tx *bolt.Tx) error {
 		bkt := tx.Bucket(executionPayloadEnvelopesBucket)
+		// Read the existing entry to find the BlockHash for index cleanup.
+		enc := bkt.Get(blockRoot[:])
+		if enc != nil {
+			blinded, err := decodeBlindedEnvelope(enc)
+			if err == nil && blinded.Message != nil {
+				blockHash := bytesutil.ToBytes32(blinded.Message.BlockHash)
+				if err := tx.Bucket(executionPayloadEnvelopeBlockHashBucket).Delete(blockHash[:]); err != nil {
+					return err
+				}
+			}
+		}
 		return bkt.Delete(blockRoot[:])
 	})
 }
@@ -95,6 +136,7 @@ func blindEnvelope(env *ethpb.SignedExecutionPayloadEnvelope) *ethpb.SignedBlind
 			BeaconBlockRoot: env.Message.BeaconBlockRoot,
 			Slot:            env.Message.Slot,
 			StateRoot:       env.Message.StateRoot,
+			ParentBlockHash: env.Message.Payload.ParentHash,
 		},
 		Signature: env.Signature,
 	}

@@ -1,6 +1,7 @@
 package kv

 import (
+	"bytes"
 	"context"
 	"testing"

@@ -75,6 +76,7 @@ func TestStore_SaveAndRetrieveExecutionPayloadEnvelope(t *testing.T) {

 	// BlockHash should be the payload's block hash (not a hash tree root).
 	assert.DeepEqual(t, env.Message.Payload.BlockHash, loaded.Message.BlockHash)
+	assert.Equal(t, true, bytes.Equal(env.Message.Payload.ParentHash, loaded.Message.ParentBlockHash))
 }

 func TestStore_DeleteExecutionPayloadEnvelope(t *testing.T) {
@@ -107,6 +109,52 @@ func TestStore_SaveExecutionPayloadEnvelope_NilRejected(t *testing.T) {
 	require.ErrorContains(t, "nil", err)
 }

+func TestStore_ExecutionPayloadEnvelopeByBlockHash(t *testing.T) {
+	db := setupDB(t)
+	ctx := context.Background()
+	env := testEnvelope(t)
+	blockHash := bytesutil.ToBytes32(env.Message.Payload.BlockHash)
+
+	// Saving the envelope should populate both the primary bucket and the BlockHash index.
+	require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
+
+	// Look up by block hash.
+	loaded, err := db.ExecutionPayloadEnvelopeByBlockHash(ctx, blockHash)
+	require.NoError(t, err)
+	assert.Equal(t, env.Message.Slot, loaded.Message.Slot)
+	assert.DeepEqual(t, env.Message.Payload.BlockHash, loaded.Message.BlockHash)
+	assert.Equal(t, true, bytes.Equal(env.Message.Payload.ParentHash, loaded.Message.ParentBlockHash))
+}
+
+func TestStore_ExecutionPayloadEnvelopeByBlockHash_NotFound(t *testing.T) {
+	db := setupDB(t)
+	ctx := context.Background()
+	nonExistent := bytesutil.ToBytes32([]byte("nonexistent"))
+
+	_, err := db.ExecutionPayloadEnvelopeByBlockHash(ctx, nonExistent)
+	require.ErrorContains(t, "not found", err)
+}
+
+func TestStore_DeleteExecutionPayloadEnvelope_CleansBlockHashIndex(t *testing.T) {
+	db := setupDB(t)
+	ctx := context.Background()
+	env := testEnvelope(t)
+	blockRoot := bytesutil.ToBytes32(env.Message.BeaconBlockRoot)
+	blockHash := bytesutil.ToBytes32(env.Message.Payload.BlockHash)
+
+	require.NoError(t, db.SaveExecutionPayloadEnvelope(ctx, env))
+
+	// Verify the BlockHash lookup works before the delete.
+	_, err := db.ExecutionPayloadEnvelopeByBlockHash(ctx, blockHash)
+	require.NoError(t, err)
+
+	// Delete should clean up both buckets.
+	require.NoError(t, db.DeleteExecutionPayloadEnvelope(ctx, blockRoot))
+
+	_, err = db.ExecutionPayloadEnvelopeByBlockHash(ctx, blockHash)
+	require.ErrorContains(t, "not found", err)
+}
+
 func TestBlindEnvelope_PreservesBlockHash(t *testing.T) {
 	env := testEnvelope(t)

@@ -114,6 +162,7 @@ func TestBlindEnvelope_PreservesBlockHash(t *testing.T) {

 	// Should contain the block hash from the payload, not a hash tree root.
 	assert.DeepEqual(t, env.Message.Payload.BlockHash, blinded.Message.BlockHash)
+	assert.Equal(t, true, bytes.Equal(env.Message.Payload.ParentHash, blinded.Message.ParentBlockHash))

 	// Metadata should be preserved.
 	assert.Equal(t, env.Message.BuilderIndex, blinded.Message.BuilderIndex)

@@ -7,6 +7,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
 	dbIface "github.com/OffchainLabs/prysm/v7/beacon-chain/db/iface"
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
+	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/encoding/ssz/detect"
 	"github.com/OffchainLabs/prysm/v7/genesis"
 	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
@@ -26,9 +27,17 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
 	if err := s.SaveBlock(ctx, wsb); err != nil {
 		return errors.Wrap(err, "could not save genesis block")
 	}
-	if err := s.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
-		return errors.Wrap(err, "could not save genesis state")
+
+	if features.Get().EnableStateDiff {
+		if err := s.initializeStateDiff(0, genesisState); err != nil {
+			return errors.Wrap(err, "failed to initialize state diff for genesis")
+		}
+	} else {
+		if err := s.SaveState(ctx, genesisState, genesisBlkRoot); err != nil {
+			return errors.Wrap(err, "could not save genesis state")
+		}
 	}

 	if err := s.SaveStateSummary(ctx, &ethpb.StateSummary{
 		Slot: 0,
 		Root: genesisBlkRoot[:],
@@ -43,9 +52,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
 		return errors.Wrap(err, "could not save genesis block root")
 	}

-	if err := s.initializeStateDiff(0, genesisState); err != nil {
-		return errors.Wrap(err, "failed to initialize state diff for genesis")
-	}
 	return nil
 }

@@ -7,9 +7,11 @@ import (
 	"fmt"
 	"os"
 	"path"
+	"slices"
 	"time"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/iface"
+	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
 	"github.com/OffchainLabs/prysm/v7/config/features"
 	"github.com/OffchainLabs/prysm/v7/config/params"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -21,6 +23,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	prombolt "github.com/prysmaticlabs/prombbolt"
+	logrus "github.com/sirupsen/logrus"
 	bolt "go.etcd.io/bbolt"
 )

@@ -127,6 +130,7 @@ var Buckets = [][]byte{
 	registrationBucket,
 	custodyBucket,
 	executionPayloadEnvelopesBucket,
+	executionPayloadEnvelopeBlockHashBucket,
 }

 // KVStoreOption is a functional option that modifies a kv.Store.
@@ -224,8 +228,45 @@ func (kv *Store) startStateDiff(ctx context.Context) error {
 	}

 	if hasOffset {
-		// Existing state-diff database - restarts not yet supported.
-		return errors.New("restarting with existing state-diff database not yet supported")
+		storedExponents, err := kv.loadStateDiffExponents()
+		if err != nil {
+			if errors.Is(err, errExponentsMetadataMissing) {
+				return fmt.Errorf("%w: database has state-diff offset but no exponents metadata. "+
+					"This may indicate the database was created by an older software version that predates exponent storage. "+
+					"Delete database and re-sync from genesis/checkpoint", ErrStateDiffCorrupted)
+			}
+			return fmt.Errorf("%w: state-diff exponents metadata corrupted: %v", ErrStateDiffCorrupted, err)
+		}
+		currentExponents := flags.Get().StateDiffExponents
+		if !slices.Equal(storedExponents, currentExponents) {
+			return errors.Wrapf(
+				ErrStateDiffExponentMismatch,
+				"state-diff exponents changed; database incompatible. "+
+					"Database was initialized with: %v. "+
+					"Current configuration: %v. "+
+					"Options: use original exponents (--state-diff-exponents=%s) or delete database and re-sync from genesis/checkpoint.",
+				storedExponents,
+				currentExponents,
+				formatStateDiffExponents(storedExponents),
+			)
+		}
+		offset, err := kv.loadOffset()
+		if err != nil {
+			return err
+		}
+		cache, err := populateStateDiffCacheFromDB(kv, offset)
+		if err != nil {
+			return err
+		}
+		kv.stateDiffCache = cache
+		if err := validateStateDiffCache(ctx, kv, cache); err != nil {
+			return err
+		}
+		log.WithFields(logrus.Fields{
+			"offset":    offset,
+			"exponents": storedExponents,
+		}).Info("State-diff cache initialized from existing database")
+		return nil
 	}

 	// Check if this is a new database (no head block).

@@ -263,6 +304,15 @@ func (s *Store) ClearDB() error {
 // Close closes the underlying BoltDB database.
 func (s *Store) Close() error {
 	prometheus.Unregister(createBoltCollector(s.db))
+	// Close the caches and clear their references so shutdown releases memory promptly.
+	if s.blockCache != nil {
+		s.blockCache.Close()
+		s.blockCache = nil
+	}
+	if s.validatorEntryCache != nil {
+		s.validatorEntryCache.Close()
+		s.validatorEntryCache = nil
+	}

 	// Before DB closes, we should dump the cached state summary objects to DB.
 	if err := s.saveCachedStateSummariesDB(s.ctx); err != nil {

|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
@@ -27,6 +30,107 @@ func setupDB(t testing.TB) *Store {
|
||||
return db
|
||||
}
|
||||
|
||||
+func TestStartStateDiff_ExponentMismatch(t *testing.T) {
+	resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
+	defer resetCfg()
+	setDefaultStateDiffExponents()
+
+	store := setupDB(t)
+	require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return bolt.ErrBucketNotFound
+		}
+		offsetBytes := make([]byte, 8)
+		binary.LittleEndian.PutUint64(offsetBytes, 0)
+		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
+			return err
+		}
+		encoded, err := encodeStateDiffExponents([]int{20, 10})
+		if err != nil {
+			return err
+		}
+		return bucket.Put(exponentsKey, encoded)
+	}))
+
+	ctx := t.Context()
+	err := store.startStateDiff(ctx)
+	require.ErrorContains(t, "state-diff exponents changed", err)
+}
+
+func TestStartStateDiff_MissingOffsetSnapshot(t *testing.T) {
+	resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
+	defer resetCfg()
+	setDefaultStateDiffExponents()
+
+	store := setupDB(t)
+	require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return bolt.ErrBucketNotFound
+		}
+		offsetBytes := make([]byte, 8)
+		binary.LittleEndian.PutUint64(offsetBytes, 0)
+		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
+			return err
+		}
+		encoded, err := encodeStateDiffExponents(flags.Get().StateDiffExponents)
+		if err != nil {
+			return err
+		}
+		return bucket.Put(exponentsKey, encoded)
+	}))
+
+	ctx := t.Context()
+	err := store.startStateDiff(ctx)
+	require.ErrorContains(t, "offset snapshot", err)
+}
+
+func TestStartStateDiff_ValidateOnStartup(t *testing.T) {
+	resetCfg := features.InitWithReset(&features.Flags{EnableStateDiff: true})
+	defer resetCfg()
+	setDefaultStateDiffExponents()
+
+	globalFlags := flags.GlobalFlags{
+		StateDiffExponents: flags.Get().StateDiffExponents,
+	}
+	flags.Init(&globalFlags)
+
+	store := setupDB(t)
+	require.NoError(t, store.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return bolt.ErrBucketNotFound
+		}
+		st, _ := createState(t, 0, version.Phase0)
+		stateBytes, err := st.MarshalSSZ()
+		if err != nil {
+			return err
+		}
+		enc, err := addKey(st.Version(), stateBytes)
+		if err != nil {
+			return err
+		}
+		offsetBytes := make([]byte, 8)
+		binary.LittleEndian.PutUint64(offsetBytes, 0)
+		if err := bucket.Put(offsetKey, offsetBytes); err != nil {
+			return err
+		}
+		encoded, err := encodeStateDiffExponents(flags.Get().StateDiffExponents)
+		if err != nil {
+			return err
+		}
+		if err := bucket.Put(exponentsKey, encoded); err != nil {
+			return err
+		}
+		key := makeKeyForStateDiffTree(0, 0)
+		return bucket.Put(key, enc)
+	}))
+
+	err := store.startStateDiff(t.Context())
+	require.NoError(t, err)
+}
+
 func Test_setupBlockStorageType(t *testing.T) {
 	ctx := t.Context()
 	t.Run("fresh database with feature enabled to store full blocks should store full blocks", func(t *testing.T) {

@@ -209,7 +209,7 @@ func stateBucketKeys(stateBucket *bolt.Bucket) ([][]byte, error) {

 func insertValidatorHashes(ctx context.Context, validators []*v1alpha1.Validator, valBkt *bolt.Bucket) ([]byte, error) {
 	// move all the validators in this state registry out to a new bucket.
-	var validatorKeys []byte
+	validatorKeys := make([]byte, 0, len(validators)*32)
 	for _, val := range validators {
 		valBytes, encodeErr := encode(ctx, val)
 		if encodeErr != nil {

@@ -7,17 +7,18 @@ package kv
 // it easy to scan for keys that have a certain shard number as a prefix and return those
 // corresponding attestations.
 var (
-	blocksBucket                    = []byte("blocks")
-	stateBucket                     = []byte("state")
-	stateSummaryBucket              = []byte("state-summary")
-	chainMetadataBucket             = []byte("chain-metadata")
-	checkpointBucket                = []byte("check-point")
-	powchainBucket                  = []byte("powchain")
-	stateValidatorsBucket           = []byte("state-validators")
-	feeRecipientBucket              = []byte("fee-recipient")
-	registrationBucket              = []byte("registration")
-	stateDiffBucket                 = []byte("state-diff")
-	executionPayloadEnvelopesBucket = []byte("execution-payload-envelopes")
+	blocksBucket                            = []byte("blocks")
+	stateBucket                             = []byte("state")
+	stateSummaryBucket                      = []byte("state-summary")
+	chainMetadataBucket                     = []byte("chain-metadata")
+	checkpointBucket                        = []byte("check-point")
+	powchainBucket                          = []byte("powchain")
+	stateValidatorsBucket                   = []byte("state-validators")
+	feeRecipientBucket                      = []byte("fee-recipient")
+	registrationBucket                      = []byte("registration")
+	stateDiffBucket                         = []byte("state-diff")
+	executionPayloadEnvelopesBucket         = []byte("execution-payload-envelopes")
+	executionPayloadEnvelopeBlockHashBucket = []byte("execution-payload-envelope-block-hash-index")

 	// Light Client Updates Bucket
 	lightClientUpdatesBucket = []byte("light-client-updates")

@@ -8,6 +8,7 @@ import (
 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
 	"github.com/OffchainLabs/prysm/v7/config/features"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
 	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
 	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
 	"github.com/OffchainLabs/prysm/v7/genesis"
@@ -122,6 +123,11 @@ func (s *Store) LegacyGenesisState(ctx context.Context) (state.BeaconState, erro
 func (s *Store) SaveState(ctx context.Context, st state.ReadOnlyBeaconState, blockRoot [32]byte) error {
 	ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveState")
 	defer span.End()

+	if features.Get().EnableStateDiff && s.stateDiffCache != nil {
+		return s.saveStateByDiff(ctx, st)
+	}
+
 	ok, err := s.isStateValidatorMigrationOver()
 	if err != nil {
 		return err

@@ -557,6 +563,19 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
 	}

 	switch {
+	case hasGloasKey(enc):
+		protoState := &ethpb.BeaconStateGloas{}
+		if err := protoState.UnmarshalSSZ(enc[len(gloasKey):]); err != nil {
+			return nil, errors.Wrap(err, "failed to unmarshal encoding for Gloas")
+		}
+		ok, err := s.isStateValidatorMigrationOver()
+		if err != nil {
+			return nil, err
+		}
+		if ok {
+			protoState.Validators = validatorEntries
+		}
+		return statenative.InitializeFromProtoUnsafeGloas(protoState)
 	case hasFuluKey(enc):
 		protoState := &ethpb.BeaconStateFulu{}
 		if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
@@ -742,6 +761,19 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
 			return nil, err
 		}
 		return snappy.Encode(nil, append(fuluKey, rawObj...)), nil
+	case version.Gloas:
+		rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateGloas)
+		if !ok {
+			return nil, errors.New("non valid inner state")
+		}
+		if rState == nil {
+			return nil, errors.New("nil state")
+		}
+		rawObj, err := rState.MarshalSSZ()
+		if err != nil {
+			return nil, err
+		}
+		return snappy.Encode(nil, append(gloasKey, rawObj...)), nil
 	default:
 		return nil, errors.New("invalid inner state")
 	}

@@ -1048,36 +1080,115 @@ func (s *Store) isStateValidatorMigrationOver() (bool, error) {
 }

 func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) {
-	slot, err := s.SlotByBlockRoot(ctx, blockRoot)
+	stateSummary, err := s.StateSummary(ctx, blockRoot)
 	if err != nil {
 		return nil, err
 	}
+	var slot primitives.Slot
+	var blk interfaces.ReadOnlySignedBeaconBlock
+	if stateSummary == nil {
+		blk, err = s.Block(ctx, blockRoot)
+		if err != nil {
+			return nil, err
+		}
+		if blk == nil || blk.IsNil() {
+			return nil, ErrNotFoundState
+		}
+		slot = blk.Block().Slot()
+	} else {
+		slot = stateSummary.Slot
+	}
+
+	if uint64(slot) < s.getOffset() {
+		return nil, ErrSlotBeforeOffset
+	}
+
+	if computeLevel(s.getOffset(), slot) == -1 {
+		return nil, ErrNotFoundState
+	}
+
 	st, err := s.stateByDiff(ctx, slot)
 	if err != nil {
 		return nil, err
 	}
 	if st == nil || st.IsNil() {
-		return nil, errors.New("state not found")
+		return nil, ErrNotFoundState
 	}

+	if blk == nil {
+		blk, err = s.Block(ctx, blockRoot)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if blk == nil || blk.IsNil() {
+		// Existing databases may have state summaries without corresponding blocks.
+		// In that case we return the slot-derived state but mark the verification gap.
+		log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Warn("Block not found for state-diff root verification; returning unverified state")
+		return st, nil
+	}
+	stateRoot, err := st.HashTreeRoot(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if stateRoot != blk.Block().StateRoot() {
+		return nil, errors.Wrap(ErrNotFoundState, "state root mismatch for block")
+	}
+
 	return st, nil
 }

 func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte) (bool, error) {
-	slot, err := s.SlotByBlockRoot(ctx, blockRoot)
+	stateSummary, err := s.StateSummary(ctx, blockRoot)
 	if err != nil {
 		return false, err
 	}
+	var slot primitives.Slot
+	if stateSummary == nil {
+		blk, err := s.Block(ctx, blockRoot)
+		if err != nil {
+			return false, err
+		}
+		if blk == nil || blk.IsNil() {
+			return false, nil
+		}
+		slot = blk.Block().Slot()
+	} else {
+		slot = stateSummary.Slot
+	}

 	if uint64(slot) < s.getOffset() {
 		return false, ErrSlotBeforeOffset
 	}

 	stateLvl := computeLevel(s.getOffset(), slot)
-	return stateLvl != -1, nil
+	if stateLvl == -1 {
+		return false, nil
+	}
+
+	if !s.stateDiffCache.levelHasData(stateLvl) {
+		return false, nil
+	}
+
+	hasState := false
+	err = s.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return errors.New("state diff bucket not found")
+		}
+
+		if stateLvl == 0 {
+			hasState = bucket.Get(makeKeyForStateDiffTree(stateLvl, uint64(slot))) != nil
+			return nil
+		}
+
+		hasState = hasCompleteDiffAtLevelSlot(bucket, stateLvl, uint64(slot))
+
+		return nil
+	})
+
+	if err != nil {
+		return false, err
+	}
+	return hasState, nil
+}

@@ -19,6 +19,8 @@ const (
 	balancesSuffix = "_b"
 )

+var errSnapshotNotFound = errors.New("full snapshot not found")
+
 /*
 We use a level-based approach to save state diffs. Each level corresponds to an exponent of 2 (exponents[lvl]).
 The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
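As a concrete illustration of the level selection this comment implies, here is a hypothetical reimplementation, not the repository's computeLevel, using made-up exponents: with exponents {16, 12, 8} and offset 0, slot 65536 lands on level 0 (full snapshot), slot 4096 on level 1, slot 256 on level 2, and slot 4097 on no level at all, matching the -1 sentinel checked elsewhere in this diff.

// levelFor sketches the slot-to-level mapping implied by the comment above.
// Hypothetical reconstruction; the real computeLevel may differ in details.
func levelFor(offset, slot uint64, exponents []int) int {
    if slot < offset {
        return -1
    }
    d := slot - offset
    for lvl, e := range exponents {
        if d%(1<<uint(e)) == 0 {
            return lvl // first (coarsest) level whose period divides the distance
        }
    }
    return -1 // no diff is stored at this slot
}
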
@@ -58,7 +60,7 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
 	}

 	// Get anchor state to compute the diff from.
-	anchorState, err := s.getAnchorState(offset, lvl, slot)
+	anchorState, err := s.getAnchorState(ctx, offset, lvl, slot)
 	if err != nil {
 		return err
 	}
@@ -133,6 +135,9 @@ func (s *Store) saveHdiff(lvl int, anchor, st state.ReadOnlyBeaconState) error {
 			return err
 		}
 	}
+	if err := s.stateDiffCache.setLevelHasData(lvl); err != nil {
+		return err
+	}

 	return nil
 }
@@ -168,8 +173,12 @@ func (s *Store) saveFullSnapshot(st state.ReadOnlyBeaconState) error {
 	}
 	// Save the full state to the cache, and invalidate other levels.
 	s.stateDiffCache.clearAnchors()
-	err = s.stateDiffCache.setAnchor(0, st)
-	if err != nil {
+	if len(flags.Get().StateDiffExponents) > 1 {
+		if err = s.stateDiffCache.setAnchor(0, st); err != nil {
 			return err
 		}
+	}
+	if err := s.stateDiffCache.setLevelHasData(0); err != nil {
+		return err
+	}

@@ -230,7 +239,7 @@ func (s *Store) getFullSnapshot(slot uint64) (state.BeaconState, error) {
 	}
 	rawEnc := bucket.Get(key)
 	if rawEnc == nil {
-		return errors.New("state not found")
+		return errSnapshotNotFound
 	}
 	enc = slices.Clone(rawEnc)
 	return nil

@@ -1,19 +1,153 @@
 package kv

 import (
+	"context"
+	"encoding/binary"
+	"errors"
 	"sync"

 	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
 	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
+	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
+	pkgerrors "github.com/pkg/errors"
+	"go.etcd.io/bbolt"
 )

 type stateDiffCache struct {
 	sync.RWMutex
-	anchors []state.ReadOnlyBeaconState
-	offset  uint64
+	anchors        []state.ReadOnlyBeaconState
+	levelsWithData []bool
+	offset         uint64
 }

+func populateStateDiffCacheFromDB(s *Store, offset uint64) (*stateDiffCache, error) {
+	cache := &stateDiffCache{
+		anchors:        make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1),
+		levelsWithData: make([]bool, len(flags.Get().StateDiffExponents)),
+		offset:         offset,
+	}
+
+	if err := s.db.View(func(tx *bbolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return bbolt.ErrBucketNotFound
+		}
+		for level := range cache.levelsWithData {
+			if level == 0 {
+				if bucket.Get(makeKeyForStateDiffTree(0, offset)) != nil {
+					cache.levelsWithData[level] = true
+				}
+				continue
+			}
+			cursor := bucket.Cursor()
+			prefix := []byte{byte(level)}
+			key, _ := cursor.Seek(prefix)
+			if key != nil && key[0] == byte(level) {
+				slot, ok := slotFromStateDiffKey(key)
+				if !ok {
+					return ErrStateDiffCorrupted
+				}
+				if slot < offset {
+					return ErrStateDiffCorrupted
+				}
+				if computeLevel(offset, primitives.Slot(slot)) != level {
+					return ErrStateDiffCorrupted
+				}
+				if !hasCompleteDiffAtLevelSlot(bucket, level, slot) {
+					return ErrStateDiffCorrupted
+				}
+				cache.levelsWithData[level] = true
+			}
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	anchor0, err := s.getFullSnapshot(offset)
+	if err != nil {
+		if errors.Is(err, errSnapshotNotFound) {
+			return nil, pkgerrors.Wrapf(ErrStateDiffMissingSnapshot, "offset snapshot at slot %d", offset)
+		}
+		return nil, pkgerrors.Wrapf(ErrStateDiffCorrupted, "failed to load offset snapshot at slot %d: %v", offset, err)
+	}
+	// Only cache anchor if there are higher levels that need it.
+	// With a single exponent, len(anchors)==0 and no caching is needed.
+	if len(cache.anchors) > 0 {
+		cache.anchors[0] = anchor0
+	}
+	cache.levelsWithData[0] = true
+
+	return cache, nil
+}
+
+func validateStateDiffCache(ctx context.Context, s *Store, cache *stateDiffCache) error {
+	// Copy level flags under lock, then release before validation work.
+	// stateByDiff may consult cache metadata and should never be called while holding cache locks.
+	cache.RLock()
+	levels := make([]bool, len(cache.levelsWithData))
+	copy(levels, cache.levelsWithData)
+	cache.RUnlock()
+
+	for level, hasData := range levels {
+		if !hasData || level == 0 {
+			continue
+		}
+		maxSlot, err := latestSlotForLevel(s, level)
+		if err != nil {
+			return err
+		}
+		if _, err := s.stateByDiff(ctx, primitives.Slot(maxSlot)); err != nil {
+			return pkgerrors.Wrapf(ErrStateDiffCorrupted, "state diff validation failed for level %d slot %d: %v", level, maxSlot, err)
+		}
+	}
+	return nil
+}
+
+func latestSlotForLevel(s *Store, level int) (uint64, error) {
+	var maxSlot uint64
+	found := false
+	err := s.db.View(func(tx *bbolt.Tx) error {
+		bucket := tx.Bucket(stateDiffBucket)
+		if bucket == nil {
+			return bbolt.ErrBucketNotFound
+		}
+		cursor := bucket.Cursor()
+		prefix := []byte{byte(level)}
+		for key, _ := cursor.Seek(prefix); key != nil && key[0] == byte(level); key, _ = cursor.Next() {
+			slot, ok := slotFromStateDiffKey(key)
+			if !ok {
+				return ErrStateDiffCorrupted
+			}
+			if !found || slot > maxSlot {
+				maxSlot = slot
+				found = true
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return 0, err
+	}
+	if !found {
+		return 0, ErrStateDiffCorrupted
+	}
+	return maxSlot, nil
+}
+
+func slotFromStateDiffKey(key []byte) (uint64, bool) {
+	if len(key) < 9 {
+		return 0, false
+	}
+	return binary.LittleEndian.Uint64(key[1:9]), true
+}
+
+func hasCompleteDiffAtLevelSlot(bucket *bbolt.Bucket, level int, slot uint64) bool {
+	key := makeKeyForStateDiffTree(level, slot)
+	stateKey := append(append([]byte{}, key...), stateSuffix...)
+	validatorKey := append(append([]byte{}, key...), validatorSuffix...)
+	balancesKey := append(append([]byte{}, key...), balancesSuffix...)
+	return bucket.Get(stateKey) != nil && bucket.Get(validatorKey) != nil && bucket.Get(balancesKey) != nil
+}

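Read together, slotFromStateDiffKey and hasCompleteDiffAtLevelSlot imply the on-disk key layout: one level byte, the slot as eight little-endian bytes, then an optional component suffix. A hypothetical reconstruction of the writer side, which the real makeKeyForStateDiffTree may differ from:

// diffKey mirrors the layout the readers above decode.
func diffKey(level int, slot uint64, suffix string) []byte {
    key := make([]byte, 9, 9+len(suffix))
    key[0] = byte(level)                          // level prefix, also used for cursor scans
    binary.LittleEndian.PutUint64(key[1:9], slot) // matches slotFromStateDiffKey
    return append(key, suffix...)                 // e.g. "" for snapshots or "_b" for balances
}
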
 func newStateDiffCache(s *Store) (*stateDiffCache, error) {
@@ -37,8 +171,9 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
 	}

 	return &stateDiffCache{
-		anchors: make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1), // -1 because last level doesn't need to be cached
-		offset:  offset,
+		anchors:        make([]state.ReadOnlyBeaconState, len(flags.Get().StateDiffExponents)-1), // -1 because last level doesn't need to be cached
+		levelsWithData: make([]bool, len(flags.Get().StateDiffExponents)),
+		offset:         offset,
 	}, nil
 }

@@ -58,6 +193,25 @@ func (c *stateDiffCache) setAnchor(level int, anchor state.ReadOnlyBeaconState)
 	return nil
 }

+func (c *stateDiffCache) levelHasData(level int) bool {
+	c.RLock()
+	defer c.RUnlock()
+	if level < 0 || level >= len(c.levelsWithData) {
+		return false
+	}
+	return c.levelsWithData[level]
+}
+
+func (c *stateDiffCache) setLevelHasData(level int) error {
+	c.Lock()
+	defer c.Unlock()
+	if level < 0 || level >= len(c.levelsWithData) {
+		return errors.New("state diff cache: level data index out of range")
+	}
+	c.levelsWithData[level] = true
+	return nil
+}
+
 func (c *stateDiffCache) getOffset() uint64 {
 	c.RLock()
 	defer c.RUnlock()

Some files were not shown because too many files have changed in this diff.