Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: payload-ev ... 46b300c6ca (183 commits)
The 183 commits in this compare:

46b300c6ca, 04d45b6194, 3e7cd8c2f1, ea1962bf17, 6072e9c335, 7d92cc5c32, 4b8973289a, 51c64e75c0, d1ceff6621, e63c1bebfe,
25becdcd33, 792fa22099, c5b3d3531c, cc4510bb77, 6fa0e9cf5f, 6b5ba5ad01, a2459aa365, 80641dc3ae, 59bb4a8301, 0db74365e0,
6f90101364, 49e1763ec2, c2527c82cd, d4ea8fafd6, 07d1d6bdf9, f938da99d9, 9deec69cc7, 2767f08f4d, d46c620783, dd05e44ef3,
9da36a5de6, 7950a24926, ea51253be9, 2ac30f5ce6, 7418c00ad6, 66342655fd, 18eca953c1, 8191bb5711, d4613aee0c, 9fcc1a7a77,
08f117f04f, 0b6365781f, 0c994445ea, 77c32203af, 743e6bab07, 451d2a8bc5, 6508bdfa9a, d2bf512f36, e62fe66b0a, 08fff3dec4,
75dea214ac, 4374e709cb, be300f80bd, 096cba5b2d, d5127233e4, 3d35cc20ec, 1e658530a7, 75a3d45470, bbd2d4da0f, 378468c1ec,
25d375dbe0, 2d5f3112c8, 1d49a2a88d, b119290584, 9b0cffcdea, 979466a3d7, 08ba8dd487, a235a581e1, 45b88de7f2, b360794c9c,
f69e017f6a, 19e5684875, 24fc76b7fb, 0a4aad543b, ab8584d138, 0fc9ab925a, 9b6cd96012, dda5ee3334, 14c67376c3, 9c8b68a66d,
a3210157e2, 1536d59e30, 11e46a4560, 5a2e51b894, d20ec4c7a1, 7a70abbd15, bba1358637, a2b84c9320, edef17e41d, 85c5d31b5b,
e6af417c62, 3509622323, 80d7bd6084, 8d3c3fd40b, 1caf7074a7, 92bd155e3f, 8360b0f882, bc0d60138c, 32f377a665, 2ab792b0fe,
2ae8de05dd, cff96129b5, 8abd1db7c1, a4d726184f, 85acf242f6, fa056c2d21, ba6d1d0c6b, 69d3453f97, a69cf6f5d4, 0a46c2d16d,
3c1b8859bc, 9f645ae0a4, 5eeeb9ed15, 61de11e2c4, 2773bdef89, 2a23dc7f4a, f97622b054, 08d0f42725, 74c8a25354, a466c6db9c,
4da6c4291f, 2d242a8d09, 6be1541e57, b845222ce7, 2061fc8a2f, 5bbdebee22, 26100e074d, 768fa0e5a1, eb93350583, affdab7776,
eb556adab5, 11bb8542a4, 09d886c676, 11c5c6fb8b, 10804bbb56, e6f3b636ac, e95676dd91, b78c2c354b, 55e2001a0b, c093283b1b,
5449fd0352, 3d7f7b588b, 86b65e0912, e9dac06037, 893cf60921, 41c9f160a2, 707abe6112, 76975a134d, 672de432a2, 4a6d88d9fb,
2f067c4164, 81266f60af, 207f36065a, eb9feabd6f, bc0868e232, 35c1ab5e88, 1ff836e549, 21bb6f5258, 08f038fe80, 63279bcadf,
61628efd44, 9b07f13cd3, 1397a79b4c, 929115639d, 14dca40786, fa7596bceb, 5161f087fc, 71050ab076, 614367ddcf, 3f7371445b,
a15a1ade17, 798376b1d7, 93271050bf, 8dfbabc691, af2522e5f0, 452d42bd10, 3e985377ce, ab2e836d3f, 14158bea9c, e14590636f,
ce3660d2e7, 7853cb9db0, 8cfeda1473
.github/PULL_REQUEST_TEMPLATE.md (vendored, 3 lines changed)
@@ -34,4 +34,5 @@ Fixes #

- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
- [ ] I have added a description with sufficient context for reviewers to understand this PR.
- [ ] I have tested that my changes work as expected and I added a testing plan to the PR description (if applicable).
.github/workflows/check-logs.yml (vendored, normal file, 23 lines)
@@ -0,0 +1,23 @@
name: Check log.go files
on: [ pull_request ]

jobs:
  check-logs:
    runs-on: ubuntu-4

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go 1.25.1
        uses: actions/setup-go@v5
        with:
          go-version: '1.25.1'

      - name: Install ripgrep
        run: sudo apt-get install -y ripgrep

      - name: Check log.go files
        run: ./hack/check-logs.sh
.gitignore (vendored, 4 lines changed)
@@ -38,9 +38,13 @@ metaData

# execution API authentication
jwt.hex
execution/

# manual testing
tmp

# spectest coverage reports
report.txt

# execution client data
execution/
@@ -193,6 +193,7 @@ nogo(
    "//tools/analyzers/featureconfig:go_default_library",
    "//tools/analyzers/gocognit:go_default_library",
    "//tools/analyzers/ineffassign:go_default_library",
    "//tools/analyzers/httpwriter:go_default_library",
    "//tools/analyzers/interfacechecker:go_default_library",
    "//tools/analyzers/logcapitalization:go_default_library",
    "//tools/analyzers/logruswitherror:go_default_library",
CHANGELOG.md (126 lines changed)
@@ -4,6 +4,132 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18

Release highlights:

- Fixed a potential deadlock scenario in data column batch verification
- Improved processing and metrics for cells and proofs

We are aware of [an issue](https://github.com/OffchainLabs/prysm/issues/16160) where Prysm struggles to sync from an out-of-sync state. We will have another release before the end of the year to address this issue.

Our postmortem document from the December 4th mainnet issue has been published on our [documentation site](https://prysm.offchainlabs.com/docs/misc/mainnet-postmortems/).

### Added

- Track the dependent root of the latest finalized checkpoint in forkchoice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16103)
- Proposal design document to implement graffiti. Currently it is empty by default, and the idea is to have it of the form GE168dPR63af. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15983)
- Add support for detecting and logging per-address reachability via libp2p AutoNAT v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16100)
- Static analyzer that ensures each `httputil.HandleError` call is followed by a `return` statement (see the sketch after this release section). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16134)
- Prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)

### Changed

- Optimise migratetocold by avoiding a brute-force for loop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16101)
- The e2e sync committee evaluator now skips the first slot after startup (the fork epoch is already skipped for these checks). The skip only applies on startup, because Altair is active from epoch 0 and validators need to warm up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16145)
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)

### Removed

- Unnecessary copy removed from Eth1DataHasEnoughSupport. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16118)

### Fixed

- Incorrect constructor return type [#16084](https://github.com/OffchainLabs/prysm/pull/16084). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16084)
- Fixed a possible race when validating two attestations at the same time. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16105)
- Fix missing return after the version header check in SubmitAttesterSlashingsV2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16126)
- Fix deadlock in data column gossip KZG batch verification when a caller times out, preventing result delivery. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16141)
- Fixed a replay state issue in the REST API caused by the attester and sync committee duties endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16136)
- Do not error when the committee has been computed correctly but updating the cache failed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16142)
- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16144)
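Two of the v7.1.1 entries above target the same bug class: an HTTP handler that reports an error but does not return keeps executing and can write a second, conflicting response. The sketch below only illustrates the pattern the new analyzer enforces; it is not Prysm code, and the local `writeError` helper stands in for `httputil.HandleError`, whose exact signature is not shown in this diff.

```go
package example

import "net/http"

// handleGet sketches the pattern the analyzer checks for: every call to an
// error-writing helper must be immediately followed by a return, otherwise
// the handler keeps running after an error response has already been written.
func handleGet(w http.ResponseWriter, r *http.Request) {
	v := r.URL.Query().Get("epoch")
	if v == "" {
		// Hypothetical helper standing in for httputil.HandleError.
		writeError(w, "epoch is required", http.StatusBadRequest)
		return // without this return, the analyzer described above would flag the call site
	}
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte(v))
}

func writeError(w http.ResponseWriter, msg string, code int) {
	http.Error(w, msg, code)
}
```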
## [v7.1.0](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.1.0) - 2025-12-10

This release includes several key features/fixes. If you are running v7.0.0, you should update to v7.0.1 or later and remove the flag `--disable-last-epoch-targets`.

Release highlights:

- Backfill is now supported in Fulu. Backfill from checkpoint sync now supports data columns. Run with `--enable-backfill` when using checkpoint sync.
- A new node configuration to custody enough data columns to reconstruct blobs. Use flag `--semi-supernode` to custody at least 50% of the data columns.
- Critical fixes in attestation processing.

A post mortem doc with full details on the mainnet attestation processing issue from December 4th is expected in the coming days.

### Added

- Add Fulu support to light client processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15995)
- Record data column gossip KZG batch verification latency in both the pooled worker and fallback paths so the `beacon_kzg_verification_data_column_batch_milliseconds` histogram reflects gossip traffic, annotated with `path` labels to distinguish the sources. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16018)
- Implement Gloas state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15611)
- Add initial configs for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add kv functions for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add supported version for fork versions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16030)
- Prometheus metric `gossip_attestation_verification_milliseconds` to track attestation gossip topic validation latency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15785)
- Integrate state-diff into `State()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16033)
- Implement Gloas fork support in consensus-types/blocks with factory methods, getters, setters, and proto handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15618)
- Integrate state-diff into `HasState()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16045)
- Added `--semi-supernode` flag to custody half of a supernode's data column requirements while still allowing reconstruction for blob retrieval. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16029)
- Data column backfill. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Backfill metrics for columns: backfill_data_column_sidecar_downloaded, backfill_data_column_sidecar_downloaded_bytes, backfill_batch_columns_download_ms, backfill_batch_columns_verify_ms. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16099)

### Changed

- Improve readability in slashing import and remove duplicated code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15957)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Changed `--subscribe-all-data-subnets` flag to `--supernode` and aliased `--subscribe-all-data-subnets` for existing users. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16012)
- Use explicit slot component timing configs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15999)
- Downgraded log level from INFO to DEBUG on PrepareBeaconProposer updated fee recipients. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Change the logging behaviour of updated fee recipients to log only the count of validators at Debug level and all validator indices at Trace level. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Stop emitting payload attribute events during late block handling when we are not proposing the next slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16026)
- Initialize the `ExecutionRequests` field in the gossip block map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16047)
- Avoid redundant WithHttpEndpoint when JWT is provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16032)
- Removed dead slot parameter from blobCacheEntry.filter. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16021)
- Added log prefix to the `genesis` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Added log prefix to the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- `WithGenesisValidatorsRoot`: Use camelCase for log field param. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Move `Origin checkpoint found in db` from WARN to INFO, since it is the expected behaviour. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Backfill metrics that changed name and/or histogram buckets: backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_time_waiting -> backfill_batch_waiting_ms, backfill_batch_time_roundtrip -> backfill_batch_roundtrip_ms, backfill_blocks_bytes_downloaded -> backfill_blocks_downloaded_bytes, backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_blocks_time_download -> backfill_batch_blocks_download_ms, backfill_batch_blobs_time_download -> backfill_batch_blobs_download_ms, backfill_blobs_bytes_downloaded -> backfill_blocks_downloaded_bytes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Move the "Not enough connected peers" log (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16097)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; the default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)

### Removed

- Remove validator cross-client from end-to-end tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16025)
- `NUMBER_OF_COLUMNS` configuration (no longer in the specification, replaced by a preset). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
- `MAX_CELLS_IN_EXTENDED_MATRIX` configuration (no longer in the specification). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)

### Fixed

- Nil check for the block if it doesn't exist in the DB in fetchOriginSidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16006)
- Fix proposals progress bar count [#16020](https://github.com/OffchainLabs/prysm/pull/16020). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16020)
- Move the `BlockGossipReceived` event to the end of gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16031)
- Fix state diff repetitive anchor slot bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16037)
- Check that the JWT secret length is exactly 256 bits (32 bytes) as per the Engine API specification (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15939)
- http_error_count now matches the other cases by listing the endpoint name rather than the actual URL requested. This improves metrics cardinality. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16055)
- Fix array out of bounds in static analyzer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16058)
- Fix E2E tests to be able to start from the Electra genesis fork or future forks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16048)
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
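The JWT fix above enforces the Engine API requirement of a 256-bit secret. The following is a minimal, hypothetical sketch of such a validation for a hex-encoded secret file like `jwt.hex`; the function name and error messages are invented for illustration and do not reflect Prysm's actual implementation.

```go
package example

import (
	"encoding/hex"
	"fmt"
	"os"
	"strings"
)

// loadJWTSecret reads a hex-encoded JWT secret and rejects anything that is
// not exactly 256 bits (32 bytes), mirroring the Engine API requirement
// referenced in the changelog entry above.
func loadJWTSecret(path string) ([]byte, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	s := strings.TrimPrefix(strings.TrimSpace(string(raw)), "0x")
	secret, err := hex.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("jwt secret is not valid hex: %w", err)
	}
	if len(secret) != 32 {
		return nil, fmt.Errorf("jwt secret must be exactly 32 bytes, got %d", len(secret))
	}
	return secret, nil
}
```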
## [v7.0.1](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.0.1) - 2025-12-08

This patch release contains 4 cherry-picked changes to address the mainnet attestation processing issue from 2025-12-04. Operators are encouraged to update to this release as soon as practical. As of this release, the feature flag `--disable-last-epoch-targets` has been deprecated and can be safely removed from your node configuration.

A post mortem doc with full details is expected to be published later this week.

### Changed

- Move the "Not enough connected peers" log (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; the default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)

### Fixed

- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)

## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10

This is our initial mainnet release for the Ethereum mainnet Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or a later release prior to the Fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "common.go",
        "header.go",
        "log.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/api/apiutil",
    visibility = ["//visibility:public"],
@@ -5,8 +5,6 @@ import (
    "sort"
    "strconv"
    "strings"

    log "github.com/sirupsen/logrus"
)

type mediaRange struct {
api/apiutil/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package apiutil

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/apiutil")
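This generated file gives the package a logrus entry tagged with a `package` field; per the comment, the prefixed formatter derives the displayed prefix from the text after the last slash. Below is a small, hypothetical usage sketch (not part of the diff) showing how code in such a package would log through that shared entry.

```go
package example

import "github.com/sirupsen/logrus"

// log mirrors the generated variable: every entry from this package carries
// a "package" field that the prefixed formatter can turn into a display prefix.
var log = logrus.WithField("package", "api/apiutil")

func reportStartup(port int) {
	// Fields added here are merged with the package field from the generated file,
	// so the emitted line identifies both the package and the call-site context.
	log.WithField("port", port).Info("HTTP server started")
}
```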
@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package beacon

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "beacon")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/beacon")
@@ -6,6 +6,7 @@ go_library(
|
||||
"bid.go",
|
||||
"client.go",
|
||||
"errors.go",
|
||||
"log.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/api/client/builder",
|
||||
@@ -63,6 +64,5 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
)
|
||||
|
||||
@@ -70,7 +70,7 @@ type requestLogger struct{}
|
||||
func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
b := bytes.NewBuffer(nil)
|
||||
if r.Body == nil {
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"bodyBase64": "(nil value)",
|
||||
"url": r.URL.String(),
|
||||
}).Info("Builder http request")
|
||||
@@ -87,7 +87,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
return err
|
||||
}
|
||||
r.Body = io.NopCloser(b)
|
||||
log.WithFields(log.Fields{
|
||||
log.WithFields(logrus.Fields{
|
||||
"bodyBase64": string(body),
|
||||
"url": r.URL.String(),
|
||||
}).Info("Builder http request")
|
||||
|
||||
@@ -23,7 +23,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type roundtrip func(*http.Request) (*http.Response, error)
|
||||
|
||||
api/client/builder/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package builder

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/builder")
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"event_stream.go",
|
||||
"log.go",
|
||||
"utils.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/api/client/event",
|
||||
@@ -23,8 +24,5 @@ go_test(
|
||||
"utils_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
deps = ["//testing/require:go_default_library"],
|
||||
)
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/client"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestNewEventStream(t *testing.T) {
|
||||
|
||||
api/client/event/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package event

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/event")
@@ -4,6 +4,7 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"grpcutils.go",
|
||||
"log.go",
|
||||
"parameters.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",
|
||||
|
||||
@@ -32,7 +32,7 @@ func LogRequests(
|
||||
)
|
||||
start := time.Now()
|
||||
err := invoker(ctx, method, req, reply, cc, opts...)
|
||||
logrus.WithField("backend", header["x-backend"]).
|
||||
log.WithField("backend", header["x-backend"]).
|
||||
WithField("method", method).WithField("duration", time.Since(start)).
|
||||
Debug("gRPC request finished.")
|
||||
return err
|
||||
@@ -58,7 +58,7 @@ func LogStream(
|
||||
grpc.Header(&header),
|
||||
)
|
||||
strm, err := streamer(ctx, sd, conn, method, opts...)
|
||||
logrus.WithField("backend", header["x-backend"]).
|
||||
log.WithField("backend", header["x-backend"]).
|
||||
WithField("method", method).
|
||||
Debug("gRPC stream started.")
|
||||
return strm, err
|
||||
@@ -71,7 +71,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
|
||||
if h != "" {
|
||||
keyValue := strings.Split(h, "=")
|
||||
if len(keyValue) < 2 {
|
||||
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
|
||||
log.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
|
||||
continue
|
||||
}
|
||||
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
|
||||
|
||||
api/grpc/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package grpc

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/grpc")
@@ -1,5 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package httprest

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "httprest")
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/httprest")
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"middleware.go",
|
||||
"util.go",
|
||||
],
|
||||
@@ -27,6 +28,5 @@ go_test(
|
||||
"//api:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
api/server/middleware/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package middleware

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/middleware")
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/api/apiutil"
|
||||
"github.com/rs/cors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Middleware func(http.Handler) http.Handler
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/api"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// frozenHeaderRecorder allows asserting that response headers were not modified
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"debounce.go",
|
||||
"every.go",
|
||||
"log.go",
|
||||
"multilock.go",
|
||||
"scatter.go",
|
||||
],
|
||||
|
||||
@@ -6,8 +6,6 @@ import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RunEvery runs the provided command periodically.
|
||||
|
||||
async/log.go (normal file, 9 lines)
@@ -0,0 +1,9 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package async

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "async")
@@ -14,6 +14,7 @@ go_library(
|
||||
"head_sync_committee_info.go",
|
||||
"init_sync_process_block.go",
|
||||
"log.go",
|
||||
"log_helpers.go",
|
||||
"merge_ascii_art.go",
|
||||
"metrics.go",
|
||||
"options.go",
|
||||
|
||||
@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
var ok bool
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(st.Slot())
|
||||
if e == stateEpoch {
|
||||
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
|
||||
if e == stateEpoch || fuluAndNextEpoch {
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
st = st.Copy()
|
||||
if slot > st.Slot() {
|
||||
// At this point either we know we are proposing on a future slot or we need to still compute the
|
||||
// right proposer index pre-Fulu, either way we need to copy the state to process it.
|
||||
st = st.Copy()
|
||||
var err error
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
|
||||
if err != nil {
|
||||
@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
if e > stateEpoch {
|
||||
if e > stateEpoch && !fuluAndNextEpoch {
|
||||
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
|
||||
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
|
||||
require.Equal(t, vhs[0].String(), vh0)
|
||||
require.Equal(t, vhs[1].String(), vh1)
|
||||
}
|
||||
|
||||
func TestComputePayloadAttribute(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
proposingSlot: slot,
|
||||
headRoot: [32]byte{},
|
||||
}
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -53,58 +54,53 @@ type fcuConfig struct {
|
||||
}
|
||||
|
||||
// sendFCU handles the logic to notify the engine of a forkchoice update
|
||||
// for the first time when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions when the
|
||||
// incoming block is late, preparing payload attributes in this case while it
|
||||
// only sends a message with empty attributes for early blocks.
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if !s.isNewHead(cfg.headRoot) {
|
||||
return nil
|
||||
// when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions.
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
if cfg.postState.Version() < version.Fulu {
|
||||
// update the caches to compute the right proposer index
|
||||
// this function is called under a forkchoice lock which we need to release.
|
||||
s.ForkChoicer().Unlock()
|
||||
s.updateCachesPostBlockProcessing(cfg)
|
||||
s.ForkChoicer().Lock()
|
||||
}
|
||||
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not get forkchoice update argument")
|
||||
return
|
||||
}
|
||||
// If head has not been updated and attributes are nil, we can skip the FCU.
|
||||
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
|
||||
return
|
||||
}
|
||||
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
|
||||
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
|
||||
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
|
||||
// to the engine if needed
|
||||
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
cfg.ctx = slotCtx
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not compute payload attributes")
|
||||
return
|
||||
}
|
||||
if fcuArgs.attributes.IsEmpty() {
|
||||
return
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
|
||||
if s.isNewHead(fcuArgs.headRoot) {
|
||||
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
}
|
||||
|
||||
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
|
||||
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
|
||||
// The caller of this function should NOT have a lock in forkchoice store.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
|
||||
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
|
||||
defer span.End()
|
||||
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
|
||||
ctx = trace.NewContext(s.ctx, span)
|
||||
s.ForkChoicer().Lock()
|
||||
defer s.ForkChoicer().Unlock()
|
||||
_, err := s.notifyForkchoiceUpdate(ctx, args)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify forkchoice update")
|
||||
log.WithError(err).Error("Could not notify forkchoice update")
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
|
||||
// Only need to prune attestations from pool if the head has changed.
|
||||
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldOverrideFCU checks whether the incoming block is still subject to being
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||
headBlock: wsb,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
|
||||
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
|
||||
require.Equal(t, true, has)
|
||||
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
|
||||
headRoot: r,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
}
|
||||
|
||||
func TestShouldOverrideFCU(t *testing.T) {
|
||||
|
||||
@@ -1,164 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var log = logrus.WithField("prefix", "blockchain")
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() >= version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() >= version.Bellatrix {
|
||||
p, err := b.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
case err != nil:
|
||||
return err
|
||||
default:
|
||||
log = log.WithField("txCount", len(txs))
|
||||
txsPerSlotCount.Set(float64(len(txs)))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Deneb {
|
||||
kzgs, err := b.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// logs payload related data every slot.
|
||||
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil
|
||||
}
|
||||
payload, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.GasLimit() == 0 {
|
||||
return errors.New("gas limit should not be 0")
|
||||
}
|
||||
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
|
||||
fields := logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockNumber": payload.BlockNumber(),
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}
|
||||
if block.Version() >= version.Capella {
|
||||
withdrawals, err := payload.Withdrawals()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
fields["withdrawals"] = len(withdrawals)
|
||||
changes, err := block.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
if len(changes) > 0 {
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/blockchain")
|
||||
|
||||
beacon-chain/blockchain/log_helpers.go (normal file, 162 lines)
@@ -0,0 +1,162 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// logs state transition related data every slot.
|
||||
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
|
||||
log := log.WithField("slot", b.Slot())
|
||||
if len(b.Body().Attestations()) > 0 {
|
||||
log = log.WithField("attestations", len(b.Body().Attestations()))
|
||||
}
|
||||
if len(b.Body().AttesterSlashings()) > 0 {
|
||||
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
|
||||
}
|
||||
if len(b.Body().ProposerSlashings()) > 0 {
|
||||
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
|
||||
}
|
||||
if len(b.Body().VoluntaryExits()) > 0 {
|
||||
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
|
||||
}
|
||||
if b.Version() >= version.Altair {
|
||||
agg, err := b.Body().SyncAggregate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
|
||||
}
|
||||
if b.Version() >= version.Bellatrix {
|
||||
p, err := b.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
|
||||
txs, err := p.Transactions()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
case err != nil:
|
||||
return err
|
||||
default:
|
||||
log = log.WithField("txCount", len(txs))
|
||||
txsPerSlotCount.Set(float64(len(txs)))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Deneb {
|
||||
kzgs, err := b.Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get blob KZG commitments")
|
||||
} else if len(kzgs) > 0 {
|
||||
log = log.WithField("kzgCommitmentCount", len(kzgs))
|
||||
}
|
||||
}
|
||||
if b.Version() >= version.Electra {
|
||||
eReqs, err := b.Body().ExecutionRequests()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get execution requests")
|
||||
} else {
|
||||
if len(eReqs.Deposits) > 0 {
|
||||
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
|
||||
}
|
||||
if len(eReqs.Consolidations) > 0 {
|
||||
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
|
||||
}
|
||||
if len(eReqs.Withdrawals) > 0 {
|
||||
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Info("Finished applying state transition")
|
||||
return nil
|
||||
}
|
||||
|
||||
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
|
||||
startTime, err := slots.StartTime(genesis, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
level := log.Logger.GetLevel()
|
||||
if level >= logrus.DebugLevel {
|
||||
parentRoot := block.ParentRoot()
|
||||
lf := logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
"justifiedEpoch": justified.Epoch,
|
||||
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
|
||||
"version": version.String(block.Version()),
|
||||
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
|
||||
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
|
||||
"dataAvailabilityWaitedTime": daWaitedTime,
|
||||
}
|
||||
log.WithFields(lf).Debug("Synced new block")
|
||||
} else {
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": block.Slot(),
|
||||
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
|
||||
"finalizedEpoch": finalized.Epoch,
|
||||
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
|
||||
"epoch": slots.ToEpoch(block.Slot()),
|
||||
}).Info("Synced new block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// logs payload related data every slot.
|
||||
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
|
||||
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if block is execution block")
|
||||
}
|
||||
if !isExecutionBlk {
|
||||
return nil
|
||||
}
|
||||
payload, err := block.Body().Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload.GasLimit() == 0 {
|
||||
return errors.New("gas limit should not be 0")
|
||||
}
|
||||
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
|
||||
fields := logrus.Fields{
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
|
||||
"blockNumber": payload.BlockNumber(),
|
||||
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
|
||||
}
|
||||
if block.Version() >= version.Capella {
|
||||
withdrawals, err := payload.Withdrawals()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
fields["withdrawals"] = len(withdrawals)
|
||||
changes, err := block.Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
if len(changes) > 0 {
|
||||
fields["blsToExecutionChanges"] = len(changes)
|
||||
}
|
||||
}
|
||||
log.WithFields(fields).Debug("Synced new payload")
|
||||
return nil
|
||||
}
|
||||
@@ -34,7 +34,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has attestation",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -42,7 +42,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has deposit",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has attester slashing",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -62,7 +62,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
|
||||
want: "\"Finished applying state transition\" attesterSlashings=1 package=beacon-chain/blockchain slot=0",
|
||||
},
|
||||
{name: "has proposer slashing",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -71,7 +71,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain proposerSlashings=1 slot=0",
|
||||
},
|
||||
{name: "has exit",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -80,7 +80,7 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has everything",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock {
|
||||
@@ -93,11 +93,11 @@ func Test_logStateTransitionData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return wb
|
||||
},
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 package=beacon-chain/blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
|
||||
},
|
||||
{name: "has payload",
|
||||
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
|
||||
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
|
||||
want: "\"Finished applying state transition\" package=beacon-chain/blockchain payloadHash=0x010203 slot=0 syncBitsCount=0 txCount=2",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -22,10 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -33,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}
@@ -46,14 +45,18 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
// Otherwise we need to advance the head state to the start of the target epoch.
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
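The new comparison above pins both dependent-root lookups to headEpoch-1, since forkchoice cannot answer for anything older. A rough sketch of the check this enables, with `dependentRootFn` standing in for the forkchoice store (both names are illustrative, not the actual API):

```go
package main

import "fmt"

// dependentRootFn is a hypothetical lookup: it returns the dependent root of
// the given block root for the given epoch, mirroring the
// DependentRootForEpoch calls used above.
type dependentRootFn func(root [32]byte, epoch uint64) ([32]byte, error)

// headStateUsable sketches the compatibility check: the head state can stand
// in for the checkpoint's pre-state only when both chains resolve to the same
// dependent root at headEpoch-1, i.e. the shuffling they imply is identical.
func headStateUsable(lookup dependentRootFn, headRoot, cpRoot [32]byte, headEpoch uint64) (bool, error) {
	if headEpoch == 0 {
		return false, nil // no prior epoch to compare against
	}
	headDep, err := lookup(headRoot, headEpoch-1)
	if err != nil {
		return false, err
	}
	cpDep, err := lookup(cpRoot, headEpoch-1)
	if err != nil {
		return false, err
	}
	return headDep == cpDep, nil
}

func main() {
	// Toy lookup: every root depends on itself, so two distinct roots always differ.
	lookup := func(root [32]byte, _ uint64) ([32]byte, error) { return root, nil }
	same, _ := headStateUsable(lookup, [32]byte{'A'}, [32]byte{'A'}, 2)
	diff, _ := headStateUsable(lookup, [32]byte{'A'}, [32]byte{'B'}, 2)
	fmt.Println(same, diff) // true false
}
```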
@@ -170,17 +170,141 @@ func TestService_GetRecentPreState(t *testing.T) {
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 31,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 1, Root: ckRoot}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'T'},
|
||||
block: headBlock,
|
||||
slot: 64,
|
||||
state: s,
|
||||
}
|
||||
require.NotNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
|
||||
// Create a fork 30 <-- 31 <-- 32 <--- 64
|
||||
// \---------33
|
||||
// With different dependent roots at epoch 0 for a checkpoint at epoch 2
|
||||
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
headBlock := blk
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
cpRoot := blk.Root()
|
||||
|
||||
service.head = &head{
|
||||
root: [32]byte{'U'},
|
||||
block: headBlock,
|
||||
state: s,
|
||||
slot: 64,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
|
||||
}
|
||||
|
||||
func TestService_GetRecentPreState_Different(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
s, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
|
||||
cp0 := ðpb.Checkpoint{Epoch: 0, Root: ckRoot}
|
||||
err = s.SetFinalizedCheckpoint(cp0)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
|
||||
service.head = &head{
|
||||
root: [32]byte(ckRoot),
|
||||
state: s,
|
||||
block: blk,
|
||||
slot: 33,
|
||||
}
|
||||
require.IsNil(t, service.getRecentPreState(ctx, ðpb.Checkpoint{}))
|
||||
}
|
||||
|
||||
func TestService_GetAttPreState_Concurrency(t *testing.T) {
|
||||
service, _ := minimalTestService(t)
|
||||
ctx := t.Context()
|
||||
|
||||
@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
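For Fulu and later states the hunk above detaches the cache refresh into a goroutine bounded by slotDeadline, so block processing returns without waiting on it. A minimal sketch of that gating, assuming a hypothetical numeric fork version and a caller-supplied refresh callback:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const fuluVersion = 7 // hypothetical numeric fork version, standing in for version.Fulu

// scheduleCacheUpdate sketches the pattern above: for post-Fulu states the
// cache refresh is detached into a goroutine with its own deadline, so the
// block-processing path returns without waiting on it.
func scheduleCacheUpdate(parent context.Context, stateVersion int, deadline time.Duration, update func(context.Context)) {
	if stateVersion < fuluVersion {
		return // pre-Fulu: caches were already refreshed on the synchronous path
	}
	go func() {
		ctx, cancel := context.WithTimeout(parent, deadline)
		defer cancel()
		update(ctx)
	}()
}

func main() {
	done := make(chan struct{})
	scheduleCacheUpdate(context.Background(), fuluVersion, time.Second, func(ctx context.Context) {
		fmt.Println("refreshing caches, deadline still open:", ctx.Err() == nil)
		close(done)
	})
	<-done
}
```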
@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}

func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()

@@ -295,18 +295,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()
@@ -330,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// The caller of this function must not hold a lock on the forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -359,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -372,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock on the forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -634,9 +629,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
return nil, nil
}

numberOfColumns := params.BeaconConfig().NumberOfColumns

if uint64(len(expected)) > numberOfColumns {
if len(expected) > fieldparams.NumberOfColumns {
return nil, errMaxDataColumnsExceeded
}

@@ -818,10 +811,9 @@ func (s *Service) areDataColumnsAvailable(

case <-ctx.Done():
var missingIndices any = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missing))
missingIndicesCount := len(missing)

if missingIndicesCount < numberOfColumns {
if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

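The hunks above swap the runtime params.BeaconConfig().NumberOfColumns lookup for the fieldparams.NumberOfColumns constant and only enumerate the missing indices when fewer than all columns are absent. A small illustrative sketch of that summary step; the constant value of 128 here is an assumption for the example, not taken from the diff:

```go
package main

import (
	"fmt"
	"sort"
)

// numberOfColumns stands in for fieldparams.NumberOfColumns: a compile-time
// constant lets the bound be checked without consulting runtime config.
const numberOfColumns = 128

// describeMissing summarizes which column indices are still missing: when
// every column is absent it reports "all" instead of listing each index.
func describeMissing(missing map[uint64]bool) string {
	if len(missing) >= numberOfColumns {
		return "all"
	}
	idx := make([]uint64, 0, len(missing))
	for i := range missing {
		idx = append(idx, i)
	}
	sort.Slice(idx, func(a, b int) bool { return idx[a] < idx[b] })
	return fmt.Sprint(idx)
}

func main() {
	fmt.Println(describeMissing(map[uint64]bool{3: true, 1: true})) // [1 3]
}
```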
@@ -915,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -929,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root
@@ -948,13 +956,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
return
}

@@ -973,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
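Post-Fulu, lateBlockTasks wraps the goroutine in a defer, so the asynchronous refresh is only scheduled once the synchronous body of the function, including the FCU call, has finished. A compact sketch of that ordering with a placeholder refresh callback:

```go
package main

import (
	"fmt"
	"sync"
)

// runLateTasks sketches the defer-wrapped goroutine used above: the deferred
// closure only fires when the surrounding function returns, so the background
// cache refresh is guaranteed to start after the synchronous work in the
// function body has finished.
func runLateTasks(wg *sync.WaitGroup, refresh func()) {
	defer func() {
		go func() {
			defer wg.Done()
			refresh()
		}()
	}()
	fmt.Println("synchronous late-block work (e.g. sending FCU) runs first")
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	runLateTasks(&wg, func() { fmt.Println("asynchronous cache refresh runs after return") })
	wg.Wait()
}
```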
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock on the forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}

@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
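The reworked helper above no longer returns an error: it now runs off the hot path, so failures are only logged, and the epoch-boundary work is still gated on the last slot of an epoch. A compact sketch of that shape, with placeholder callbacks and an assumed 32-slot epoch:

```go
package main

import (
	"fmt"
	"log"
)

const slotsPerEpoch = 32 // assumption for the sketch; the real value comes from config

// isEpochEnd mirrors the slots.IsEpochEnd check for the sketch.
func isEpochEnd(slot uint64) bool { return (slot+1)%slotsPerEpoch == 0 }

// refreshCaches sketches the fire-and-forget shape of the new helper: every
// failure is logged rather than returned, because the caller no longer waits
// on the result, and the epoch-boundary work only runs at the last slot of an
// epoch.
func refreshCaches(slot uint64, updateNextSlot func() error, handleBoundary func() error) {
	if err := updateNextSlot(); err != nil {
		log.Printf("could not update next slot state cache: %v", err)
		return
	}
	if !isEpochEnd(slot) {
		return
	}
	if err := handleBoundary(); err != nil {
		log.Printf("could not handle epoch boundary: %v", err)
	}
}

func main() {
	refreshCaches(31,
		func() error { fmt.Println("next-slot cache updated"); return nil },
		func() error { fmt.Println("epoch boundary handled"); return nil },
	)
}
```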
@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -816,25 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
func TestOnBlock_InvalidSignature(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
gs, keys := util.DeterministicGenesisState(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
|
||||
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
blk.Signature = []byte{'a'} // Mutate the signature.
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -866,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1339,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1351,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1363,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1375,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
lock.Lock()
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1400,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
// INVALID from FCU, with LVH block 17. No head is viable. We check
|
||||
// that the node is optimistic and that we can actually import a block on top of
|
||||
// 17 and recover.
|
||||
func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.SlotsPerEpoch = 6
|
||||
config.AltairForkEpoch = 1
|
||||
config.BellatrixForkEpoch = 2
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
|
||||
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), jc.Epoch)
|
||||
|
||||
// import a block that justifies the second epoch
|
||||
driftGenesisTime(service, 18, 0)
|
||||
validHeadState, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
firstInvalidRoot, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
|
||||
sjc := validHeadState.CurrentJustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 19, 0)
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing), and the node is optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
headRoot, err := service.HeadRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
|
||||
optimistic, err := service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, optimistic)
|
||||
|
||||
// import another block based on the last valid head state
|
||||
mockEngine = &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = mockEngine
|
||||
driftGenesisTime(service, 20, 0)
|
||||
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
require.Equal(t, jc.Epoch, sjc.Epoch)
|
||||
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
}
|
||||
|
||||
// See the description in #10777 and #10782 for the full setup
|
||||
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
|
||||
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
|
||||
@@ -1642,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1662,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 12; i < 18; i++ {
|
||||
@@ -1684,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1708,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1718,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
lvh := b.Block.Body.ExecutionPayload.ParentHash
|
||||
// check our head
|
||||
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isBlock18OptimisticAfterImport)
|
||||
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
|
||||
|
||||
// import another block to find out that it was invalid
|
||||
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
|
||||
@@ -1768,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1835,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1856,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -1877,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1906,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
@@ -1975,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -2000,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -2028,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -2072,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
t.Log(i)
|
||||
driftGenesisTime(service, primitives.Slot(i), 0)
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2089,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -2109,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
}
|
||||
|
||||
// import the merge block
|
||||
@@ -2130,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -2161,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2282,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2348,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2495,7 +2336,8 @@ func TestMissingBlobIndices(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingDataColumnIndices(t *testing.T) {
|
||||
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
|
||||
const countPlusOne = fieldparams.NumberOfColumns + 1
|
||||
|
||||
tooManyColumns := make(map[uint64]bool, countPlusOne)
|
||||
for i := range countPlusOne {
|
||||
tooManyColumns[uint64(i)] = true
|
||||
@@ -2630,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2731,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2765,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2805,6 +2655,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
@@ -3257,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
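The hunk above turns the previously fire-and-forget forkchoice update into a synchronous call, and on failure the node still records the new head locally. A small sketch of that fallback pattern with placeholder callbacks (not the actual service methods):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// updateHead sketches the fallback: the forkchoice update is called
// synchronously, and if the engine round-trip fails the new head is still
// saved locally so later processing sees a consistent view.
func updateHead(sendFCU func() error, saveHead func() error) {
	if err := sendFCU(); err != nil {
		log.Printf("could not update forkchoice: %v", err)
		if err := saveHead(); err != nil {
			log.Printf("could not save head: %v", err)
		}
	}
}

func main() {
	updateHead(
		func() error { return errors.New("engine unavailable") },
		func() error { fmt.Println("head saved without engine confirmation"); return nil },
	)
}
```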
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)

// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -69,7 +69,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted
@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}

func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear, transitioning
|
||||
// the state, performing batch verification of all collected signatures and then performing the appropriate
|
||||
// actions for a block post-transition.
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
|
||||
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
|
||||
defer span.End()
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
|
||||
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
|
||||
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
|
||||
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
|
||||
isSupernode := flags.Get().Supernode
|
||||
isSemiSupernode := flags.Get().SemiSupernode
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
custodyRequirement := cfg.CustodyRequirement
|
||||
|
||||
// Check if the node was previously subscribed to all data subnets, and if so,
|
||||
// store the new status accordingly.
|
||||
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
|
||||
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not update subscription status to all data subnets")
|
||||
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
|
||||
}
|
||||
|
||||
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
|
||||
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
|
||||
log.Warnf(
|
||||
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
|
||||
flags.SubscribeAllDataSubnets.Name,
|
||||
)
|
||||
// Compute the target custody group count based on current flag configuration.
|
||||
targetCustodyGroupCount := custodyRequirement
|
||||
|
||||
// Supernode: custody all groups (either currently set or previously enabled)
|
||||
if isSupernode {
|
||||
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
}
|
||||
|
||||
// Compute the custody group count.
|
||||
custodyGroupCount := custodyRequirement
|
||||
if isSubscribedToAllDataSubnets {
|
||||
custodyGroupCount = cfg.NumberOfCustodyGroups
|
||||
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
|
||||
if isSemiSupernode {
|
||||
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "minimum custody group count")
|
||||
}
|
||||
|
||||
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
|
||||
}
|
||||
|
||||
// Safely compute the fulu fork slot.
|
||||
@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
|
||||
}
|
||||
}
|
||||
|
||||
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
|
||||
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
|
||||
if err != nil {
|
||||
return 0, 0, errors.Wrap(err, "update custody info")
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, custodyGroupCount, nil
|
||||
if isSupernode {
|
||||
log.WithFields(logrus.Fields{
|
||||
"current": actualCustodyGroupCount,
|
||||
"target": cfg.NumberOfCustodyGroups,
|
||||
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
|
||||
}
|
||||
|
||||
if wasSupernode && !isSupernode {
|
||||
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a supernode.", flags.Supernode.Name)
|
||||
}
|
||||
|
||||
return earliestAvailableSlot, actualCustodyGroupCount, nil
|
||||
}
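The flag handling in the hunk above reduces to a small selection rule. Below is a minimal sketch of that rule, reusing only identifiers that appear in this diff (`flags.GlobalFlags`, `params.BeaconConfig`, `peerdas.MinimumCustodyGroupCountToReconstruct`); the helper name and the precedence between the two flags are illustrative, not the actual implementation:

// Sketch only: how the target custody group count is chosen before calling
// BeaconDB.UpdateCustodyInfo (which itself refuses to lower a previously stored count).
func targetCustodyGroupCount(f *flags.GlobalFlags, custodyRequirement uint64) (uint64, error) {
	cfg := params.BeaconConfig()
	switch {
	case f.Supernode:
		// Supernode: custody every group.
		return cfg.NumberOfCustodyGroups, nil
	case f.SemiSupernode:
		// Semi-supernode: the minimum needed to reconstruct, unless the
		// validator-driven requirement is already higher.
		minGroups, err := peerdas.MinimumCustodyGroupCountToReconstruct()
		if err != nil {
			return 0, errors.Wrap(err, "minimum custody group count")
		}
		return max(custodyRequirement, minGroups), nil
	default:
		return custodyRequirement, nil
	}
}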
|
||||
|
||||
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
|
||||
@@ -603,7 +603,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
custodyRequirement = uint64(4)
|
||||
earliestStoredSlot = primitives.Slot(12)
|
||||
numberOfCustodyGroups = uint64(64)
|
||||
numberOfColumns = uint64(128)
|
||||
)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
@@ -611,7 +610,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
cfg.FuluForkEpoch = fuluForkEpoch
|
||||
cfg.CustodyRequirement = custodyRequirement
|
||||
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
|
||||
cfg.NumberOfColumns = numberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := t.Context()
|
||||
@@ -642,7 +640,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -680,7 +678,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
// ----------
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SubscribeAllDataSubnets = true
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
@@ -695,4 +693,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
})
|
||||
|
||||
t.Run("Supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc)
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.Supernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should still be supernode
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
|
||||
|
||||
// Try to downgrade by removing flag
|
||||
gFlags.SemiSupernode = false
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 32
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 32 due to downgrade prevention by UpdateCustodyInfo
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Start with semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 32 groups
|
||||
|
||||
// Upgrade to full supernode
|
||||
gFlags.SemiSupernode = false
|
||||
gFlags.Supernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Should upgrade to full supernode
|
||||
upgradeSlot := slot + 2
|
||||
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
|
||||
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to all 64 custody groups
|
||||
})
|
||||
|
||||
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
|
||||
service, requirements := minimalTestService(t)
|
||||
err = requirements.db.SaveBlock(ctx, roBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Enable semi-supernode
|
||||
resetFlags := flags.Get()
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.SemiSupernode = true
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(resetFlags)
|
||||
|
||||
// Mock a high custody requirement (simulating many validators)
|
||||
// We need to override the custody requirement calculation
|
||||
// For this test, we'll verify the logic by checking if custodyRequirement > 64
|
||||
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
|
||||
// This would require a different test setup with actual validators
|
||||
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
|
||||
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, slot, actualEas)
|
||||
semiSupernodeCustody := numberOfCustodyGroups / 2 // 32
// With low validator requirements (4), should use the semi-supernode minimum (32)
|
||||
require.Equal(t, semiSupernodeCustody, actualCgc)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3,7 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = ["mock.go"],
|
||||
srcs = [
|
||||
"log.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
|
||||
9
beacon-chain/blockchain/testing/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package testing
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/blockchain/testing")
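The hunks that follow switch the mock chain service from the global logrus entry to this generated package logger. A hypothetical call site (root is a placeholder) showing the intent of that swap:

// Before: no package field on the log line.
logrus.Infof("Saved block with root: %#x", root)
// After: every line carries package=beacon-chain/blockchain/testing.
log.Infof("Saved block with root: %#x", root)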
|
||||
@@ -30,7 +30,6 @@ import (
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var ErrNilState = errors.New("nil state")
|
||||
@@ -267,7 +266,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
@@ -275,7 +274,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
|
||||
}
|
||||
|
||||
// ReceiveBlockBatch processes blocks in batches from initial-sync.
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -296,7 +295,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
|
||||
if err := s.DB.SaveBlock(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
|
||||
log.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = b
|
||||
@@ -305,7 +304,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
|
||||
if s.ReceiveBlockMockErr != nil {
|
||||
return s.ReceiveBlockMockErr
|
||||
}
|
||||
@@ -328,7 +327,7 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
@@ -585,11 +584,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
|
||||
ojc := ðpb.Checkpoint{}
|
||||
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Could not update head")
|
||||
log.WithError(err).Error("Could not update head")
|
||||
}
|
||||
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Could not insert node to forkchoice")
|
||||
log.WithError(err).Error("Could not insert node to forkchoice")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package builder
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "builder")
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/builder")
|
||||
|
||||
1
beacon-chain/cache/BUILD.bazel
vendored
@@ -16,6 +16,7 @@ go_library(
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"log.go",
|
||||
"payload_id.go",
|
||||
"proposer_indices.go",
|
||||
"proposer_indices_disabled.go", # keep
|
||||
|
||||
1
beacon-chain/cache/attestation.go
vendored
@@ -9,7 +9,6 @@ import (
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type attGroup struct {
|
||||
|
||||
1
beacon-chain/cache/committee.go
vendored
@@ -17,7 +17,6 @@ import (
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"deposit_pruner.go",
|
||||
"deposit_tree.go",
|
||||
"deposit_tree_snapshot.go",
|
||||
"log.go",
|
||||
"merkle_tree.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/cache/depositsnapshot",
|
||||
|
||||
@@ -20,7 +20,6 @@ var (
|
||||
Name: "beacondb_all_deposits_eip4881",
|
||||
Help: "The number of total deposits in memory",
|
||||
})
|
||||
log = logrus.WithField("prefix", "cache")
|
||||
)
|
||||
|
||||
// InsertDeposit into the database. If deposit or block number are nil
|
||||
|
||||
9
beacon-chain/cache/depositsnapshot/log.go
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package depositsnapshot
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/cache/depositsnapshot")
|
||||
9
beacon-chain/cache/log.go
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package cache
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/cache")
|
||||
1
beacon-chain/cache/sync_committee.go
vendored
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
|
||||
5
beacon-chain/cache/tracked_validators.go
vendored
@@ -9,7 +9,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -67,7 +66,7 @@ func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (Tra
|
||||
|
||||
val, ok := item.(TrackedValidator)
|
||||
if !ok {
|
||||
logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
|
||||
log.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
|
||||
return TrackedValidator{}, false
|
||||
}
|
||||
|
||||
@@ -113,7 +112,7 @@ func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
|
||||
for cacheKey := range items {
|
||||
index, err := fromCacheKey(cacheKey)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to get validator index from cache key")
|
||||
log.WithError(err).Error("Failed to get validator index from cache key")
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"deposit.go",
|
||||
"epoch_precompute.go",
|
||||
"epoch_spec.go",
|
||||
"log.go",
|
||||
"reward.go",
|
||||
"sync_committee.go",
|
||||
"transition.go",
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.
|
||||
|
||||
9
beacon-chain/core/altair/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package altair
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/altair")
|
||||
@@ -60,7 +60,7 @@ func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb
|
||||
voteCount := uint64(0)
|
||||
|
||||
for _, vote := range beaconState.Eth1DataVotes() {
|
||||
if AreEth1DataEqual(vote, data.Copy()) {
|
||||
if AreEth1DataEqual(vote, data) {
|
||||
voteCount++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package blocks
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "blocks")
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/blocks")
|
||||
|
||||
@@ -90,6 +90,9 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
|
||||
if st == nil || body == nil {
|
||||
return false, errors.New("nil state or block body")
|
||||
}
|
||||
if st.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
if IsPreBellatrixVersion(st.Version()) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -260,11 +260,12 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
|
||||
|
||||
func Test_IsExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
want bool
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
useCapellaSt bool
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "use older than bellatrix state",
|
||||
@@ -331,6 +332,17 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "capella state always enabled",
|
||||
payload: emptyPayload(),
|
||||
header: func() interfaces.ExecutionData {
|
||||
h, err := emptyPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
return h
|
||||
}(),
|
||||
useCapellaSt: true,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -342,6 +354,8 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
if tt.useAltairSt {
|
||||
st, _ = util.DeterministicGenesisStateAltair(t, 1)
|
||||
} else if tt.useCapellaSt {
|
||||
st, _ = util.DeterministicGenesisStateCapella(t, 1)
|
||||
}
|
||||
got, err := blocks.IsExecutionEnabled(st, body)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
"deposits.go",
|
||||
"effective_balance_updates.go",
|
||||
"error.go",
|
||||
"log.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
|
||||
@@ -278,12 +277,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if uint64(curEpoch) < e {
|
||||
continue
|
||||
}
|
||||
bal, err := st.PendingBalanceToWithdraw(srcIdx)
|
||||
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if bal > 0 {
|
||||
if hasBal {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessDeposits is one of the operations performed on each processed
|
||||
|
||||
9
beacon-chain/core/electra/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package electra
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/electra")
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ProcessWithdrawalRequests processes the validator withdrawals from the provided execution payload
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"block.go",
|
||||
"genesis.go",
|
||||
"legacy.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"randao.go",
|
||||
"ranges.go",
|
||||
|
||||
9
beacon-chain/core/helpers/log.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package helpers
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/helpers")
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -152,7 +151,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
log.WithError(err).Error("Could not update committee cache")
|
||||
}
|
||||
|
||||
return indices, nil
|
||||
|
||||
@@ -45,12 +45,13 @@ go_test(
|
||||
"p2p_interface_test.go",
|
||||
"reconstruction_helpers_test.go",
|
||||
"reconstruction_test.go",
|
||||
"semi_supernode_test.go",
|
||||
"utils_test.go",
|
||||
"validator_test.go",
|
||||
"verification_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
@@ -96,8 +97,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
return nil, ErrCustodyGroupTooLarge
|
||||
}
|
||||
|
||||
numberOfColumns := cfg.NumberOfColumns
|
||||
|
||||
numberOfColumns := uint64(fieldparams.NumberOfColumns)
|
||||
columnsPerGroup := numberOfColumns / numberOfCustodyGroups
|
||||
|
||||
columns := make([]uint64, 0, columnsPerGroup)
|
||||
@@ -112,8 +112,9 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
|
||||
// ComputeCustodyGroupForColumn computes the custody group for a given column.
|
||||
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
|
||||
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
numberOfColumns := cfg.NumberOfColumns
|
||||
numberOfCustodyGroups := cfg.NumberOfCustodyGroups
|
||||
|
||||
if columnIndex >= numberOfColumns {
|
||||
|
||||
@@ -30,7 +30,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.NumberOfColumns = 128
|
||||
config.NumberOfCustodyGroups = 64
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package peerdas
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"maps"
|
||||
"sync"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
@@ -107,3 +108,102 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// ColumnIndices represents a set of column indices. This could be the set of indices that a node is required to custody,
|
||||
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
|
||||
type ColumnIndices map[uint64]struct{}
|
||||
|
||||
// Has returns true if the index is present in the ColumnIndices.
|
||||
func (ci ColumnIndices) Has(index uint64) bool {
|
||||
_, ok := ci[index]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Count returns the number of indices present in the ColumnIndices.
|
||||
func (ci ColumnIndices) Count() int {
|
||||
return len(ci)
|
||||
}
|
||||
|
||||
// Set sets the index in the ColumnIndices.
|
||||
func (ci ColumnIndices) Set(index uint64) {
|
||||
ci[index] = struct{}{}
|
||||
}
|
||||
|
||||
// Unset removes the index from the ColumnIndices.
|
||||
func (ci ColumnIndices) Unset(index uint64) {
|
||||
delete(ci, index)
|
||||
}
|
||||
|
||||
// Copy creates a copy of the ColumnIndices.
|
||||
func (ci ColumnIndices) Copy() ColumnIndices {
|
||||
newCi := make(ColumnIndices, len(ci))
|
||||
maps.Copy(newCi, ci)
|
||||
return newCi
|
||||
}
|
||||
|
||||
// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
|
||||
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
|
||||
result := make(ColumnIndices)
|
||||
for index := range ci {
|
||||
if other.Has(index) {
|
||||
result.Set(index)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Merge mutates the receiver so that any index that is set in either of
|
||||
// the two ColumnIndices is set in the receiver after the function finishes.
|
||||
// It does not mutate the other ColumnIndices given as a function argument.
|
||||
func (ci ColumnIndices) Merge(other ColumnIndices) {
|
||||
for index := range other {
|
||||
ci.Set(index)
|
||||
}
|
||||
}
|
||||
|
||||
// ToMap converts a ColumnIndices into a map[uint64]struct{}.
|
||||
// In the future ColumnIndices may be changed to a bit map, so using
|
||||
// ToMap will ensure forwards-compatibility.
|
||||
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
|
||||
return ci.Copy()
|
||||
}
|
||||
|
||||
// ToSlice converts a ColumnIndices into a slice of uint64 indices.
|
||||
func (ci ColumnIndices) ToSlice() []uint64 {
|
||||
indices := make([]uint64, 0, len(ci))
|
||||
for index := range ci {
|
||||
indices = append(indices, index)
|
||||
}
|
||||
return indices
|
||||
}
|
||||
|
||||
// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
|
||||
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
|
||||
ci := make(ColumnIndices, len(indices))
|
||||
for _, index := range indices {
|
||||
ci[index] = struct{}{}
|
||||
}
|
||||
return ci
|
||||
}
|
||||
|
||||
// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
|
||||
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
|
||||
// will allow us to move ColumnIndices underlying type to a bitmap in the future and avoid
|
||||
// lots of loops for things like intersections/unions or copies.
|
||||
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
|
||||
ci := make(ColumnIndices, len(indices))
|
||||
for index, set := range indices {
|
||||
if !set {
|
||||
continue
|
||||
}
|
||||
ci[index] = struct{}{}
|
||||
}
|
||||
return ci
|
||||
}
|
||||
|
||||
// NewColumnIndices creates an empty ColumnIndices.
|
||||
// In the future ColumnIndices may change from a reference type to a value type,
|
||||
// so using this constructor will ensure forwards-compatibility.
|
||||
func NewColumnIndices() ColumnIndices {
|
||||
return make(ColumnIndices)
|
||||
}
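A short usage sketch for the ColumnIndices helpers defined above; the index values are illustrative only:

// Columns we custody locally vs. columns a peer advertises;
// the intersection is what we can actually request from that peer.
local := peerdas.NewColumnIndicesFromSlice([]uint64{0, 1, 2, 3})
remote := peerdas.NewColumnIndicesFromSlice([]uint64{2, 3, 4})

common := local.Intersection(remote) // {2, 3}
local.Merge(remote)                  // local is now {0, 1, 2, 3, 4}; remote is untouched

_ = common.Has(2)    // true
_ = common.Count()   // 2
_ = common.ToSlice() // []uint64{2, 3} in unspecified order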
|
||||
|
||||
@@ -25,3 +25,10 @@ func TestInfo(t *testing.T) {
|
||||
require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewColumnIndicesFromMap(t *testing.T) {
|
||||
t.Run("nil map", func(t *testing.T) {
|
||||
ci := peerdas.NewColumnIndicesFromMap(nil)
|
||||
require.Equal(t, 0, ci.Count())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,10 +5,20 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
var dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
var (
|
||||
dataColumnComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "beacon_data_column_sidecar_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute data column sidecars from blobs.",
|
||||
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
|
||||
},
|
||||
)
|
||||
|
||||
cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "cells_and_proofs_from_structured_computation_milliseconds",
|
||||
Help: "Captures the time taken to compute cells and proofs from structured computation.",
|
||||
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
@@ -33,8 +33,7 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
|
||||
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
|
||||
// The sidecar index must be within the valid range.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
if sidecar.Index >= numberOfColumns {
|
||||
if sidecar.Index >= fieldparams.NumberOfColumns {
|
||||
return ErrIndexTooLarge
|
||||
}
|
||||
|
||||
|
||||
@@ -281,8 +281,11 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
||||
}
|
||||
|
||||
func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
|
||||
const blobCount = 12
|
||||
numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
|
||||
const (
|
||||
blobCount = 12
|
||||
numberOfColumns = fieldparams.NumberOfColumns
|
||||
)
|
||||
|
||||
err := kzg.Start()
|
||||
require.NoError(b, err)
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package peerdas
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -26,7 +27,40 @@ var (
|
||||
func MinimumColumnCountToReconstruct() uint64 {
|
||||
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
|
||||
// If the number of columns is even, then we need total / 2 columns to reconstruct.
|
||||
return (params.BeaconConfig().NumberOfColumns + 1) / 2
|
||||
return (fieldparams.NumberOfColumns + 1) / 2
|
||||
}
|
||||
|
||||
// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
|
||||
// custody enough data columns for reconstruction. This accounts for the relationship between
|
||||
// custody groups and columns, making it future-proof if these values change.
|
||||
// Returns an error if the configuration values are invalid (zero or would cause division by zero).
|
||||
func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
// Validate configuration values
|
||||
if numberOfColumns == 0 {
|
||||
return 0, errors.New("NumberOfColumns cannot be zero")
|
||||
}
|
||||
if cfg.NumberOfCustodyGroups == 0 {
|
||||
return 0, errors.New("NumberOfCustodyGroups cannot be zero")
|
||||
}
|
||||
|
||||
minimumColumnCount := MinimumColumnCountToReconstruct()
|
||||
|
||||
// Calculate how many columns each custody group represents
|
||||
columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups
|
||||
|
||||
// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
|
||||
// for reconstruction purposes as we cannot determine a meaningful custody group count
|
||||
if columnsPerGroup == 0 {
|
||||
return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
|
||||
cfg.NumberOfCustodyGroups, numberOfColumns)
|
||||
}
|
||||
|
||||
// Use ceiling division to ensure we have enough groups to cover the minimum columns
|
||||
// ceiling(a/b) = (a + b - 1) / b
|
||||
return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
|
||||
}
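A quick worked example of the ceiling division above, using the values exercised by the tests later in this diff (128 columns from fieldparams, 64 custody groups):

// columnsPerGroup    = 128 / 64          = 2
// minimumColumnCount = (128 + 1) / 2     = 64
// result             = (64 + 2 - 1) / 2  = 32 custody groups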
|
||||
|
||||
// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
|
||||
@@ -253,7 +287,8 @@ func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []
|
||||
|
||||
// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
|
||||
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
@@ -262,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
return nil, nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, proofs)
|
||||
proofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = proofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
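The refactor above (and the matching one in ComputeCellsAndProofsFromStructured below) replaces appends with pre-sized slices indexed by the loop counter, so each goroutine writes only its own slot and no extra locking is needed. A minimal sketch of the pattern, with items, compute, and Result as placeholder names (errgroup is golang.org/x/sync/errgroup; per-iteration loop variables assume Go 1.22+):

results := make([]Result, len(items))
var wg errgroup.Group
for i, item := range items {
	wg.Go(func() error {
		out, err := compute(item) // expensive per-item step (kzg.ComputeCells in the real code)
		if err != nil {
			return err // Wait returns the first non-nil error
		}
		results[i] = out // each goroutine owns exactly one index, so no mutex is required
		return nil
	})
}
if err := wg.Wait(); err != nil {
	return nil, err
}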
|
||||
@@ -295,42 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
|
||||
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
|
||||
|
||||
for i, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return nil, nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return nil, nil, errors.New("wrong KZG proof size - should never happen")
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
cellsPerBlob = append(cellsPerBlob, cells)
|
||||
proofsPerBlob = append(proofsPerBlob, kzgProofs)
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = kzgProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
|
||||
@@ -17,41 +17,9 @@ import (
|
||||
)
|
||||
|
||||
func TestMinimumColumnsCountToReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
numberOfColumns uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "numberOfColumns=128",
|
||||
numberOfColumns: 128,
|
||||
expected: 64,
|
||||
},
|
||||
{
|
||||
name: "numberOfColumns=129",
|
||||
numberOfColumns: 129,
|
||||
expected: 65,
|
||||
},
|
||||
{
|
||||
name: "numberOfColumns=130",
|
||||
numberOfColumns: 130,
|
||||
expected: 65,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = tc.numberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the minimum number of columns needed to reconstruct.
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
const expected = uint64(64)
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
@@ -200,7 +168,6 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 3
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
@@ -236,7 +203,7 @@ func TestReconstructBlobSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Flatten proofs.
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
|
||||
for _, proofs := range inputProofsPerBlob {
|
||||
for _, proof := range proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
@@ -428,13 +395,12 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("mismatched blob and proof counts", func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Create one blob but proofs for two blobs
|
||||
blobs := [][]byte{{}}
|
||||
|
||||
@@ -447,7 +413,6 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 2
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Generate test blobs
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
137
beacon-chain/core/peerdas/semi_supernode_test.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
func TestSemiSupernodeCustody(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.NumberOfCustodyGroups = 128
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Create a test node ID
|
||||
nodeID := enode.ID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32})
|
||||
|
||||
t.Run("semi-supernode custodies exactly 64 columns", func(t *testing.T) {
|
||||
// Semi-supernode uses 64 custody groups (half of 128)
|
||||
const semiSupernodeCustodyGroupCount = 64
|
||||
|
||||
// Get custody groups for semi-supernode
|
||||
custodyGroups, err := CustodyGroups(nodeID, semiSupernodeCustodyGroupCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyGroups))
|
||||
|
||||
// Verify we get exactly 64 custody columns
|
||||
custodyColumns, err := CustodyColumns(custodyGroups)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, semiSupernodeCustodyGroupCount, len(custodyColumns))
|
||||
|
||||
// Verify the columns are valid (within 0-127 range)
|
||||
for columnIndex := range custodyColumns {
|
||||
if columnIndex >= numberOfColumns {
|
||||
t.Fatalf("Invalid column index %d, should be less than %d", columnIndex, numberOfColumns)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("64 columns is exactly the minimum for reconstruction", func(t *testing.T) {
|
||||
minimumCount := MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, uint64(64), minimumCount)
|
||||
})
|
||||
|
||||
t.Run("semi-supernode vs supernode custody", func(t *testing.T) {
|
||||
// Semi-supernode (64 custody groups)
|
||||
semiSupernodeGroups, err := CustodyGroups(nodeID, 64)
|
||||
require.NoError(t, err)
|
||||
semiSupernodeColumns, err := CustodyColumns(semiSupernodeGroups)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Supernode (128 custody groups = all groups)
|
||||
supernodeGroups, err := CustodyGroups(nodeID, 128)
|
||||
require.NoError(t, err)
|
||||
supernodeColumns, err := CustodyColumns(supernodeGroups)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify semi-supernode has exactly half the columns of supernode
|
||||
require.Equal(t, 64, len(semiSupernodeColumns))
|
||||
require.Equal(t, 128, len(supernodeColumns))
|
||||
require.Equal(t, len(supernodeColumns)/2, len(semiSupernodeColumns))
|
||||
|
||||
// Verify all semi-supernode columns are a subset of supernode columns
|
||||
for columnIndex := range semiSupernodeColumns {
|
||||
if !supernodeColumns[columnIndex] {
|
||||
t.Fatalf("Semi-supernode column %d not found in supernode columns", columnIndex)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMinimumCustodyGroupCountToReconstruct(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
numberOfGroups uint64
|
||||
expectedResult uint64
|
||||
}{
|
||||
{
|
||||
name: "Standard 1:1 ratio (128 columns, 128 groups)",
|
||||
numberOfGroups: 128,
|
||||
expectedResult: 64, // Need half of 128 groups
|
||||
},
|
||||
{
|
||||
name: "2 columns per group (128 columns, 64 groups)",
|
||||
numberOfGroups: 64,
|
||||
expectedResult: 32, // Need 64 columns, which is 32 groups (64/2)
|
||||
},
|
||||
{
|
||||
name: "4 columns per group (128 columns, 32 groups)",
|
||||
numberOfGroups: 32,
|
||||
expectedResult: 16, // Need 64 columns, which is 16 groups (64/4)
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.NumberOfCustodyGroups = tt.numberOfGroups
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
result, err := MinimumCustodyGroupCountToReconstruct()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedResult, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinimumCustodyGroupCountToReconstruct_ErrorCases(t *testing.T) {
|
||||
t.Run("Returns error when NumberOfCustodyGroups is zero", func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.NumberOfCustodyGroups = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
_, err := MinimumCustodyGroupCountToReconstruct()
|
||||
require.NotNil(t, err)
|
||||
require.Equal(t, "NumberOfCustodyGroups cannot be zero", err.Error())
|
||||
})
|
||||
|
||||
t.Run("Returns error when NumberOfCustodyGroups exceeds NumberOfColumns", func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.NumberOfCustodyGroups = 256
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
_, err := MinimumCustodyGroupCountToReconstruct()
|
||||
require.NotNil(t, err)
|
||||
// Just check that we got an error about the configuration
|
||||
require.Equal(t, true, len(err.Error()) > 0)
|
||||
})
|
||||
}
|
||||
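The table-driven test above encodes the reconstruction rule: 64 of the 128 columns are always enough to rebuild the rest, so the required number of custody groups is that column threshold divided by the number of columns each group covers. A minimal sketch of that arithmetic, reproducing the expected values in the table (128 -> 64, 64 -> 32, 32 -> 16); minimumGroupsToReconstruct is a hypothetical stand-in, not Prysm's MinimumCustodyGroupCountToReconstruct:

package main

import (
	"errors"
	"fmt"
)

// Sketch only: the arithmetic the test table above expects.
const numberOfColumns = uint64(128) // NUMBER_OF_COLUMNS

func minimumGroupsToReconstruct(numberOfGroups uint64) (uint64, error) {
	if numberOfGroups == 0 {
		return 0, errors.New("NumberOfCustodyGroups cannot be zero")
	}
	if numberOfGroups > numberOfColumns {
		return 0, errors.New("NumberOfCustodyGroups cannot exceed NumberOfColumns")
	}
	columnsPerGroup := numberOfColumns / numberOfGroups
	minimumColumns := numberOfColumns / 2 // 64 columns suffice for reconstruction
	// Ceiling division: enough groups to cover at least minimumColumns columns.
	return (minimumColumns + columnsPerGroup - 1) / columnsPerGroup, nil
}

func main() {
	for _, groups := range []uint64{128, 64, 32} {
		minimum, _ := minimumGroupsToReconstruct(groups)
		fmt.Printf("groups=%d minimum=%d\n", groups, minimum) // 64, 32, 16
	}
}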
@@ -102,11 +102,13 @@ func ValidatorsCustodyRequirement(state beaconState.ReadOnlyBeaconState, validat
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_block and
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/validator.md#get_data_column_sidecars_from_column_sidecar
|
||||
func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof, src ConstructionPopulator) ([]blocks.RODataColumn, error) {
|
||||
const numberOfColumns = uint64(fieldparams.NumberOfColumns)
|
||||
|
||||
if len(cellsPerBlob) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
start := time.Now()
|
||||
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, params.BeaconConfig().NumberOfColumns)
|
||||
cells, proofs, err := rotateRowsToCols(cellsPerBlob, proofsPerBlob, numberOfColumns)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rotate cells and proofs")
|
||||
}
|
||||
@@ -115,9 +117,8 @@ func DataColumnSidecars(cellsPerBlob [][]kzg.Cell, proofsPerBlob [][]kzg.Proof,
|
||||
return nil, errors.Wrap(err, "extract block info")
|
||||
}
|
||||
|
||||
maxIdx := params.BeaconConfig().NumberOfColumns
|
||||
roSidecars := make([]blocks.RODataColumn, 0, maxIdx)
|
||||
for idx := range maxIdx {
|
||||
roSidecars := make([]blocks.RODataColumn, 0, numberOfColumns)
|
||||
for idx := range numberOfColumns {
|
||||
sidecar := ðpb.DataColumnSidecar{
|
||||
Index: idx,
|
||||
Column: cells[idx],
|
||||
|
||||
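DataColumnSidecars above relies on rotateRowsToCols to turn the blob-major layout returned by the KZG library (one row of cells and proofs per blob) into the column-major layout a sidecar needs (one cell per blob for each column index). The transposition itself is a simple reindexing; the sketch below uses placeholder Cell and Proof types and is not the Prysm implementation, which lives behind the kzg package:

package main

import "fmt"

type Cell [8]byte  // placeholder for the real kzg.Cell
type Proof [4]byte // placeholder for the real kzg.Proof

// rotateRowsToCols transposes blob-major cells/proofs into column-major order,
// checking that every blob carries exactly numberOfColumns entries.
func rotateRowsToCols(cellsPerBlob [][]Cell, proofsPerBlob [][]Proof, numberOfColumns uint64) ([][]Cell, [][]Proof, error) {
	cells := make([][]Cell, numberOfColumns)
	proofs := make([][]Proof, numberOfColumns)
	for blobIdx := range cellsPerBlob {
		if uint64(len(cellsPerBlob[blobIdx])) != numberOfColumns || uint64(len(proofsPerBlob[blobIdx])) != numberOfColumns {
			return nil, nil, fmt.Errorf("blob %d: expected %d cells and proofs", blobIdx, numberOfColumns)
		}
		for col := uint64(0); col < numberOfColumns; col++ {
			cells[col] = append(cells[col], cellsPerBlob[blobIdx][col])
			proofs[col] = append(proofs[col], proofsPerBlob[blobIdx][col])
		}
	}
	return cells, proofs, nil
}

func main() {
	// Two blobs and four columns: each output column ends up with one cell per blob.
	cells, proofs, err := rotateRowsToCols(
		[][]Cell{make([]Cell, 4), make([]Cell, 4)},
		[][]Proof{make([]Proof, 4), make([]Proof, 4)},
		4,
	)
	fmt.Println(len(cells), len(cells[0]), len(proofs), err) // 4 2 4 <nil>
}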
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
@@ -59,6 +59,8 @@ func TestValidatorsCustodyRequirement(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDataColumnSidecars(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
t.Run("sizes mismatch", func(t *testing.T) {
|
||||
// Create a protobuf signed beacon block.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
@@ -69,10 +71,10 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
|
||||
// Create cells and proofs.
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, params.BeaconConfig().NumberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
}
|
||||
proofsPerBlob := [][]kzg.Proof{
|
||||
make([]kzg.Proof, params.BeaconConfig().NumberOfColumns),
|
||||
make([]kzg.Proof, numberOfColumns),
|
||||
}
|
||||
|
||||
rob, err := blocks.NewROBlock(signedBeaconBlock)
|
||||
@@ -117,7 +119,6 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with sufficient cells but insufficient proofs.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
}
|
||||
@@ -149,7 +150,6 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
@@ -197,6 +197,7 @@ func TestDataColumnSidecars(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReconstructionSource(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
// Create a Fulu block with blob commitments.
|
||||
signedBeaconBlockPb := util.NewBeaconBlockFulu()
|
||||
commitment1 := make([]byte, 48)
|
||||
@@ -212,7 +213,6 @@ func TestReconstructionSource(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create cells and proofs with correct dimensions.
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
cellsPerBlob := [][]kzg.Cell{
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
make([]kzg.Cell, numberOfColumns),
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
// Package interop contains useful utilities for persisting
|
||||
// ssz-encoded states and blocks to disk during each state
|
||||
// transition for development purposes.
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package interop
|
||||
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "interop")
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/transition/interop")
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
|
||||
// This file is created and regenerated automatically. Anything added here might get removed.
|
||||
package transition
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "state")
|
||||
// The prefix for logs from this package will be the text after the last slash in the package path.
|
||||
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
|
||||
var log = logrus.WithField("package", "beacon-chain/core/transition")
|
||||
|
||||
@@ -182,12 +182,6 @@ func ProcessBlockNoVerifyAnySig(
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
sig := signed.Signature()
|
||||
bSet, err := b.BlockSignatureBatch(st, blk.ProposerIndex(), sig[:], blk.HashTreeRoot)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve block signature set")
|
||||
}
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
@@ -201,7 +195,7 @@ func ProcessBlockNoVerifyAnySig(
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(bSet).Join(rSet).Join(aSet)
|
||||
set.Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
|
||||
@@ -157,9 +157,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "block signature", set.Descriptions[0])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[1])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[2])
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
}
|
||||
|
||||
func TestProcessOperationsNoVerifyAttsSigs_OK(t *testing.T) {
|
||||
|
||||
@@ -4,14 +4,19 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"availability_blobs.go",
|
||||
"availability_columns.go",
|
||||
"bisect.go",
|
||||
"blob_cache.go",
|
||||
"data_column_cache.go",
|
||||
"iface.go",
|
||||
"log.go",
|
||||
"mock.go",
|
||||
"needs.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/das",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -21,6 +26,7 @@ go_library(
|
||||
"//runtime/logging:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -30,11 +36,14 @@ go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"availability_blobs_test.go",
|
||||
"availability_columns_test.go",
|
||||
"blob_cache_test.go",
|
||||
"data_column_cache_test.go",
|
||||
"needs_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/verification:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
@@ -45,6 +54,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -11,9 +11,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/logging"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,12 +23,13 @@ var (
|
||||
// This implementation will hold any blobs passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreBlob struct {
|
||||
store *filesystem.BlobStorage
|
||||
cache *blobCache
|
||||
verifier BlobBatchVerifier
|
||||
store *filesystem.BlobStorage
|
||||
cache *blobCache
|
||||
verifier BlobBatchVerifier
|
||||
shouldRetain RetentionChecker
|
||||
}
|
||||
|
||||
var _ AvailabilityStore = &LazilyPersistentStoreBlob{}
|
||||
var _ AvailabilityChecker = &LazilyPersistentStoreBlob{}
|
||||
|
||||
// BlobBatchVerifier enables LazyAvailabilityStore to manage the verification process
|
||||
// going from ROBlob->VerifiedROBlob, while avoiding the decision of which individual verifications
|
||||
@@ -42,11 +42,12 @@ type BlobBatchVerifier interface {
|
||||
|
||||
// NewLazilyPersistentStore creates a new LazilyPersistentStore. This constructor should always be used
|
||||
// when creating a LazilyPersistentStore because it needs to initialize the cache under the hood.
|
||||
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier) *LazilyPersistentStoreBlob {
|
||||
func NewLazilyPersistentStore(store *filesystem.BlobStorage, verifier BlobBatchVerifier, shouldRetain RetentionChecker) *LazilyPersistentStoreBlob {
|
||||
return &LazilyPersistentStoreBlob{
|
||||
store: store,
|
||||
cache: newBlobCache(),
|
||||
verifier: verifier,
|
||||
store: store,
|
||||
cache: newBlobCache(),
|
||||
verifier: verifier,
|
||||
shouldRetain: shouldRetain,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,9 +67,6 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..
|
||||
}
|
||||
}
|
||||
}
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(sidecars[0].Slot()), slots.ToEpoch(current)) {
|
||||
return nil
|
||||
}
|
||||
key := keyFromSidecar(sidecars[0])
|
||||
entry := s.cache.ensure(key)
|
||||
for _, blobSidecar := range sidecars {
|
||||
@@ -81,8 +79,17 @@ func (s *LazilyPersistentStoreBlob) Persist(current primitives.Slot, sidecars ..
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
|
||||
// BlobSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := commitmentsToCheck(b, current)
|
||||
func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current primitives.Slot, blks ...blocks.ROBlock) error {
|
||||
for _, b := range blks {
|
||||
if err := s.checkOne(ctx, current, b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreBlob) checkOne(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
|
||||
blockCommitments, err := commitmentsToCheck(b, s.shouldRetain)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())
|
||||
}
|
||||
@@ -100,7 +107,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
|
||||
// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
|
||||
// ignore their response and decrease their peer score.
|
||||
sidecars, err := entry.filter(root, blockCommitments, b.Block().Slot())
|
||||
sidecars, err := entry.filter(root, blockCommitments)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "incomplete BlobSidecar batch")
|
||||
}
|
||||
@@ -112,7 +119,7 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
ok := errors.As(err, &me)
|
||||
if ok {
|
||||
fails := me.Failures()
|
||||
lf := make(log.Fields, len(fails))
|
||||
lf := make(logrus.Fields, len(fails))
|
||||
for i := range fails {
|
||||
lf[fmt.Sprintf("fail_%d", i)] = fails[i].Error()
|
||||
}
|
||||
@@ -131,13 +138,12 @@ func (s *LazilyPersistentStoreBlob) IsDataAvailable(ctx context.Context, current
|
||||
return nil
|
||||
}
|
||||
|
||||
func commitmentsToCheck(b blocks.ROBlock, current primitives.Slot) ([][]byte, error) {
|
||||
func commitmentsToCheck(b blocks.ROBlock, shouldRetain RetentionChecker) ([][]byte, error) {
|
||||
if b.Version() < version.Deneb {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUEST
|
||||
if !params.WithinDAPeriod(slots.ToEpoch(b.Block().Slot()), slots.ToEpoch(current)) {
|
||||
if !shouldRetain(b.Block().Slot()) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -17,6 +17,10 @@ import (
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func testShouldRetainAlways(s primitives.Slot) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func Test_commitmentsToCheck(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
@@ -30,11 +34,12 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
|
||||
}
|
||||
cases := []struct {
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
name string
|
||||
commits [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
err error
|
||||
shouldRetain RetentionChecker
|
||||
}{
|
||||
{
|
||||
name: "pre deneb",
|
||||
@@ -60,6 +65,7 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
commits: func() [][]byte {
|
||||
mb := params.GetNetworkScheduleEntry(slots.ToEpoch(fulu + 100)).MaxBlobsPerBlock
|
||||
return commits[:mb]
|
||||
@@ -79,7 +85,8 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return rb
|
||||
},
|
||||
slot: fulu + windowSlots + 1,
|
||||
shouldRetain: func(s primitives.Slot) bool { return false },
|
||||
slot: fulu + windowSlots + 1,
|
||||
},
|
||||
{
|
||||
name: "excessive commitments",
|
||||
@@ -97,14 +104,15 @@ func Test_commitmentsToCheck(t *testing.T) {
|
||||
require.Equal(t, true, len(c) > params.BeaconConfig().MaxBlobsPerBlock(sb.Block().Slot()))
|
||||
return rb
|
||||
},
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
shouldRetain: testShouldRetainAlways,
|
||||
slot: windowSlots + 1,
|
||||
err: errIndexOutOfBounds,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
b := c.block(t)
|
||||
co, err := commitmentsToCheck(b, c.slot)
|
||||
co, err := commitmentsToCheck(b, c.shouldRetain)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
} else {
|
||||
@@ -126,7 +134,7 @@ func TestLazilyPersistent_Missing(t *testing.T) {
|
||||
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(ds, blobSidecars[2]))
|
||||
@@ -153,7 +161,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
|
||||
|
||||
mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
|
||||
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
|
||||
as := NewLazilyPersistentStore(store, mbv)
|
||||
as := NewLazilyPersistentStore(store, mbv, testShouldRetainAlways)
|
||||
|
||||
// Only one commitment persisted, should return error with other indices
|
||||
require.NoError(t, as.Persist(ds, blobSidecars[0]))
|
||||
@@ -166,11 +174,11 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
|
||||
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)
|
||||
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
|
||||
as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{}, testShouldRetainAlways)
|
||||
// stashes as expected
|
||||
require.NoError(t, as.Persist(ds, blobSidecars...))
|
||||
// ignores duplicates
|
||||
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)
|
||||
require.ErrorIs(t, as.Persist(ds, blobSidecars...), errDuplicateSidecar)
|
||||
|
||||
// ignores index out of bound
|
||||
blobSidecars[0].Index = 6
|
||||
@@ -183,7 +191,7 @@ func TestLazyPersistOnceCommitted(t *testing.T) {
|
||||
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))
|
||||
|
||||
// doesn't ignore new sidecars with a different block root
|
||||
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
|
||||
require.NoError(t, as.Persist(ds, moreBlobSidecars[1:]...))
|
||||
}
|
||||
|
||||
type mockBlobBatchVerifier struct {
|
||||
|
||||
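NewLazilyPersistentStore now takes a RetentionChecker instead of computing the DA window inline, so commitmentsToCheck consults the injected predicate rather than params.WithinDAPeriod directly. A rough sketch of how a caller could build that predicate; the currentSlot source is an assumption about the call site, not something shown in this diff:

// Sketch, assuming the das package context from this diff. currentSlot is a
// hypothetical slot source (e.g. a clock); it is not taken from the change above.
package das

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

func daPeriodRetention(currentSlot func() primitives.Slot) RetentionChecker {
	return func(slot primitives.Slot) bool {
		return params.WithinDAPeriod(slots.ToEpoch(slot), slots.ToEpoch(currentSlot()))
	}
}

At construction time this would be wired in roughly as NewLazilyPersistentStore(store, verifier, daPeriodRetention(clock.CurrentSlot)), with clock.CurrentSlot standing in for whatever slot source the node already threads through.
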
beacon-chain/das/availability_columns.go (new file, 244 lines)
@@ -0,0 +1,244 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
errors "github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
|
||||
// This implementation will hold any data columns passed to Persist until the IsDataAvailable is called for their
|
||||
// block, at which time they will undergo full verification and be saved to the disk.
|
||||
type LazilyPersistentStoreColumn struct {
|
||||
store *filesystem.DataColumnStorage
|
||||
cache *dataColumnCache
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier
|
||||
custody *custodyRequirement
|
||||
bisector Bisector
|
||||
shouldRetain RetentionChecker
|
||||
}
|
||||
|
||||
var _ AvailabilityChecker = &LazilyPersistentStoreColumn{}
|
||||
|
||||
// DataColumnsVerifier enables LazilyPersistentStoreColumn to manage the verification process
|
||||
// going from RODataColumn->VerifiedRODataColumn, while avoiding the decision of which individual verifications
|
||||
// to run and in what order. Since LazilyPersistentStoreColumn always tries to verify and save data columns only when
|
||||
// they are all available, the interface takes a slice of data column sidecars.
|
||||
type DataColumnsVerifier interface {
|
||||
VerifiedRODataColumns(ctx context.Context, blk blocks.ROBlock, scs []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error)
|
||||
}
|
||||
|
||||
// NewLazilyPersistentStoreColumn creates a new LazilyPersistentStoreColumn.
|
||||
// WARNING: The resulting LazilyPersistentStoreColumn is NOT thread-safe.
|
||||
func NewLazilyPersistentStoreColumn(
|
||||
store *filesystem.DataColumnStorage,
|
||||
newDataColumnsVerifier verification.NewDataColumnsVerifier,
|
||||
nodeID enode.ID,
|
||||
cgc uint64,
|
||||
bisector Bisector,
|
||||
shouldRetain RetentionChecker,
|
||||
) *LazilyPersistentStoreColumn {
|
||||
return &LazilyPersistentStoreColumn{
|
||||
store: store,
|
||||
cache: newDataColumnCache(),
|
||||
newDataColumnsVerifier: newDataColumnsVerifier,
|
||||
custody: &custodyRequirement{nodeID: nodeID, cgc: cgc},
|
||||
bisector: bisector,
|
||||
shouldRetain: shouldRetain,
|
||||
}
|
||||
}
|
||||
|
||||
// Persist adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
|
||||
func (s *LazilyPersistentStoreColumn) Persist(_ primitives.Slot, sidecars ...blocks.RODataColumn) error {
|
||||
for _, sidecar := range sidecars {
|
||||
if err := s.cache.stash(sidecar); err != nil {
|
||||
return errors.Wrap(err, "stash DataColumnSidecar")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsDataAvailable returns nil if all the commitments in the given blocks are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
|
||||
func (s *LazilyPersistentStoreColumn) IsDataAvailable(ctx context.Context, _ primitives.Slot, blks ...blocks.ROBlock) error {
|
||||
toVerify := make([]blocks.RODataColumn, 0)
|
||||
for _, block := range blks {
|
||||
indices, err := s.required(block)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "full commitments to check with block root `%#x`", block.Root())
|
||||
}
|
||||
if indices.Count() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := keyFromBlock(block)
|
||||
entry := s.cache.entry(key)
|
||||
toVerify, err = entry.append(toVerify, IndicesNotStored(s.store.Summary(block.Root()), indices))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "entry filter")
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.verifyAndSave(toVerify); err != nil {
|
||||
log.Warn("Batch verification failed, bisecting columns by peer")
|
||||
if err := s.bisectVerification(toVerify); err != nil {
|
||||
return errors.Wrap(err, "bisect verification")
|
||||
}
|
||||
}
|
||||
|
||||
s.cache.cleanup(blks)
|
||||
return nil
|
||||
}
|
||||
|
||||
// required returns the set of column indices to check for a given block.
|
||||
func (s *LazilyPersistentStoreColumn) required(block blocks.ROBlock) (peerdas.ColumnIndices, error) {
|
||||
if !s.shouldRetain(block.Block().Slot()) {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
// If there are any commitments in the block, there are blobs,
// and if there are blobs, we need the data columns derived from those blobs.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
// No DA check needed if the block has no blobs.
|
||||
if len(commitments) == 0 {
|
||||
return peerdas.NewColumnIndices(), nil
|
||||
}
|
||||
|
||||
return s.custody.required()
|
||||
}
|
||||
|
||||
// verifyAndSave calls Save on the column store if the columns pass verification.
|
||||
func (s *LazilyPersistentStoreColumn) verifyAndSave(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
verified, err := s.verifyColumns(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "verify columns")
|
||||
}
|
||||
if err := s.store.Save(verified); err != nil {
|
||||
return errors.Wrap(err, "save data column sidecars")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LazilyPersistentStoreColumn) verifyColumns(columns []blocks.RODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(columns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
verifier := s.newDataColumnsVerifier(columns, verification.ByRangeRequestDataColumnSidecarRequirements)
|
||||
if err := verifier.ValidFields(); err != nil {
|
||||
return nil, errors.Wrap(err, "valid fields")
|
||||
}
|
||||
if err := verifier.SidecarInclusionProven(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar inclusion proven")
|
||||
}
|
||||
if err := verifier.SidecarKzgProofVerified(); err != nil {
|
||||
return nil, errors.Wrap(err, "sidecar KZG proof verified")
|
||||
}
|
||||
|
||||
return verifier.VerifiedRODataColumns()
|
||||
}
|
||||
|
||||
// bisectVerification is used when verification of a batch of columns fails. Since the batch could
|
||||
// span multiple blocks or have been fetched from multiple peers, this pattern enables code using the
|
||||
// store to break the verification into smaller units and learn the results, in order to plan to retry
|
||||
// retrieval of the unusable columns.
|
||||
func (s *LazilyPersistentStoreColumn) bisectVerification(columns []blocks.RODataColumn) error {
|
||||
if len(columns) == 0 {
|
||||
return nil
|
||||
}
|
||||
if s.bisector == nil {
|
||||
return errors.New("bisector not initialized")
|
||||
}
|
||||
iter, err := s.bisector.Bisect(columns)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Bisector.Bisect")
|
||||
}
|
||||
// It's up to the bisector how to chunk up columns for verification,
|
||||
// which could be by block, or by peer, or any other strategy.
|
||||
// For the purposes of range syncing or backfill this will be by peer,
|
||||
// so that the node can learn which peer is giving us bad data and downscore them.
|
||||
for columns, err := iter.Next(); columns != nil; columns, err = iter.Next() {
|
||||
if err != nil {
|
||||
if !errors.Is(err, io.EOF) {
|
||||
return errors.Wrap(err, "Bisector.Next")
|
||||
}
|
||||
break // io.EOF signals end of iteration
|
||||
}
|
||||
// We save the parts of the batch that have been verified successfully even though we don't know
|
||||
// if all columns for the block will be available until the block is imported.
|
||||
if err := s.verifyAndSave(s.columnsNotStored(columns)); err != nil {
|
||||
iter.OnError(err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// This should give us a single error representing any unresolved errors seen via onError.
|
||||
return iter.Error()
|
||||
}
|
||||
|
||||
// columnsNotStored filters the list of RODataColumn sidecars to only include those that are not found in the storage summary.
|
||||
func (s *LazilyPersistentStoreColumn) columnsNotStored(sidecars []blocks.RODataColumn) []blocks.RODataColumn {
|
||||
// We use this method to filter a set of sidecars that were previously seen to be unavailable on disk. So our base assumption
|
||||
// is that they are still available and we don't need to copy the list. Instead we make a slice of any indices that are unexpectedly
|
||||
// stored and only when we find that the storage view has changed do we need to create a new slice.
|
||||
stored := make(map[int]struct{}, 0)
|
||||
lastRoot := [32]byte{}
|
||||
var sum filesystem.DataColumnStorageSummary
|
||||
for i, sc := range sidecars {
|
||||
if sc.BlockRoot() != lastRoot {
|
||||
sum = s.store.Summary(sc.BlockRoot())
|
||||
lastRoot = sc.BlockRoot()
|
||||
}
|
||||
if sum.HasIndex(sc.Index) {
|
||||
stored[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
// If the view on storage hasn't changed, return the original list.
|
||||
if len(stored) == 0 {
|
||||
return sidecars
|
||||
}
|
||||
shift := 0
|
||||
for i := range sidecars {
|
||||
if _, ok := stored[i]; ok {
|
||||
// If the index is stored, skip and overwrite it.
|
||||
// Track how many spaces down to shift unseen sidecars (to overwrite the previously shifted or seen).
|
||||
shift++
|
||||
continue
|
||||
}
|
||||
if shift > 0 {
|
||||
// If the index is not stored and we have seen stored indices,
|
||||
// we need to shift the current index down.
|
||||
sidecars[i-shift] = sidecars[i]
|
||||
}
|
||||
}
|
||||
return sidecars[:len(sidecars)-shift]
|
||||
}
|
||||
|
||||
type custodyRequirement struct {
|
||||
nodeID enode.ID
|
||||
cgc uint64 // custody group count
|
||||
indices peerdas.ColumnIndices
|
||||
}
|
||||
|
||||
func (c *custodyRequirement) required() (peerdas.ColumnIndices, error) {
|
||||
peerInfo, _, err := peerdas.Info(c.nodeID, c.cgc)
|
||||
if err != nil {
|
||||
return peerdas.NewColumnIndices(), errors.Wrap(err, "peer info")
|
||||
}
|
||||
return peerdas.NewColumnIndicesFromMap(peerInfo.CustodyColumns), nil
|
||||
}
|
||||
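columnsNotStored above avoids allocating when nothing is already on disk: it first records which positions are stored, then shifts the remaining entries left over those positions and truncates. The same compaction pattern in isolation, on plain ints (a sketch, not the Prysm code):

package main

import "fmt"

// compact drops the positions listed in stored by shifting later entries left,
// reusing the backing array; it returns the original slice when nothing is stored.
func compact(values []int, stored map[int]struct{}) []int {
	if len(stored) == 0 {
		return values
	}
	shift := 0
	for i := range values {
		if _, ok := stored[i]; ok {
			shift++ // already stored: later entries slide down over this slot
			continue
		}
		if shift > 0 {
			values[i-shift] = values[i]
		}
	}
	return values[:len(values)-shift]
}

func main() {
	out := compact([]int{10, 11, 12, 13, 14}, map[int]struct{}{1: {}, 3: {}})
	fmt.Println(out) // [10 12 14]
}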
beacon-chain/das/availability_columns_test.go (new file, 908 lines)
@@ -0,0 +1,908 @@
|
||||
package das
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func mockShouldRetain(current primitives.Epoch) RetentionChecker {
|
||||
return func(slot primitives.Slot) bool {
|
||||
return params.WithinDAPeriod(slots.ToEpoch(slot), current)
|
||||
}
|
||||
}
|
||||
|
||||
var commitments = [][]byte{
|
||||
bytesutil.PadTo([]byte("a"), 48),
|
||||
bytesutil.PadTo([]byte("b"), 48),
|
||||
bytesutil.PadTo([]byte("c"), 48),
|
||||
bytesutil.PadTo([]byte("d"), 48),
|
||||
}
|
||||
|
||||
func TestPersist(t *testing.T) {
|
||||
t.Run("no sidecars", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
err := lazilyPersistentStoreColumns.Persist(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("outside DA period", func(t *testing.T) {
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: 1, Index: 1},
|
||||
}
|
||||
|
||||
var current primitives.Slot = 1_000_000
|
||||
sr := mockShouldRetain(slots.ToEpoch(current))
|
||||
roSidecars, _ := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, nil, enode.ID{}, 0, nil, sr)
|
||||
|
||||
err := lazilyPersistentStoreColumns.Persist(current, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(roSidecars), len(lazilyPersistentStoreColumns.cache.entries))
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const slot = 42
|
||||
store := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
dataColumnParamsByBlockRoot := []util.DataColumnParam{
|
||||
{Slot: slot, Index: 1},
|
||||
{Slot: slot, Index: 5},
|
||||
}
|
||||
|
||||
roSidecars, roDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dataColumnParamsByBlockRoot)
|
||||
avs := NewLazilyPersistentStoreColumn(store, nil, enode.ID{}, 0, nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
|
||||
err := avs.Persist(slot, roSidecars...)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(avs.cache.entries))
|
||||
|
||||
key := cacheKey{slot: slot, root: roDataColumns[0].BlockRoot()}
|
||||
entry, ok := avs.cache.entries[key]
|
||||
require.Equal(t, true, ok)
|
||||
summary := store.Summary(key.root)
|
||||
// A call to Persist does NOT save the sidecars to disk.
|
||||
require.Equal(t, uint64(0), summary.Count())
|
||||
require.Equal(t, len(roSidecars), len(entry.scs))
|
||||
|
||||
idx1 := entry.scs[1]
|
||||
require.NotNil(t, idx1)
|
||||
require.DeepSSZEqual(t, roDataColumns[0].BlockRoot(), idx1.BlockRoot())
|
||||
idx5 := entry.scs[5]
|
||||
require.NotNil(t, idx5)
|
||||
require.DeepSSZEqual(t, roDataColumns[1].BlockRoot(), idx5.BlockRoot())
|
||||
|
||||
for i, roDataColumn := range entry.scs {
|
||||
if map[uint64]bool{1: true, 5: true}[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
require.IsNil(t, roDataColumn)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestIsDataAvailable(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
newDataColumnsVerifier := func(dataColumnSidecars []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
return &mockDataColumnsVerifier{t: t, dataColumnSidecars: dataColumnSidecars}
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
t.Run("without commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
|
||||
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
lazilyPersistentStoreColumns := NewLazilyPersistentStoreColumn(dataColumnStorage, newDataColumnsVerifier, enode.ID{}, 0, nil, mockShouldRetain(0))
|
||||
|
||||
err := lazilyPersistentStoreColumns.IsDataAvailable(ctx, 0, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
|
||||
t.Run("with commitments", func(t *testing.T) {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Slot = primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedRoBlock := newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
block := signedRoBlock.Block()
|
||||
slot := block.Slot()
|
||||
proposerIndex := block.ProposerIndex()
|
||||
parentRoot := block.ParentRoot()
|
||||
stateRoot := block.StateRoot()
|
||||
bodyRoot, err := block.Body().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
root := signedRoBlock.Root()
|
||||
|
||||
storage := filesystem.NewEphemeralDataColumnStorage(t)
|
||||
|
||||
indices := []uint64{1, 17, 19, 42, 75, 87, 102, 117}
|
||||
avs := NewLazilyPersistentStoreColumn(storage, newDataColumnsVerifier, enode.ID{}, uint64(len(indices)), nil, mockShouldRetain(slots.ToEpoch(slot)))
|
||||
dcparams := make([]util.DataColumnParam, 0, len(indices))
|
||||
for _, index := range indices {
|
||||
dataColumnParams := util.DataColumnParam{
|
||||
Index: index,
|
||||
KzgCommitments: commitments,
|
||||
|
||||
Slot: slot,
|
||||
ProposerIndex: proposerIndex,
|
||||
ParentRoot: parentRoot[:],
|
||||
StateRoot: stateRoot[:],
|
||||
BodyRoot: bodyRoot[:],
|
||||
}
|
||||
|
||||
dcparams = append(dcparams, dataColumnParams)
|
||||
}
|
||||
|
||||
_, verifiedRoDataColumns := util.CreateTestVerifiedRoDataColumnSidecars(t, dcparams)
|
||||
|
||||
key := keyFromBlock(signedRoBlock)
|
||||
entry := avs.cache.entry(key)
|
||||
defer avs.cache.delete(key)
|
||||
|
||||
for _, verifiedRoDataColumn := range verifiedRoDataColumns {
|
||||
err := entry.stash(verifiedRoDataColumn.RODataColumn)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
err = avs.IsDataAvailable(ctx, slot, signedRoBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual, err := storage.Get(root, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
//summary := storage.Summary(root)
|
||||
require.Equal(t, len(verifiedRoDataColumns), len(actual))
|
||||
//require.Equal(t, uint64(len(indices)), summary.Count())
|
||||
//require.DeepSSZEqual(t, verifiedRoDataColumns, actual)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRetentionWindow(t *testing.T) {
|
||||
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
|
||||
require.NoError(t, err)
|
||||
fuluSlot, err := slots.EpochStart(params.BeaconConfig().FuluForkEpoch)
|
||||
require.NoError(t, err)
|
||||
|
||||
numberOfColumns := fieldparams.NumberOfColumns
|
||||
testCases := []struct {
|
||||
name string
|
||||
commitments [][]byte
|
||||
block func(*testing.T) blocks.ROBlock
|
||||
slot primitives.Slot
|
||||
wantedCols int
|
||||
}{
|
||||
{
|
||||
name: "Pre-Fulu block",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
return newSignedRoBlock(t, util.NewBeaconBlockElectra())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Commitments outside data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
beaconBlockElectra := util.NewBeaconBlockElectra()
|
||||
|
||||
// Block is from slot 0, "current slot" is window size +1 (so outside the window)
|
||||
beaconBlockElectra.Block.Body.BlobKzgCommitments = commitments
|
||||
|
||||
return newSignedRoBlock(t, beaconBlockElectra)
|
||||
},
|
||||
slot: fuluSlot + windowSlots,
|
||||
},
|
||||
{
|
||||
name: "Commitments within data availability window",
|
||||
block: func(t *testing.T) blocks.ROBlock {
|
||||
signedBeaconBlockFulu := util.NewBeaconBlockFulu()
|
||||
signedBeaconBlockFulu.Block.Body.BlobKzgCommitments = commitments
|
||||
signedBeaconBlockFulu.Block.Slot = fuluSlot + windowSlots - 1
|
||||
|
||||
return newSignedRoBlock(t, signedBeaconBlockFulu)
|
||||
},
|
||||
commitments: commitments,
|
||||
slot: fuluSlot + windowSlots,
|
||||
wantedCols: numberOfColumns,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
b := tc.block(t)
|
||||
s := NewLazilyPersistentStoreColumn(nil, nil, enode.ID{}, uint64(numberOfColumns), nil, mockShouldRetain(slots.ToEpoch(tc.slot)))
|
||||
|
||||
indices, err := s.required(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tc.wantedCols, len(indices))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newSignedRoBlock(t *testing.T, signedBeaconBlock any) blocks.ROBlock {
|
||||
sb, err := blocks.NewSignedBeaconBlock(signedBeaconBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
rb, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
return rb
|
||||
}
|
||||
|
||||
type mockDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
dataColumnSidecars []blocks.RODataColumn
|
||||
validCalled, SidecarInclusionProvenCalled, SidecarKzgProofVerifiedCalled bool
|
||||
}
|
||||
|
||||
var _ verification.DataColumnsVerifier = &mockDataColumnsVerifier{}
|
||||
|
||||
func (m *mockDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
require.Equal(m.t, true, m.validCalled && m.SidecarInclusionProvenCalled && m.SidecarKzgProofVerifiedCalled)
|
||||
|
||||
verifiedDataColumnSidecars := make([]blocks.VerifiedRODataColumn, 0, len(m.dataColumnSidecars))
|
||||
for _, dataColumnSidecar := range m.dataColumnSidecars {
|
||||
verifiedDataColumnSidecar := blocks.NewVerifiedRODataColumn(dataColumnSidecar)
|
||||
verifiedDataColumnSidecars = append(verifiedDataColumnSidecars, verifiedDataColumnSidecar)
|
||||
}
|
||||
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
|
||||
func (m *mockDataColumnsVerifier) ValidFields() error {
|
||||
m.validCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) CorrectSubnet(dataColumnSidecarSubTopic string, expectedTopics []string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *mockDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) ValidProposerSignature(ctx context.Context) error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentValid(badParent func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (m *mockDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarInclusionProven() error {
|
||||
m.SidecarInclusionProvenCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarKzgProofVerified() error {
|
||||
m.SidecarKzgProofVerifiedCalled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockDataColumnsVerifier) SidecarProposerExpected(ctx context.Context) error { return nil }
|
||||
|
||||
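The mocks below pin down the Bisector and BisectionIterator contract used by bisectVerification: Bisect partitions a failed batch, Next yields one chunk at a time until io.EOF, OnError records chunks that still fail, and Error reports whatever remains unresolved. For range sync or backfill the natural partition is by serving peer; a hypothetical peer-grouping bisector along those lines might look like the sketch below (peerOf, the column-to-peer lookup, is invented for illustration, and the interface shapes are inferred from these mocks):

// Sketch, assuming the das package context. peerOf is a hypothetical lookup
// from sidecar to the peer that served it; real sync code owns that mapping.
type peerBisector struct {
	peerOf func(blocks.RODataColumn) string
}

func (b *peerBisector) Bisect(columns []blocks.RODataColumn) (BisectionIterator, error) {
	byPeer := make(map[string][]blocks.RODataColumn)
	for _, column := range columns {
		pid := b.peerOf(column)
		byPeer[pid] = append(byPeer[pid], column)
	}
	chunks := make([][]blocks.RODataColumn, 0, len(byPeer))
	for _, chunk := range byPeer {
		chunks = append(chunks, chunk)
	}
	return &peerChunkIterator{chunks: chunks}, nil
}

type peerChunkIterator struct {
	chunks [][]blocks.RODataColumn
	next   int
	errs   []error
}

func (it *peerChunkIterator) Next() ([]blocks.RODataColumn, error) {
	if it.next >= len(it.chunks) {
		return nil, io.EOF // end of iteration, mirroring bisectVerification's loop
	}
	chunk := it.chunks[it.next]
	it.next++
	return chunk, nil
}

func (it *peerChunkIterator) OnError(err error) { it.errs = append(it.errs, err) }

func (it *peerChunkIterator) Error() error {
	if len(it.errs) == 0 {
		return nil
	}
	return fmt.Errorf("%d peer chunks failed verification: %w", len(it.errs), it.errs[0])
}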
// Mock implementations for bisectVerification tests
|
||||
|
||||
// mockBisectionIterator simulates a BisectionIterator for testing.
|
||||
type mockBisectionIterator struct {
|
||||
chunks [][]blocks.RODataColumn
|
||||
chunkErrors []error
|
||||
finalError error
|
||||
chunkIndex int
|
||||
nextCallCount int
|
||||
onErrorCallCount int
|
||||
onErrorErrors []error
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Next() ([]blocks.RODataColumn, error) {
|
||||
if m.chunkIndex >= len(m.chunks) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
chunk := m.chunks[m.chunkIndex]
|
||||
var err error
|
||||
if m.chunkIndex < len(m.chunkErrors) {
|
||||
err = m.chunkErrors[m.chunkIndex]
|
||||
}
|
||||
m.chunkIndex++
|
||||
m.nextCallCount++
|
||||
if err != nil {
|
||||
return chunk, err
|
||||
}
|
||||
return chunk, nil
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) OnError(err error) {
|
||||
m.onErrorCallCount++
|
||||
m.onErrorErrors = append(m.onErrorErrors, err)
|
||||
}
|
||||
|
||||
func (m *mockBisectionIterator) Error() error {
|
||||
return m.finalError
|
||||
}
|
||||
|
||||
// mockBisector simulates a Bisector for testing.
|
||||
type mockBisector struct {
|
||||
shouldError bool
|
||||
bisectErr error
|
||||
iterator *mockBisectionIterator
|
||||
}
|
||||
|
||||
func (m *mockBisector) Bisect(columns []blocks.RODataColumn) (BisectionIterator, error) {
|
||||
if m.shouldError {
|
||||
return nil, m.bisectErr
|
||||
}
|
||||
return m.iterator, nil
|
||||
}
|
||||
|
||||
// testDataColumnsVerifier implements verification.DataColumnsVerifier for testing.
|
||||
type testDataColumnsVerifier struct {
|
||||
t *testing.T
|
||||
shouldFail bool
|
||||
columns []blocks.RODataColumn
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) VerifiedRODataColumns() ([]blocks.VerifiedRODataColumn, error) {
|
||||
verified := make([]blocks.VerifiedRODataColumn, len(v.columns))
|
||||
for i, col := range v.columns {
|
||||
verified[i] = blocks.NewVerifiedRODataColumn(col)
|
||||
}
|
||||
return verified, nil
|
||||
}
|
||||
|
||||
func (v *testDataColumnsVerifier) SatisfyRequirement(verification.Requirement) {}
|
||||
func (v *testDataColumnsVerifier) ValidFields() error {
|
||||
if v.shouldFail {
|
||||
return errors.New("verification failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) CorrectSubnet(string, []string) error { return nil }
|
||||
func (v *testDataColumnsVerifier) NotFromFutureSlot() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SlotAboveFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) ValidProposerSignature(context.Context) error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarParentSeen(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentValid(func([fieldparams.RootLength]byte) bool) error {
|
||||
return nil
|
||||
}
|
||||
func (v *testDataColumnsVerifier) SidecarParentSlotLower() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarDescendsFromFinalized() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarInclusionProven() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarKzgProofVerified() error { return nil }
|
||||
func (v *testDataColumnsVerifier) SidecarProposerExpected(context.Context) error { return nil }
|
||||
|
||||
// Helper function to create test data columns
|
||||
func makeTestDataColumns(t *testing.T, count int, blockRoot [32]byte, startIndex uint64) []blocks.RODataColumn {
|
||||
columns := make([]blocks.RODataColumn, 0, count)
|
||||
for i := range count {
|
||||
params := util.DataColumnParam{
|
||||
Index: startIndex + uint64(i),
|
||||
KzgCommitments: commitments,
|
||||
Slot: primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
}
|
||||
_, verifiedCols := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{params})
|
||||
if len(verifiedCols) > 0 {
|
||||
columns = append(columns, verifiedCols[0].RODataColumn)
|
||||
}
|
||||
}
|
||||
return columns
|
||||
}
|
||||
|
||||
// Helper function to create test verifier factory with failure pattern
|
||||
func makeTestVerifierFactory(failurePattern []bool) verification.NewDataColumnsVerifier {
|
||||
callIndex := 0
|
||||
return func(cols []blocks.RODataColumn, _ []verification.Requirement) verification.DataColumnsVerifier {
|
||||
shouldFail := callIndex < len(failurePattern) && failurePattern[callIndex]
|
||||
callIndex++
|
||||
return &testDataColumnsVerifier{
|
||||
shouldFail: shouldFail,
|
||||
columns: cols,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestBisectVerification tests the bisectVerification method with comprehensive table-driven test cases.
|
||||
func TestBisectVerification(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
cases := []struct {
|
||||
expectedError bool
|
||||
bisectorNil bool
|
||||
expectedOnErrorCallCount int
|
||||
expectedNextCallCount int
|
||||
inputCount int
|
||||
iteratorFinalError error
|
||||
bisectorError error
|
||||
name string
|
||||
storedColumnIndices []uint64
|
||||
verificationFailurePattern []bool
|
||||
chunkErrors []error
|
||||
chunks [][]blocks.RODataColumn
|
||||
}{
|
||||
{
|
||||
name: "EmptyColumns",
|
||||
inputCount: 0,
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "NilBisector",
|
||||
inputCount: 3,
|
||||
bisectorNil: true,
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "BisectError",
|
||||
inputCount: 5,
|
||||
bisectorError: errors.New("bisect failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 0,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkSuccess",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "SingleChunkFails",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("chunk failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothPass",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_FirstFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_SecondFails",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{false, true},
|
||||
iteratorFinalError: errors.New("second failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "TwoChunks_BothFail",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, true},
|
||||
iteratorFinalError: errors.New("both failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_AllPass",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManyChunks_MixedFail",
|
||||
inputCount: 16,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, true, false, true},
|
||||
iteratorFinalError: errors.New("mixed failures"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 5,
|
||||
expectedOnErrorCallCount: 2,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_PartialFilter",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 3},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_AllStored",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 1, 2, 3, 4, 5},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "FilterStoredColumns_MixedAccess",
|
||||
inputCount: 10,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{1, 5, 9},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextError",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
chunkErrors: []error{nil, errors.New("next error")},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "IteratorNextEOF",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "LargeChunkSize",
|
||||
inputCount: 128,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ManySmallChunks",
|
||||
inputCount: 32,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}, {}, {}, {}, {}, {}, {}},
|
||||
verificationFailurePattern: []bool{false, false, false, false, false, false, false, false},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 9,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "ChunkWithSomeStoredColumns",
|
||||
inputCount: 6,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{false},
|
||||
storedColumnIndices: []uint64{0, 2, 4},
|
||||
expectedError: false,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "OnErrorDoesNotStopIteration",
|
||||
inputCount: 8,
|
||||
chunks: [][]blocks.RODataColumn{{}, {}},
|
||||
verificationFailurePattern: []bool{true, false},
|
||||
iteratorFinalError: errors.New("first failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 3,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "VerificationErrorWrapping",
|
||||
inputCount: 4,
|
||||
chunks: [][]blocks.RODataColumn{{}},
|
||||
verificationFailurePattern: []bool{true},
|
||||
iteratorFinalError: errors.New("verification failed"),
|
||||
expectedError: true,
|
||||
expectedNextCallCount: 2,
|
||||
expectedOnErrorCallCount: 1,
|
||||
},
|
||||
}
|
||||
|
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Setup storage
			var store *filesystem.DataColumnStorage
			if len(tc.storedColumnIndices) > 0 {
				mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
				blockRoot := [32]byte{1, 2, 3}
				slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
				require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.storedColumnIndices...))
				store = s
			} else {
				store = filesystem.NewEphemeralDataColumnStorage(t)
			}

			// Create test columns
			blockRoot := [32]byte{1, 2, 3}
			columns := makeTestDataColumns(t, tc.inputCount, blockRoot, 0)

			// Setup iterator with chunks
			iterator := &mockBisectionIterator{
				chunks: tc.chunks,
				chunkErrors: tc.chunkErrors,
				finalError: tc.iteratorFinalError,
			}

			// Setup bisector
			var bisector Bisector
			if tc.bisectorNil || tc.inputCount == 0 {
				bisector = nil
			} else if tc.bisectorError != nil {
				bisector = &mockBisector{
					shouldError: true,
					bisectErr: tc.bisectorError,
				}
			} else {
				bisector = &mockBisector{
					shouldError: false,
					iterator: iterator,
				}
			}

			// Create store with verifier
			verifierFactory := makeTestVerifierFactory(tc.verificationFailurePattern)
			lazilyPersistentStore := &LazilyPersistentStoreColumn{
				store: store,
				cache: newDataColumnCache(),
				newDataColumnsVerifier: verifierFactory,
				custody: &custodyRequirement{},
				bisector: bisector,
			}

			// Execute
			err := lazilyPersistentStore.bisectVerification(columns)

			// Assert
			if tc.expectedError {
				require.NotNil(t, err)
			} else {
				require.NoError(t, err)
			}

			// Verify iterator interactions for non-error cases
			if tc.inputCount > 0 && bisector != nil && tc.bisectorError == nil && !tc.expectedError {
				require.NotEqual(t, 0, iterator.nextCallCount, "iterator Next() should have been called")
				require.Equal(t, tc.expectedOnErrorCallCount, iterator.onErrorCallCount, "OnError() call count mismatch")
			}
		})
	}
}

func allIndicesExcept(total int, excluded []uint64) []uint64 {
	excludeMap := make(map[uint64]bool)
	for _, idx := range excluded {
		excludeMap[idx] = true
	}

	var result []uint64
	for i := range total {
		if !excludeMap[uint64(i)] {
			result = append(result, uint64(i))
		}
	}
	return result
}

// TestColumnsNotStored tests the columnsNotStored method.
func TestColumnsNotStored(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2

	cases := []struct {
		name     string
		count    int
		stored   []uint64 // Column indices marked as stored
		expected []uint64 // Expected column indices in returned result
	}{
		// Empty cases
		{
			name: "EmptyInput",
			count: 0,
			stored: []uint64{},
			expected: []uint64{},
		},
		// Single element cases
		{
			name: "SingleElement_NotStored",
			count: 1,
			stored: []uint64{},
			expected: []uint64{0},
		},
		{
			name: "SingleElement_Stored",
			count: 1,
			stored: []uint64{0},
			expected: []uint64{},
		},
		// All not stored cases
		{
			name: "AllNotStored_FiveElements",
			count: 5,
			stored: []uint64{},
			expected: []uint64{0, 1, 2, 3, 4},
		},
		// All stored cases
		{
			name: "AllStored",
			count: 5,
			stored: []uint64{0, 1, 2, 3, 4},
			expected: []uint64{},
		},
		// Partial storage - beginning
		{
			name: "StoredAtBeginning",
			count: 5,
			stored: []uint64{0, 1},
			expected: []uint64{2, 3, 4},
		},
		// Partial storage - end
		{
			name: "StoredAtEnd",
			count: 5,
			stored: []uint64{3, 4},
			expected: []uint64{0, 1, 2},
		},
		// Partial storage - middle
		{
			name: "StoredInMiddle",
			count: 5,
			stored: []uint64{2},
			expected: []uint64{0, 1, 3, 4},
		},
		// Partial storage - scattered
		{
			name: "StoredScattered",
			count: 8,
			stored: []uint64{1, 3, 5},
			expected: []uint64{0, 2, 4, 6, 7},
		},
		// Alternating pattern
		{
			name: "AlternatingPattern",
			count: 8,
			stored: []uint64{0, 2, 4, 6},
			expected: []uint64{1, 3, 5, 7},
		},
		// Consecutive stored
		{
			name: "ConsecutiveStored",
			count: 10,
			stored: []uint64{3, 4, 5, 6},
			expected: []uint64{0, 1, 2, 7, 8, 9},
		},
		// Large slice cases
		{
			name: "LargeSlice_NoStored",
			count: 64,
			stored: []uint64{},
			expected: allIndicesExcept(64, []uint64{}),
		},
		{
			name: "LargeSlice_SingleStored",
			count: 64,
			stored: []uint64{32},
			expected: allIndicesExcept(64, []uint64{32}),
		},
	}

	slot := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Create test columns first to get the actual block root
			var columns []blocks.RODataColumn
			if tc.count > 0 {
				columns = makeTestDataColumns(t, tc.count, [32]byte{}, 0)
			}

			// Get the actual block root from the first column (if any)
			var blockRoot [32]byte
			if len(columns) > 0 {
				blockRoot = columns[0].BlockRoot()
			}

			// Setup storage
			var store *filesystem.DataColumnStorage
			if len(tc.stored) > 0 {
				mocker, s := filesystem.NewEphemeralDataColumnStorageWithMocker(t)
				require.NoError(t, mocker.CreateFakeIndices(blockRoot, slot, tc.stored...))
				store = s
			} else {
				store = filesystem.NewEphemeralDataColumnStorage(t)
			}

			// Create store instance
			lazilyPersistentStore := &LazilyPersistentStoreColumn{
				store: store,
			}

			// Execute
			result := lazilyPersistentStore.columnsNotStored(columns)

			// Assert count
			require.Equal(t, len(tc.expected), len(result),
				fmt.Sprintf("expected %d columns, got %d", len(tc.expected), len(result)))

			// Verify that no stored columns are in the result
			if len(tc.stored) > 0 {
				resultIndices := make(map[uint64]bool)
				for _, col := range result {
					resultIndices[col.Index] = true
				}
				for _, storedIdx := range tc.stored {
					require.Equal(t, false, resultIndices[storedIdx],
						fmt.Sprintf("stored column index %d should not be in result", storedIdx))
				}
			}

			// If expected indices are specified, verify the exact column indices in order
			if len(tc.expected) > 0 && len(tc.stored) == 0 {
				// Only check exact order for non-stored cases (where we know they stay in same order)
				for i, expectedIdx := range tc.expected {
					require.Equal(t, columns[expectedIdx].Index, result[i].Index,
						fmt.Sprintf("column %d: expected index %d, got %d", i, columns[expectedIdx].Index, result[i].Index))
				}
			}

			// Verify optimization: if nothing stored, should return original slice
			if len(tc.stored) == 0 && tc.count > 0 {
				require.Equal(t, &columns[0], &result[0],
					"when no columns stored, should return original slice (same pointer)")
			}

			// Verify optimization: if some stored, result should use in-place shifting
			if len(tc.stored) > 0 && len(tc.expected) > 0 && tc.count > 0 {
				require.Equal(t, cap(columns), cap(result),
					"result should be in-place shifted from original (same capacity)")
			}
		})
	}
}

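The last two assertions above pin down the shape of columnsNotStored, not just its output: when nothing is stored the original slice is returned untouched (same pointer), and when some columns are stored the remaining columns are shifted left within the original backing array (same capacity). The snippet below is a minimal sketch of a filter with those two properties; it is not the implementation from this diff, and the hypothetical isStored predicate stands in for the real lookup against the DataColumnStorage.

package das

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// columnsNotStoredSketch illustrates the contract asserted by TestColumnsNotStored:
// return the input slice unchanged when no column is already stored, otherwise
// filter in place so the result reuses the input's backing array.
func columnsNotStoredSketch(columns []blocks.RODataColumn, isStored func(blocks.RODataColumn) bool) []blocks.RODataColumn {
	missing := columns[:0] // reuse the backing array for in-place filtering
	anyStored := false
	for _, col := range columns {
		if isStored(col) {
			anyStored = true
			continue
		}
		// Write index never passes the read index, so this is a safe in-place shift.
		missing = append(missing, col)
	}
	if !anyStored {
		// Fast path: nothing was stored, hand back the original slice unchanged.
		return columns
	}
	return missing
}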
beacon-chain/das/bisect.go (new file, 40 lines)
@@ -0,0 +1,40 @@
package das

import (
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// Bisector describes a type that takes a set of RODataColumns via the Bisect method
// and returns a BisectionIterator that returns batches of those columns to be
// verified together.
type Bisector interface {
	// Bisect initializes the BisectionIterator and returns the result.
	Bisect([]blocks.RODataColumn) (BisectionIterator, error)
}

// BisectionIterator describes an iterator that returns groups of columns to verify.
// It is up to the bisector implementation to decide how to chunk up the columns,
// whether by block, by peer, or any other strategy. For example, backfill implements
// a bisector that keeps track of the source of each sidecar by peer, and groups
// sidecars by peer in the Next method, enabling it to track which peers, out of all
// the peers contributing to a batch, gave us bad data.
// When a batch fails, the OnError method should be used so that the bisector can
// keep track of the failed groups of columns and, e.g., apply that knowledge in peer scoring.
// The same column will be returned multiple times by Next; first as part of a larger batch,
// and again as part of a more fine-grained batch if there was an error in the large batch.
// For example, first as part of a batch of all columns spanning peers, and then again
// as part of a batch of columns from a single peer if some column in the larger batch
// failed verification.
type BisectionIterator interface {
	// Next returns the next group of columns to verify.
	// When the iteration is complete, Next should return (nil, io.EOF).
	Next() ([]blocks.RODataColumn, error)
	// OnError should be called when verification of a group of columns obtained via Next() fails.
	OnError(error)
	// Error can be used at the end of the iteration to get a single error result. It will return
	// nil if OnError was never called, or an error of the implementer's choosing representing the set
	// of errors seen during iteration. For instance, when bisecting from columns spanning peers to columns
	// from a single peer, the broader error could be dropped, and then the more specific error
	// (for a single peer's response) returned after bisecting to it.
	Error() error
}
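To make the contract above concrete, here is a minimal sketch, not part of this diff, of a bisector that yields all columns as one coarse batch and, only if that batch is reported as failed via OnError, re-yields the columns one at a time. The type names sketchBisector and sketchIterator are hypothetical; a real implementation, such as the per-peer grouping described above, would group differently and might drop the coarse error once it has been narrowed to a specific source.

package das

import (
	"errors"
	"io"

	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
)

// sketchBisector hands every input column to a single iterator.
type sketchBisector struct{}

func (sketchBisector) Bisect(cols []blocks.RODataColumn) (BisectionIterator, error) {
	return &sketchIterator{cols: cols}, nil
}

// sketchIterator returns one coarse batch, then bisects to per-column batches
// only if the coarse batch failed verification.
type sketchIterator struct {
	cols     []blocks.RODataColumn
	next     int  // index of the next per-column batch
	coarse   bool // whether the coarse batch has already been returned
	bisected bool // whether OnError demoted us to per-column batches
	errs     []error
}

// Next returns the whole input once, then (nil, io.EOF), unless OnError was
// called, in which case it re-returns the columns one at a time.
func (it *sketchIterator) Next() ([]blocks.RODataColumn, error) {
	if !it.coarse {
		it.coarse = true
		return it.cols, nil
	}
	if !it.bisected || it.next >= len(it.cols) {
		return nil, io.EOF
	}
	batch := it.cols[it.next : it.next+1]
	it.next++
	return batch, nil
}

// OnError records the failure and switches the iterator to fine-grained batches.
func (it *sketchIterator) OnError(err error) {
	it.bisected = true
	it.errs = append(it.errs, err)
}

// Error aggregates everything reported via OnError; nil if nothing failed.
func (it *sketchIterator) Error() error {
	return errors.Join(it.errs...)
}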
Some files were not shown because too many files have changed in this diff.