Compare commits


27 Commits

Author SHA1 Message Date
Preston Van Loon
345d587204 Add comprehensive mutation test coverage for columns.go
This commit adds 25 new test cases to improve mutation testing coverage for
the backfill columns sync functionality, addressing 38 previously escaped
mutants.

Test coverage added:
- buildColumnBatch(): Array indexing edge cases, fork epoch boundaries,
  pre/post-Fulu block handling, and control flow mutations
- countedValidation(): Validation error paths, commitment mismatches,
  and state update verification (Unset/addPeerColumns calls)
- validate(): Metrics recording wrapper and error propagation
- newColumnSync(): Initialization paths and nil columnBatch handling
- currentCustodiedColumns(): Column indices retrieval
- columnSync wrapper methods: Nil checks and delegation logic

The new tests specifically target:
- Array indexing bugs (len-1, len+1, len-0 mutations)
- Boundary conditions at Fulu fork epoch (< vs <=)
- Branch coverage for error handling paths
- Statement removal detection for critical state updates
- Expression and comparison operator mutations

All 25 new test cases pass successfully, bringing function coverage from
14% (1/7 functions) to 100% (7/7 functions) and estimated mutation
coverage from ~0% to ~95%+.
2025-11-20 08:54:07 -06:00
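The boundary and indexing mutants described in the commit message above are the kind of thing a table-driven Go test pins down. A minimal sketch, not code from this commit: the predicate isPostFulu and the fork epoch value are hypothetical stand-ins for the real backfill column sync logic.

package backfill_test

import "testing"

// isPostFulu is a hypothetical stand-in for the epoch comparison the commit
// message describes; a mutation flipping >= to > would change its result at
// exactly the fork epoch.
func isPostFulu(epoch, fuluForkEpoch uint64) bool {
	return epoch >= fuluForkEpoch
}

// TestFuluBoundary exercises the epoch immediately before, at, and after the
// fork so that an off-by-one or comparison-operator mutation is caught.
func TestFuluBoundary(t *testing.T) {
	const fuluForkEpoch = 100 // made-up value for the sketch
	cases := []struct {
		name  string
		epoch uint64
		want  bool
	}{
		{"one epoch before the fork", fuluForkEpoch - 1, false},
		{"exactly at the fork epoch", fuluForkEpoch, true},
		{"one epoch after the fork", fuluForkEpoch + 1, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := isPostFulu(tc.epoch, fuluForkEpoch); got != tc.want {
				t.Fatalf("isPostFulu(%d) = %v, want %v", tc.epoch, got, tc.want)
			}
		})
	}
}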
Preston Van Loon
013d8ca4fd TestColumnBatch 2025-11-20 08:54:07 -06:00
Kasey Kirkham
85b1414119 Test coverage for log.go and some small improvements
Co-authored-by: Claude <noreply@anthropic.com>
2025-11-20 00:21:59 -06:00
Kasey Kirkham
b7e999d651 Test coverage for verify_column.go
Co-authored-by: Claude <noreply@anthropic.com>
2025-11-19 16:17:56 -06:00
Kasey Kirkham
d6209ae5c3 rm extra tick 2025-11-19 13:18:04 -06:00
Kasey Kirkham
36e8947068 more comment fixes 2025-11-19 13:01:33 -06:00
Kasey Kirkham
0a827f17d5 more naming etc feedback 2025-11-19 12:24:07 -06:00
Kasey Kirkham
8d126196d9 update changelog to include message about flag default changing 2025-11-19 11:42:02 -06:00
Kasey Kirkham
094cee25ac more comment cleanup 2025-11-19 11:41:01 -06:00
Kasey Kirkham
bbd856fe6f extract BisectionIterator as a separate interface 2025-11-19 11:20:22 -06:00
Kasey Kirkham
b9a7cb3764 more manu feedback 2025-11-19 10:40:49 -06:00
Kasey Kirkham
61d4a6c105 don't try to cache peerdas.Info 2025-11-18 17:44:38 -06:00
Kasey Kirkham
1037e56238 manu feedback 2025-11-18 17:39:20 -06:00
Kasey Kirkham
ac0b3cb593 remove "feature" to slice result from BestFinalized 2025-11-18 16:22:31 -06:00
Kasey Kirkham
d156168712 Avoid requesting blocks from peer that gave us an invalid batch 2025-11-18 10:48:52 -06:00
Kasey Kirkham
1644dc6323 decrease default batch size to compensate for data column overhead 2025-11-13 13:14:21 -06:00
Kasey Kirkham
29257b10ec avoid debug log spam that comes from computing custody info pre-fulu 2025-11-12 16:35:40 -06:00
Kasey Kirkham
6849302288 re-enable backfill for fulu 2025-11-12 16:35:40 -06:00
Kasey Kirkham
58f6b3ff3c fixing rebase 2025-11-12 16:35:37 -06:00
Kasey Kirkham
51bca0d08c make daChecker less ambiguously stateful 2025-11-12 16:00:39 -06:00
Kasey Kirkham
3697b1db50 multiStore/Checker to validate dependencies/state up front 2025-11-12 16:00:39 -06:00
Kasey Kirkham
c1b361ce0c remove or rewrite non-actionable TODOs 2025-11-12 16:00:39 -06:00
Kasey Kirkham
20bbc60efe replace panic that "shouldn't happen" with a safe shutdown 2025-11-12 16:00:39 -06:00
Kasey Kirkham
0f9b87cb59 downscore peers on block batch failures 2025-11-12 16:00:39 -06:00
Kasey Kirkham
d6ce7e0b9f potuz' feedback 2025-11-12 16:00:39 -06:00
Kasey Kirkham
9d8f45940a filter locally available columns from backfill batch 2025-11-12 16:00:39 -06:00
Kasey
4424cce30d DataColumnSidecar backfill 2025-11-12 16:00:39 -06:00
1124 changed files with 7179 additions and 258274 deletions

View File

@@ -34,5 +34,4 @@ Fixes #
- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description with sufficient context for reviewers to understand this PR.
- [ ] I have tested that my changes work as expected and I added a testing plan to the PR description (if applicable).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.

View File

@@ -1,23 +0,0 @@
name: Check log.go files
on: [ pull_request ]
jobs:
check-logs:
runs-on: ubuntu-4
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go 1.25.1
uses: actions/setup-go@v5
with:
go-version: '1.25.1'
- name: Install ripgrep
run: sudo apt-get install -y ripgrep
- name: Check log.go files
run: ./hack/check-logs.sh

.gitignore vendored
View File

@@ -44,6 +44,3 @@ tmp
# spectest coverage reports
report.txt
# execution client data
execution/

View File

@@ -193,30 +193,10 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/modernize/any:go_default_library",
"//tools/analyzers/modernize/appendclipped:go_default_library",
"//tools/analyzers/modernize/bloop:go_default_library",
"//tools/analyzers/modernize/fmtappendf:go_default_library",
"//tools/analyzers/modernize/forvar:go_default_library",
"//tools/analyzers/modernize/mapsloop:go_default_library",
"//tools/analyzers/modernize/minmax:go_default_library",
#"//tools/analyzers/modernize/newexpr:go_default_library", # Disabled until go 1.26.
"//tools/analyzers/modernize/omitzero:go_default_library",
"//tools/analyzers/modernize/rangeint:go_default_library",
"//tools/analyzers/modernize/reflecttypefor:go_default_library",
"//tools/analyzers/modernize/slicescontains:go_default_library",
#"//tools/analyzers/modernize/slicesdelete:go_default_library", # Disabled, see https://go.dev/issue/73686
"//tools/analyzers/modernize/slicessort:go_default_library",
"//tools/analyzers/modernize/stringsbuilder:go_default_library",
"//tools/analyzers/modernize/stringscutprefix:go_default_library",
"//tools/analyzers/modernize/stringsseq:go_default_library",
"//tools/analyzers/modernize/testingcontext:go_default_library",
"//tools/analyzers/modernize/waitgroup:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/nopanic:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",

View File

@@ -4,232 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07
Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).
### Added
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)
### Changed
- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also broadcasts immediately now, similar to gRPC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations only differing by the aggregator index are in the pending queue, only process one of them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)
### Fixed
- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)
## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18
Release highlights:
- Fixed potential deadlock scenario in data column batch verification
- Improved processing and metrics for cells and proofs
We are aware of [an issue](https://github.com/OffchainLabs/prysm/issues/16160) where Prysm struggles to sync from an out of sync state. We will have another release before the end of the year to address this issue.
Our postmortem document from the December 4th mainnet issue has been published on our [documentation site](https://prysm.offchainlabs.com/docs/misc/mainnet-postmortems/)
### Added
- Track the dependent root of the latest finalized checkpoint in forkchoice. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16103)
- Proposal design document to implement graffiti. Currently it is empty by default; the idea is to have it take the form GE168dPR63af. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15983)
- Add support for detecting and logging per address reachability via libp2p AutoNAT v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16100)
- Static analyzer that ensures each `httputil.HandleError` call is followed by a `return` statement. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16134)
- Prometheus histogram `cells_and_proofs_from_structured_computation_milliseconds` to track computation time for cells and proofs from structured blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Prometheus histogram `get_blobs_v2_latency_milliseconds` to track RPC latency for `getBlobsV2` calls to the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
### Changed
- Optimise migratetocold by not doing brute force for loop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16101)
- The e2e sync committee evaluator now skips the first slot after startup (we already skip the fork epoch for these checks). This skip only applies at startup, because Altair always starts from epoch 0 and validators need to warm up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16145)
- Run `ComputeCellsAndProofsFromFlat` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
- Run `ComputeCellsAndProofsFromStructured` in parallel to improve performance when computing cells and proofs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16115)
### Removed
- Unnecessary copy is removed from Eth1DataHasEnoughSupport. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16118)
### Fixed
- Incorrect constructor return type [#16084](https://github.com/OffchainLabs/prysm/pull/16084). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16084)
- Fixed possible race when validating two attestations at the same time. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16105)
- Fix missing return after version header check in SubmitAttesterSlashingsV2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16126)
- Fix deadlock in data column gossip KZG batch verification when a caller times out preventing result delivery. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16141)
- Fixed replay state issue in rest api caused by attester and sync committee duties endpoints. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16136)
- Do not error when committee has been computed correctly but updating the cache failed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16142)
- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16144)
## [v7.1.0](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.1.0) - 2025-12-10
This release includes several key features/fixes. If you are running v7.0.0 then you should update to v7.0.1 or later and remove the flag `--disable-last-epoch-targets`.
Release highlights:
- Backfill is now supported in Fulu. Backfill from checkpoint sync now supports data columns. Run with `--enable-backfill` when using checkpoint sync.
- A new node configuration to custody enough data columns to reconstruct blobs. Use flag `--semi-supernode` to custody at least 50% of the data columns.
- Critical fixes in attestation processing.
A post mortem doc with full details on the mainnet attestation processing issue from December 4th is expected in the coming days.
### Added
- add fulu support to light client processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15995)
- Record data column gossip KZG batch verification latency in both the pooled worker and fallback paths so the `beacon_kzg_verification_data_column_batch_milliseconds` histogram reflects gossip traffic, annotated with `path` labels to distinguish the sources. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16018)
- Implement Gloas state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15611)
- Add initial configs for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add kv functions for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
- Add supported version for fork versions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16030)
- prometheus metric `gossip_attestation_verification_milliseconds` to track attestation gossip topic validation latency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15785)
- Integrate state-diff into `State()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16033)
- Implement Gloas fork support in consensus-types/blocks with factory methods, getters, setters, and proto handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15618)
- Integrate state-diff into `HasState()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16045)
- Added `--semi-supernode` flag to custody half of a supernode's data column requirements while still allowing reconstruction for blob retrieval. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16029)
- Data column backfill. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Backfill metrics for columns: backfill_data_column_sidecar_downloaded, backfill_data_column_sidecar_downloaded_bytes, backfill_batch_columns_download_ms, backfill_batch_columns_verify_ms. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16099)
### Changed
- Improve readability in slashing import and remove duplicated code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15957)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Changed `--subscribe-all-data-subnets` flag to `--supernode` and aliased `--subscribe-all-data-subnets` for existing users. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16012)
- Use explicit slot component timing configs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15999)
- Downgraded log level from INFO to DEBUG on PrepareBeaconProposer updated fee recipients. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Change the logging behaviour of Updated fee recipients to only log count of validators at Debug level and all validator indices at Trace level. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
- Stop emitting payload attribute events during late block handling when we are not proposing the next slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16026)
- Initialize the `ExecutionRequests` field in gossip block map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16047)
- Avoid redundant WithHttpEndpoint when JWT is provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16032)
- Removed dead slot parameter from blobCacheEntry.filter. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16021)
- Added log prefix to the `genesis` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Added log prefix to the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- `WithGenesisValidatorsRoot`: Use camelCase for log field param. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- Move `Origin checkpoint found in db` from WARN to INFO, since it is the expected behaviour. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
- backfill metrics that changed name and/or histogram buckets: backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_time_waiting -> backfill_batch_waiting_ms, backfill_batch_time_roundtrip -> backfill_batch_roundtrip_ms, backfill_blocks_bytes_downloaded -> backfill_blocks_downloaded_bytes, backfill_batch_blocks_time_download -> backfill_batch_blocks_download_ms, backfill_batch_blobs_time_download -> backfill_batch_blobs_download_ms, backfill_blobs_bytes_downloaded -> backfill_blocks_downloaded_bytes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
- Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16097)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)
### Removed
- Remove validator cross-client from end-to-end tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16025)
- `NUMBER_OF_COLUMNS` configuration (not in the specification any more, replaced by a preset). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
- `MAX_CELLS_IN_EXTENDED_MATRIX` configuration (not in the specification any more). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
### Fixed
- Nil check for block if it doesn't exist in the DB in fetchOriginSidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16006)
- Fix proposals progress bar count [#16020](https://github.com/OffchainLabs/prysm/pull/16020). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16020)
- Move `BlockGossipReceived` event to the end of gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16031)
- Fix state diff repetitive anchor slot bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16037)
- Check the JWT secret length is exactly 256 bits (32 bytes) as per Engine API specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15939)
- http_error_count now matches the other cases by listing the endpoint name rather than the actual URL requested. This improves metrics cardinality. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16055)
- Fix array out of bounds in static analyzer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16058)
- fixes E2E tests to be able to start from Electra genesis fork or future forks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16048)
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
## [v7.0.1](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.0.1) - 2025-12-08
This patch release contains 4 cherry-picked changes to address the mainnet attestation processing issue from 2025-12-04. Operators are encouraged to update to this release as soon as practical. As of this release, the feature flag `--disable-last-epoch-targets` has been deprecated and can be safely removed from your node configuration.
A post mortem doc with full details is expected to be published later this week.
### Changed
- Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)
### Fixed
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10
This is our initial mainnet release for the Ethereum mainnet Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or later release prior to the fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
Other than the mainnet fulu fork schedule, there are a few callouts in this release:
- `by-epoch` blob storage format is the default for new installations. Users that haven't migrated will see a warning to migrate to the new format. Existing deployments may set `--blob-storage-layout=by-epoch` to perform the migration.
- Several deprecated flags have been deleted! Please review the "removed" section of this changelog carefully. If you are referencing a removed flag, Prysm will not start! All of these flags had no effect for at least one release.
- Several deprecated API endpoints have been deleted. Please review the "removed" section of this changelog carefully.
- Backfill is not supported in Fulu. This is expected to be fixed in the next release and should be delivered prior to the mainnet activation fork.
- The builder default gas limit is raised from `45000000` (45 MGas) to `60000000` (60 MGas).
- Several bug fixes and improvements.
### Added
- Allow custom headers in validator client HTTP requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15884)
- Metric to track data columns recovered from execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15924)
- Metrics: Add count of peers per direction and type (inbound/outbound), (TCP/QUIC). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- Add `p2p_minimum_peers_per_subnet` metric. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
- Added GeneralizedIndicesFromPath function to calculate the GIs for a given sszInfo object and a PathElement. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15873)
- Add Gloas protobuf definitions with spec tests and SSZ serialization support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15601)
- Fulu fork epoch for mainnet configurations set for December 3, 2025, 09:49:11pm UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
- Added BPO schedules for December 9, 2025, 02:21:11pm UTC and January 7, 2026, 01:01:11am UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
### Changed
- Updated consensus spec tests to v1.6.0-beta.1 with new hashes and URL template. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15918)
- Use the `by-epoch` blob storage layout by default and log a warning to users who continue to use the flat layout, encouraging them to switch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15904)
- Update go-netroute to `v0.3.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15934)
- Introduced Path type for SSZ-QL queries and updated PathElement (removed Length field, kept Index) enforcing that len queries are terminal (at most one per path). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
- Changed length query syntax from `block.payload.len(transactions)` to `len(block.payload.transactions)`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
- Update `go-netroute` to `v0.4.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15949)
- Updated consensus spec tests to v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15960)
- Updated go bitfield from prysmaticlabs to offchainlabs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15968)
- Bump builder default gas limit from `45000000` (45 MGas) to `60000000` (60 MGas). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15979)
- Use head state for block pubsub validation when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15972)
- updated consensus spec to 1.6.0 from 1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
- Upgrade Prysm v6 to v7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15989)
- Use head state readonly when possible to validate data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15977)
### Removed
- log mentioning removed flag `--show-deposit-data`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15926)
- Remove Beacon API endpoints that were deprecated in Electra: `GET /eth/v1/beacon/deposit_snapshot`, `GET /eth/v1/beacon/blocks/{block_id}/attestations`, `GET /eth/v1/beacon/pool/attestations`, `POST /eth/v1/beacon/pool/attestations`, `GET /eth/v1/beacon/pool/attester_slashings`, `POST /eth/v1/beacon/pool/attester_slashings`, `GET /eth/v1/validator/aggregate_attestation`, `POST /eth/v1/validator/aggregate_and_proofs`, `POST /eth/v1/beacon/blocks`, `POST /eth/v1/beacon/blinded_blocks`, `GET /eth/v1/builder/states/{state_id}/expected_withdrawals`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15962)
- Deprecated flag `--enable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-build-block-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-reorg-late-blocks` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-aggregate-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-verbose-sig-verification` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-debug-rpc-endpoints` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--beacon-rpc-gateway-provider` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-grpc-gateway` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-committee-aware-packing` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--interop-genesis-time` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--interop-num-validators` has been removed (from beacon-chain only; still available in validator client). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--enable-quic` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--attest-timely` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--disable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
- Deprecated flag `--p2p-metadata` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
### Fixed
- Remove `Reading static P2P private key from a file.` log if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15913)
- `blobSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
- `dataColumnSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
- Fix incorrect version used when sending attestation version in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15950)
- Changed the behavior of topic subscriptions such that only topics that require the active validator count will compute that value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
- Added a Mutex to the computation of active validator count during topic subscription to avoid a race condition where multiple goroutines are computing the same work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
- `RODataColumnsVerifier.ValidProposerSignature`: Ensure the expensive signature verification is only performed once for concurrent requests for the same signature data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15954)
- use filepath for path operations (clean, join, etc.) to ensure correct behavior on Windows. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15953)
- Fix #15969: Handle addition overflow in `/eth/v1/beacon/rewards/attestations/{epoch}`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15970)
- `SidecarProposerExpected`: Add the slot in the single flight key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15976)
- Ensures the rate limit is respected for by-root blob and data column sidecar requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15981)
- Use head only if its compatible with target for attestation validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15965)
- Backfill disabled if checkpoint sync origin is after fulu fork due to lack of DataColumnSidecar support in backfill. To track the availability of fulu-compatible backfill please watch https://github.com/OffchainLabs/prysm/issues/15982. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15987)
- `SidecarProposerExpected`: Use the correct value of proposer index in the singleflight group. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15993)
## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.
@@ -4046,4 +3820,4 @@ There are no security updates in this release.
# Older than v2.0.0
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases

View File

@@ -205,26 +205,6 @@ prysm_image_deps()
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0
# This is necessary as this dependency is required by rules_go and they do not accept dependency
# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
# patches or update this dependency again, check out the rules_go repo then run the releaser tool.
# bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
# Copy the patches and http_archive updates from rules_go here.
http_archive(
name = "org_golang_x_tools",
patch_args = ["-p1"],
patches = [
"//third_party:org_golang_x_tools-deletegopls.patch",
"//third_party:org_golang_x_tools-gazelle.patch",
],
sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
strip_prefix = "tools-0.38.0",
urls = [
"https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
],
)
go_rules_dependencies()
go_register_toolchains(
@@ -273,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.0"
consensus_spec_version = "v1.6.0"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-b+rJOuVqq+Dy53quPcNYcQwPFoMU7Wp7tdUVe7n0g8w=",
"minimal": "sha256-qxRIxtjPxVsVCY90WsBJKhk0027XDSmhjnRvRN14V1c=",
"mainnet": "sha256-NsuOQG3LzeiEE1TrWuvQ6vu6BboHv7h7f/RTS0pWkCs=",
"general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
"minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
"mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
},
version = consensus_spec_version,
)
@@ -298,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-hwNdUBgdBrkk6pWIpNYbzbwswUuOu6AMD2exN8uv+QQ=",
integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"common.go",
"header.go",
"log.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/apiutil",
visibility = ["//visibility:public"],

View File

@@ -5,6 +5,8 @@ import (
"sort"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
)
type mediaRange struct {
@@ -54,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
}
var out []mediaRange
for field := range strings.SplitSeq(header, ",") {
for _, field := range strings.Split(header, ",") {
if r, ok := parseMediaRange(field); ok {
out = append(out, r)
}

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package apiutil
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/apiutil")

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package beacon
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/beacon")
var log = logrus.WithField("prefix", "beacon")

View File

@@ -6,7 +6,6 @@ go_library(
"bid.go",
"client.go",
"errors.go",
"log.go",
"types.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/client/builder",
@@ -64,5 +63,6 @@ go_test(
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -25,7 +25,7 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)
@@ -70,7 +70,7 @@ type requestLogger struct{}
func (*requestLogger) observe(r *http.Request) (e error) {
b := bytes.NewBuffer(nil)
if r.Body == nil {
log.WithFields(logrus.Fields{
log.WithFields(log.Fields{
"bodyBase64": "(nil value)",
"url": r.URL.String(),
}).Info("Builder http request")
@@ -87,7 +87,7 @@ func (*requestLogger) observe(r *http.Request) (e error) {
return err
}
r.Body = io.NopCloser(b)
log.WithFields(logrus.Fields{
log.WithFields(log.Fields{
"bodyBase64": string(body),
"url": r.URL.String(),
}).Info("Builder http request")
@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
vs := make([]*structs.SignedValidatorRegistration, len(svr))
for i := range svr {
for i := 0; i < len(svr); i++ {
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
}
body, err := json.Marshal(vs)

View File

@@ -23,6 +23,7 @@ import (
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
log "github.com/sirupsen/logrus"
)
type roundtrip func(*http.Request) (*http.Response, error)

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package builder
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/builder")

View File

@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {
// MarshalText returns a byte representation of the text from Uint64String.
func (s Uint64String) MarshalText() ([]byte, error) {
return fmt.Appendf(nil, "%d", s), nil
return []byte(fmt.Sprintf("%d", s)), nil
}
// VersionResponse is a JSON representation of a field in the builder API header response.

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"event_stream.go",
"log.go",
"utils.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/client/event",
@@ -24,5 +23,8 @@ go_test(
"utils_test.go",
],
embed = [":go_default_library"],
deps = ["//testing/require:go_default_library"],
deps = [
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/client"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
const (

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/OffchainLabs/prysm/v7/testing/require"
log "github.com/sirupsen/logrus"
)
func TestNewEventStream(t *testing.T) {

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package event
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/client/event")

View File

@@ -4,7 +4,6 @@ go_library(
name = "go_default_library",
srcs = [
"grpcutils.go",
"log.go",
"parameters.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/api/grpc",

View File

@@ -15,7 +15,7 @@ import (
func LogRequests(
ctx context.Context,
method string, req,
reply any,
reply interface{},
cc *grpc.ClientConn,
invoker grpc.UnaryInvoker,
opts ...grpc.CallOption,
@@ -32,7 +32,7 @@ func LogRequests(
)
start := time.Now()
err := invoker(ctx, method, req, reply, cc, opts...)
log.WithField("backend", header["x-backend"]).
logrus.WithField("backend", header["x-backend"]).
WithField("method", method).WithField("duration", time.Since(start)).
Debug("gRPC request finished.")
return err
@@ -58,7 +58,7 @@ func LogStream(
grpc.Header(&header),
)
strm, err := streamer(ctx, sd, conn, method, opts...)
log.WithField("backend", header["x-backend"]).
logrus.WithField("backend", header["x-backend"]).
WithField("method", method).
Debug("gRPC stream started.")
return strm, err
@@ -71,7 +71,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
if h != "" {
keyValue := strings.Split(h, "=")
if len(keyValue) < 2 {
log.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
continue
}
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package grpc
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/grpc")

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package httprest
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/httprest")
var log = logrus.WithField("prefix", "httprest")

View File

@@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"log.go",
"middleware.go",
"util.go",
],
@@ -28,5 +27,6 @@ go_test(
"//api:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package middleware
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "api/server/middleware")

View File

@@ -9,6 +9,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/api/apiutil"
"github.com/rs/cors"
log "github.com/sirupsen/logrus"
)
type Middleware func(http.Handler) http.Handler

View File

@@ -10,6 +10,7 @@ import (
"github.com/OffchainLabs/prysm/v7/api"
"github.com/OffchainLabs/prysm/v7/testing/require"
log "github.com/sirupsen/logrus"
)
// frozenHeaderRecorder allows asserting that response headers were not modified

View File

@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
}
type GetSpecResponse struct {
Data any `json:"data"`
Data interface{} `json:"data"`
}

View File

@@ -5,7 +5,6 @@ go_library(
srcs = [
"debounce.go",
"every.go",
"log.go",
"multilock.go",
"scatter.go",
],

View File

@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {
v := New()
pre := !v.IsSet()
for i := range 100 {
for i := 0; i < 100; i++ {
v.SetTo(false)
for range i {
for j := 0; j < i; j++ {
pre = v.Toggle()
}
@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {
// Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.Set()
wg.Done()
}
@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {
// Reader
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.IsSet()
wg.Done()
}
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
// Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.UnSet()
wg.Done()
}
@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {
// Reader And Writer
go func() {
for range repeat {
for i := 0; i < repeat; i++ {
v.Toggle()
wg.Done()
}
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
func BenchmarkMutexRead(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RLock()
_ = v
m.RUnlock()
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
func BenchmarkAtomicValueRead(b *testing.B) {
var v atomic.Value
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = v.Load() != nil
}
}
func BenchmarkAtomicBoolRead(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_ = v.IsSet()
}
}
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
func BenchmarkMutexWrite(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.RLock()
v = true
m.RUnlock()
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
func BenchmarkAtomicValueWrite(b *testing.B) {
var v atomic.Value
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Store(true)
}
}
func BenchmarkAtomicBoolWrite(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Set()
}
}
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
func BenchmarkMutexCAS(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Lock()
if !v {
v = true
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
func BenchmarkAtomicBoolCAS(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.SetToIf(false, true)
}
}
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
func BenchmarkMutexToggle(b *testing.B) {
var m sync.RWMutex
var v bool
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
m.Lock()
v = !v
m.Unlock()
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
func BenchmarkAtomicBoolToggle(b *testing.B) {
v := New()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.Toggle()
}
}

View File

@@ -21,7 +21,7 @@ const (
func init() {
input = make([][]byte, benchmarkElements)
for i := range benchmarkElements {
for i := 0; i < benchmarkElements; i++ {
input[i] = make([]byte, benchmarkElementSize)
_, err := rand.Read(input[i])
if err != nil {
@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
output := make([][]byte, len(input))
for i := range input {
copy(output, input)
for range benchmarkHashRuns {
for j := 0; j < benchmarkHashRuns; j++ {
hash := sha256.Sum256(output[i])
output[i] = hash[:]
}
@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
}
func BenchmarkHash(b *testing.B) {
for b.Loop() {
for i := 0; i < b.N; i++ {
hash(input)
}
}
func BenchmarkHashMP(b *testing.B) {
output := make([][]byte, len(input))
for b.Loop() {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
for i := 0; i < b.N; i++ {
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
return hash(input[offset : offset+entries]), nil
})
require.NoError(b, err)

View File

@@ -7,7 +7,7 @@ import (
// Debounce events fired over a channel by a specified duration, ensuring no events
// are handled until a certain interval of time has passed.
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
var timer *time.Timer
defer func() {
if timer != nil {
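For readers skimming the hunk above, a minimal usage sketch of async.Debounce, assuming the import path follows the v7 module path used elsewhere in this diff; the burst collapsing into a single handler call mirrors the TestDebounce_SingleHandlerInvocation case shown later.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/OffchainLabs/prysm/v7/async"
)

func main() {
	events := make(chan any, 16)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The handler runs only after the debounce interval has elapsed.
	go async.Debounce(ctx, 500*time.Millisecond, events, func(event any) {
		fmt.Println("handled debounced event:", event)
	})

	// A rapid burst of events should collapse into a single handler invocation
	// once the interval passes without further events.
	for i := 0; i < 10; i++ {
		events <- i
	}
	time.Sleep(time.Second)
}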

View File

@@ -14,7 +14,7 @@ import (
)
func TestDebounce_NoEvents(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event any) {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
}
func TestDebounce_CtxClosing(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
})
}()
go func() {
async.Debounce(ctx, interval, eventsChan, func(event any) {
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
wg.Done()
@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
}
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event any) {
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for range 100 {
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
// We should expect 100 rapid fire changes to only have caused
@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
}
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
eventsChan := make(chan any, 100)
eventsChan := make(chan interface{}, 100)
ctx, cancel := context.WithCancel(t.Context())
interval := time.Second
timesHandled := int32(0)
go async.Debounce(ctx, interval, eventsChan, func(event any) {
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
atomic.AddInt32(&timesHandled, 1)
})
for range 100 {
for i := 0; i < 100; i++ {
eventsChan <- struct{}{}
}
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")

View File

@@ -93,7 +93,9 @@ func ExampleSubscriptionScope() {
// Run a subscriber in the background.
divsub := app.SubscribeResults('/', divs)
mulsub := app.SubscribeResults('*', muls)
wg.Go(func() {
wg.Add(1)
go func() {
defer wg.Done()
defer fmt.Println("subscriber exited")
defer divsub.Unsubscribe()
defer mulsub.Unsubscribe()
@@ -109,7 +111,7 @@ func ExampleSubscriptionScope() {
return
}
}
})
}()
// Interact with the app.
app.Calc('/', 22, 11)

View File

@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
// Create a subscription that sends 10 integers on ch.
ch := make(chan int)
sub := event.NewSubscription(func(quit <-chan struct{}) error {
for i := range 10 {
for i := 0; i < 10; i++ {
select {
case ch <- i:
case <-quit:

View File

@@ -3,6 +3,6 @@ package event
// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
Subscribe(channel any) Subscription
Send(value any) (nsent int)
Subscribe(channel interface{}) Subscription
Send(value interface{}) (nsent int)
}

View File

@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")
func subscribeInts(max, fail int, c chan<- int) Subscription {
return NewSubscription(func(quit <-chan struct{}) error {
for i := range max {
for i := 0; i < max; i++ {
if i >= fail {
return errInts
}
@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
channel := make(chan int)
sub := subscribeInts(10, 2, channel)
loop:
for want := range 10 {
for want := 0; want < 10; want++ {
select {
case got := <-channel:
require.Equal(t, want, got)

View File

@@ -6,6 +6,8 @@ import (
"reflect"
"runtime"
"time"
log "github.com/sirupsen/logrus"
)
// RunEvery runs the provided command periodically.

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package async
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "async")

View File

@@ -107,13 +107,15 @@ func TestLockUnlock(_ *testing.T) {
func TestLockUnlock_CleansUnused(t *testing.T) {
var wg sync.WaitGroup
wg.Go(func() {
wg.Add(1)
go func() {
lock := NewMultilock("dog", "cat", "owl")
lock.Lock()
assert.Equal(t, 3, len(locks.list))
lock.Unlock()
})
wg.Done()
}()
wg.Wait()
// We expect that unlocking completely cleared the locks list
// given all 3 lock keys were unused at time of unlock.

View File

@@ -9,14 +9,14 @@ import (
// WorkerResults are the results of a scatter worker.
type WorkerResults struct {
Offset int
Extent any
Extent interface{}
}
// Scatter scatters a computation across multiple goroutines.
// This breaks the task into a number of chunks and executes those chunks in parallel with the function provided.
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
// Any error that occurs in the workers will be passed back to the calling function.
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
if inputLen <= 0 {
return nil, errors.New("input length must be greater than 0")
}
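A minimal usage sketch of Scatter based on the doc comment and signature above (import path assumed to follow the v7 module path used elsewhere in this diff); it doubles a slice in parallel, mirroring the TestDouble case shown next.

package main

import (
	"fmt"
	"sync"

	"github.com/OffchainLabs/prysm/v7/async"
)

func main() {
	in := []int{1, 2, 3, 4, 5, 6, 7, 8}
	out := make([]int, len(in))

	// Each worker receives an offset and a number of entries to process and
	// returns its partial result; callers reassemble the results by offset.
	results, err := async.Scatter(len(in), func(offset, entries int, _ *sync.RWMutex) (any, error) {
		extent := make([]int, entries)
		for i := 0; i < entries; i++ {
			extent[i] = in[offset+i] * 2
		}
		return extent, nil
	})
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		copy(out[r.Offset:], r.Extent.([]int))
	}
	fmt.Println(out) // [2 4 6 8 10 12 14 16]
}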

View File

@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
inValues[i] = i
}
outValues := make([]int, test.inValues)
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
extent := make([]int, entries)
for i := range entries {
for i := 0; i < entries; i++ {
extent[i] = inValues[offset+i] * 2
}
return extent, nil
@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
func TestMutex(t *testing.T) {
totalRuns := 1048576
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
mu.Lock()
val++
mu.Unlock()
@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
func TestError(t *testing.T) {
totalRuns := 1024
val := 0
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
for range entries {
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
for i := 0; i < entries; i++ {
mu.Lock()
val++
if val == 1011 {

View File

@@ -14,7 +14,6 @@ go_library(
"head_sync_committee_info.go",
"init_sync_process_block.go",
"log.go",
"log_helpers.go",
"merge_ascii_art.go",
"metrics.go",
"options.go",

View File

@@ -323,17 +323,14 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
var ok bool
e := slots.ToEpoch(slot)
stateEpoch := slots.ToEpoch(st.Slot())
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
if e == stateEpoch || fuluAndNextEpoch {
if e == stateEpoch {
val, ok = s.trackedProposer(st, slot)
if !ok {
return emptyAttri
}
}
st = st.Copy()
if slot > st.Slot() {
// At this point either we know we are proposing on a future slot or we need to still compute the
// right proposer index pre-Fulu, either way we need to copy the state to process it.
st = st.Copy()
var err error
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
if err != nil {
@@ -341,7 +338,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
return emptyAttri
}
}
if e > stateEpoch && !fuluAndNextEpoch {
if e > stateEpoch {
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
val, ok = s.trackedProposer(st, slot)
if !ok {

View File

@@ -1053,3 +1053,40 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
require.Equal(t, vhs[0].String(), vh0)
require.Equal(t, vhs[1].String(), vh1)
}
func TestComputePayloadAttribute(t *testing.T) {
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
ctx := tr.ctx
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
proposingSlot: slot,
headRoot: [32]byte{},
}
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
// Cache hit, advance state, has fee recipient
suggestedAddr := common.HexToAddress("123")
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
require.Equal(t, false, fcu.attributes.IsEmpty())
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
}

View File

@@ -12,7 +12,6 @@ import (
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -54,53 +53,58 @@ type fcuConfig struct {
}
// sendFCU handles the logic to notify the engine of a forkchoice update
// when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if cfg.postState.Version() < version.Fulu {
// update the caches to compute the right proposer index
// this function is called under a forkchoice lock which we need to release.
s.ForkChoicer().Unlock()
s.updateCachesPostBlockProcessing(cfg)
s.ForkChoicer().Lock()
// for the first time when processing an incoming block during regular sync. It
// always updates the shuffling caches and handles epoch transitions when the
// incoming block is late, preparing payload attributes in this case while it
// only sends a message with empty attributes for early blocks.
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if !s.isNewHead(cfg.headRoot) {
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return
}
// If head has not been updated and attributes are nil, we can skip the FCU.
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
return
}
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
return nil
}
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
}
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
// to the engine if needed
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
cfg.ctx = slotCtx
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not compute payload attributes")
return
}
if s.inRegularSync() {
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
if fcuArgs.attributes.IsEmpty() {
return
}
if s.isNewHead(fcuArgs.headRoot) {
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
}
}
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
// The caller of this function should NOT have a lock in forkchoice store.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
// forkchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
defer span.End()
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
ctx = trace.NewContext(s.ctx, span)
s.ForkChoicer().Lock()
defer s.ForkChoicer().Unlock()
_, err := s.notifyForkchoiceUpdate(ctx, args)
if err != nil {
log.WithError(err).Error("Could not notify forkchoice update")
return errors.Wrap(err, "could not notify forkchoice update")
}
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
// Only need to prune attestations from pool if the head has changed.
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
return nil
}
// shouldOverrideFCU checks whether the incoming block is still subject to being

View File

@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
headBlock: wsb,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
require.Equal(t, true, has)
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
headRoot: r,
proposingSlot: service.CurrentSlot() + 1,
}
service.forkchoiceUpdateWithExecution(ctx, args)
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
}
func TestShouldOverrideFCU(t *testing.T) {

View File

@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)
for i := range blobCount {
for i := 0; i < blobCount; i++ {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)
@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[1] = make([]byte, 32) // Wrong size
// Add cell proofs for both blobs
for range blobCount {
for range numberOfColumns {
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}
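The loop rewrites in these KZG test hunks (and the Scatter tests above) swap between Go 1.22's range-over-int form and the classic counted loop; both visit 0 through n-1, the counted form just makes the index type explicit (e.g. uint64 for column indices). A tiny self-contained comparison:

package main

import "fmt"

func main() {
	const n = 3
	// Go 1.22+ range-over-int: i takes the values 0, 1, ..., n-1.
	for i := range n {
		fmt.Println("range form:", i)
	}
	// Classic counted loop: same iterations, with an explicitly typed index.
	for j := uint64(0); j < n; j++ {
		fmt.Println("counted form:", j)
	}
}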
@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte
for i := range blobCount {
for i := 0; i < blobCount; i++ {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])
@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[i] = commitment[:]
// Add cell proofs - make some invalid in the second blob
for j := range numberOfColumns {
for j := uint64(0); j < numberOfColumns; j++ {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))

View File

@@ -1,9 +1,164 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package blockchain
import "github.com/sirupsen/logrus"
import (
"encoding/hex"
"fmt"
"time"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/blockchain")
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var log = logrus.WithField("prefix", "blockchain")
// logs state transition related data every slot.
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
if len(b.Body().AttesterSlashings()) > 0 {
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
}
if len(b.Body().ProposerSlashings()) > 0 {
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
}
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() >= version.Altair {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
}
if b.Version() >= version.Electra {
eReqs, err := b.Body().ExecutionRequests()
if err != nil {
log.WithError(err).Error("Failed to get execution requests")
} else {
if len(eReqs.Deposits) > 0 {
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
}
if len(eReqs.Consolidations) > 0 {
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
}
if len(eReqs.Withdrawals) > 0 {
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
}
}
}
log.Info("Finished applying state transition")
return nil
}
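A note on the log = log.WithField(...) reassignments above: WithField returns a new entry rather than mutating the receiver, so dropping the reassignment would silently lose the field. A minimal sketch of the pattern:

package main

import "github.com/sirupsen/logrus"

func main() {
	log := logrus.WithField("prefix", "blockchain")
	// WithField returns a fresh *logrus.Entry; reassign to keep accumulating fields.
	log = log.WithField("slot", 42)
	log = log.WithField("attestations", 1)
	log.Info("Finished applying state transition")
}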
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
return nil
}
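The abbreviated roots above come from hex-encoding the 32-byte root and keeping the first eight hex characters (four bytes). A quick self-contained check of the formatting:

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	var blockRoot [32]byte
	copy(blockRoot[:], []byte{0xde, 0xad, 0xbe, 0xef})
	// 32 bytes encode to 64 hex characters; [:8] keeps the first four bytes.
	fmt.Printf("0x%s...\n", hex.EncodeToString(blockRoot[:])[:8]) // 0xdeadbeef...
}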
// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil
}
payload, err := block.Body().Execution()
if err != nil {
return err
}
if payload.GasLimit() == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber(),
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
if len(changes) > 0 {
fields["blsToExecutionChanges"] = len(changes)
}
}
log.WithFields(fields).Debug("Synced new payload")
return nil
}
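gasUtilized above is simply GasUsed/GasLimit rendered with %.2f; a worked example with illustrative numbers (15M gas used against a 30M limit):

package main

import "fmt"

func main() {
	gasUsed, gasLimit := uint64(15_000_000), uint64(30_000_000) // illustrative values
	gasUtilized := float64(gasUsed) / float64(gasLimit)
	fmt.Printf("gasUtilized=%.2f\n", gasUtilized) // gasUtilized=0.50
}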

View File

@@ -1,162 +0,0 @@
package blockchain
import (
"encoding/hex"
"fmt"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
prysmTime "github.com/OffchainLabs/prysm/v7/time"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// logs state transition related data every slot.
func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
log := log.WithField("slot", b.Slot())
if len(b.Body().Attestations()) > 0 {
log = log.WithField("attestations", len(b.Body().Attestations()))
}
if len(b.Body().AttesterSlashings()) > 0 {
log = log.WithField("attesterSlashings", len(b.Body().AttesterSlashings()))
}
if len(b.Body().ProposerSlashings()) > 0 {
log = log.WithField("proposerSlashings", len(b.Body().ProposerSlashings()))
}
if len(b.Body().VoluntaryExits()) > 0 {
log = log.WithField("voluntaryExits", len(b.Body().VoluntaryExits()))
}
if b.Version() >= version.Altair {
agg, err := b.Body().SyncAggregate()
if err != nil {
return err
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
}
if b.Version() >= version.Electra {
eReqs, err := b.Body().ExecutionRequests()
if err != nil {
log.WithError(err).Error("Failed to get execution requests")
} else {
if len(eReqs.Deposits) > 0 {
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
}
if len(eReqs.Consolidations) > 0 {
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
}
if len(eReqs.Withdrawals) > 0 {
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
}
}
}
log.Info("Finished applying state transition")
return nil
}
func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte, justified, finalized *ethpb.Checkpoint, receivedTime time.Time, genesis time.Time, daWaitedTime time.Duration) error {
startTime, err := slots.StartTime(genesis, block.Slot())
if err != nil {
return err
}
level := log.Logger.GetLevel()
if level >= logrus.DebugLevel {
parentRoot := block.ParentRoot()
lf := logrus.Fields{
"slot": block.Slot(),
"slotInEpoch": block.Slot() % params.BeaconConfig().SlotsPerEpoch,
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"epoch": slots.ToEpoch(block.Slot()),
"justifiedEpoch": justified.Epoch,
"justifiedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(justified.Root)[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"parentRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(parentRoot[:])[:8]),
"version": version.String(block.Version()),
"sinceSlotStartTime": prysmTime.Now().Sub(startTime),
"chainServiceProcessedTime": prysmTime.Now().Sub(receivedTime) - daWaitedTime,
"dataAvailabilityWaitedTime": daWaitedTime,
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
"slot": block.Slot(),
"block": fmt.Sprintf("0x%s...", hex.EncodeToString(blockRoot[:])[:8]),
"finalizedEpoch": finalized.Epoch,
"finalizedRoot": fmt.Sprintf("0x%s...", hex.EncodeToString(finalized.Root)[:8]),
"epoch": slots.ToEpoch(block.Slot()),
}).Info("Synced new block")
}
return nil
}
// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")
}
if !isExecutionBlk {
return nil
}
payload, err := block.Body().Execution()
if err != nil {
return err
}
if payload.GasLimit() == 0 {
return errors.New("gas limit should not be 0")
}
gasUtilized := float64(payload.GasUsed()) / float64(payload.GasLimit())
fields := logrus.Fields{
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
"parentHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.ParentHash())),
"blockNumber": payload.BlockNumber(),
"gasUtilized": fmt.Sprintf("%.2f", gasUtilized),
}
if block.Version() >= version.Capella {
withdrawals, err := payload.Withdrawals()
if err != nil {
return errors.Wrap(err, "could not get withdrawals")
}
fields["withdrawals"] = len(withdrawals)
changes, err := block.Body().BLSToExecutionChanges()
if err != nil {
return errors.Wrap(err, "could not get BLSToExecutionChanges")
}
if len(changes) > 0 {
fields["blsToExecutionChanges"] = len(changes)
}
}
log.WithFields(fields).Debug("Synced new payload")
return nil
}

View File

@@ -34,7 +34,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0",
want: "\"Finished applying state transition\" prefix=blockchain slot=0",
},
{name: "has attestation",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -42,7 +42,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has deposit",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -53,7 +53,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 package=beacon-chain/blockchain slot=0",
want: "\"Finished applying state transition\" attestations=1 prefix=blockchain slot=0",
},
{name: "has attester slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -62,7 +62,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attesterSlashings=1 package=beacon-chain/blockchain slot=0",
want: "\"Finished applying state transition\" attesterSlashings=1 prefix=blockchain slot=0",
},
{name: "has proposer slashing",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -71,7 +71,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" package=beacon-chain/blockchain proposerSlashings=1 slot=0",
want: "\"Finished applying state transition\" prefix=blockchain proposerSlashings=1 slot=0",
},
{name: "has exit",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -80,7 +80,7 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" package=beacon-chain/blockchain slot=0 voluntaryExits=1",
want: "\"Finished applying state transition\" prefix=blockchain slot=0 voluntaryExits=1",
},
{name: "has everything",
b: func() interfaces.ReadOnlyBeaconBlock {
@@ -93,11 +93,11 @@ func Test_logStateTransitionData(t *testing.T) {
require.NoError(t, err)
return wb
},
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 package=beacon-chain/blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
want: "\"Finished applying state transition\" attestations=1 attesterSlashings=1 prefix=blockchain proposerSlashings=1 slot=0 voluntaryExits=1",
},
{name: "has payload",
b: func() interfaces.ReadOnlyBeaconBlock { return wrappedPayloadBlk },
want: "\"Finished applying state transition\" package=beacon-chain/blockchain payloadHash=0x010203 slot=0 syncBitsCount=0 txCount=2",
want: "\"Finished applying state transition\" payloadHash=0x010203 prefix=blockchain slot=0 syncBitsCount=0 txCount=2",
},
}
for _, tt := range tests {

View File

@@ -22,7 +22,10 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.
@@ -30,13 +33,11 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
if err != nil {
return nil
}
@@ -45,18 +46,14 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}
// If the head state alone is enough, we can return it directly read only.
if c.Epoch <= headEpoch {
if c.Epoch == headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// Otherwise we need to advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {

View File

@@ -170,141 +170,17 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}
func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With different dependent roots for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()
service.head = &head{
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}
func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)
st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}
func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
@@ -333,14 +209,16 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
var wg sync.WaitGroup
errChan := make(chan error, 1000)
for range 1000 {
wg.Go(func() {
for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
_, err := service.getAttPreState(ctx, cp1)
if err != nil {
errChan <- err
}
})
}()
}
go func() {

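The WaitGroup change in this test trades sync.WaitGroup.Go for the explicit Add/Done pairing; both forms run the closure in its own goroutine and are released by Wait. A small side-by-side sketch (the wg.Go form assumes a toolchain new enough to ship WaitGroup.Go, added in Go 1.25):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		wg      sync.WaitGroup
		mu      sync.Mutex
		counter int
	)
	// Go 1.25+: wg.Go wraps Add(1) and Done around the goroutine for us.
	for range 10 {
		wg.Go(func() {
			mu.Lock()
			counter++
			mu.Unlock()
		})
	}
	// Equivalent pre-1.25 form: Add before spawning, deferred Done inside.
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			counter++
			mu.Unlock()
		}()
	}
	wg.Wait()
	fmt.Println(counter) // 20
}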
View File

@@ -66,6 +66,9 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}
if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
@@ -102,17 +105,14 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
s.sendFCU(cfg, fcuArgs)
// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
return nil
}
@@ -295,6 +295,14 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
@@ -322,7 +330,6 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}
// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
@@ -352,9 +359,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil
@@ -367,7 +372,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches. The caller of this function must not hold a lock in forkchoice store.
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
@@ -629,7 +634,9 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
return nil, nil
}
if len(expected) > fieldparams.NumberOfColumns {
numberOfColumns := params.BeaconConfig().NumberOfColumns
if uint64(len(expected)) > numberOfColumns {
return nil, errMaxDataColumnsExceeded
}
@@ -810,10 +817,11 @@ func (s *Service) areDataColumnsAvailable(
}
case <-ctx.Done():
var missingIndices any = "all"
missingIndicesCount := len(missing)
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missing))
if missingIndicesCount < fieldparams.NumberOfColumns {
if missingIndicesCount < numberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
@@ -907,6 +915,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return
@@ -919,32 +929,14 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
// return early if we already started building a block for the current
// head root
@@ -956,6 +948,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
return
}
@@ -974,8 +973,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")

View File

@@ -42,8 +42,14 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
}
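getFCUArgs now computes payload attributes inline only for blocks that arrive after the attestation voting window; early blocks skip that cost here and get attributes later via handleSecondFCUCall. The sketch below is a rough stand-in for that gate, assuming the voting window is the first SecondsPerSlot/IntervalsPerSlot seconds of the slot (4s with mainnet's 12s slots); the real check is slots.WithinVotingWindow:

package main

import (
	"fmt"
	"time"
)

const (
	secondsPerSlot   = 12 // mainnet value, assumed for illustration
	intervalsPerSlot = 3
)

// withinVotingWindow is a hedged stand-in for slots.WithinVotingWindow: it
// reports whether now falls within the first interval of the given slot.
func withinVotingWindow(genesis time.Time, slot uint64, now time.Time) bool {
	slotStart := genesis.Add(time.Duration(slot*secondsPerSlot) * time.Second)
	return now.Sub(slotStart) < (secondsPerSlot/intervalsPerSlot)*time.Second
}

func main() {
	genesis := time.Now().Add(-25 * time.Second) // one second into slot 2
	// An early block: payload attributes would be deferred to the second FCU call.
	fmt.Println(withinVotingWindow(genesis, 2, time.Now())) // true
}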
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
@@ -167,19 +173,26 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {
// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
log.WithError(err).Error("Could not update next slot state cache")
return
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return
return nil
}
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when we are proposing the next block and want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -189,6 +202,20 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}
// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}
// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.

View File

@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
bState := st.Copy()
var blks []consensusblocks.ROBlock
for i := range 97 {
for i := 0; i < 97; i++ {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
@@ -738,9 +738,7 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -790,9 +788,7 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -820,9 +816,25 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}
func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))
blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
require.Equal(t, true, IsInvalidBlock(err))
}
@@ -854,9 +866,7 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}
@@ -1313,7 +1323,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
require.NoError(t, err)
logHook := logTest.NewGlobal()
for range 10 {
for i := 0; i < 10; i++ {
fc := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
require.NoError(t, err)
@@ -1329,9 +1339,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1343,9 +1351,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1357,9 +1363,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1371,9 +1375,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()
@@ -1398,6 +1400,197 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)
mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx
st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")
require.NoError(t, service.saveGenesisData(ctx, st))
genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}
for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)
// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported, that the
// store's head root is still the previous head (since the invalid block did
// not finish importing), and that the node is optimistic.
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)
// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}
// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
@@ -1449,9 +1642,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1471,9 +1662,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
for i := 12; i < 18; i++ {
@@ -1494,9 +1684,8 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1519,9 +1708,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)
@@ -1531,10 +1718,6 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed
// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}
@@ -1585,9 +1768,7 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
@@ -1654,9 +1835,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1677,9 +1856,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1699,9 +1877,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1730,9 +1906,8 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
@@ -1774,7 +1949,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.Equal(t, true, optimistic)
// Check that the invalid blocks are not in database
for i := range 19 - 13 {
for i := 0; i < 19-13; i++ {
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
}
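Many hunks in this compare swap the classic three-clause counter loop for Go 1.22's range-over-int form, as in the pair of lines just above. A minimal standalone sketch of the equivalence (bounds and names here are illustrative, not taken from the test):

package main

import "fmt"

func main() {
	// Classic counted loop.
	for i := 0; i < 3; i++ {
		fmt.Println("classic", i)
	}
	// Go 1.22+ range-over-int: i takes the values 0, 1, 2.
	for i := range 3 {
		fmt.Println("range", i)
	}
	// When the index is unused, the variable can be dropped entirely.
	for range 3 {
		fmt.Println("tick")
	}
}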
@@ -1800,9 +1975,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)
@@ -1827,9 +2000,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)
@@ -1857,9 +2028,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
@@ -1903,6 +2072,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
@@ -1919,9 +2089,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
for i := 6; i < 12; i++ {
@@ -1941,9 +2109,8 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// import the merge block
@@ -1963,9 +2130,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH
@@ -1996,9 +2161,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -2119,9 +2282,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2187,9 +2348,7 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
st, err = service.HeadState(ctx)
require.NoError(t, err)
@@ -2336,8 +2495,7 @@ func TestMissingBlobIndices(t *testing.T) {
}
func TestMissingDataColumnIndices(t *testing.T) {
const countPlusOne = fieldparams.NumberOfColumns + 1
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
tooManyColumns := make(map[uint64]bool, countPlusOne)
for i := range countPlusOne {
tooManyColumns[uint64(i)] = true
@@ -2472,10 +2630,7 @@ func TestRollbackBlock(t *testing.T) {
require.NoError(t, err)
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2576,9 +2731,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
@@ -2612,10 +2765,7 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
// Rollback block insertion into db and caches.
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.ErrorContains(t, "context canceled", err)
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
@@ -2655,10 +2805,6 @@ func TestProcessLightClientUpdate(t *testing.T) {
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
for _, testVersion := range version.All()[1:] {
if testVersion == version.Gloas {
// TODO(16027): Unskip light client tests for Gloas
continue
}
t.Run(version.String(testVersion), func(t *testing.T) {
l := util.NewTestLightClient(t, testVersion)
@@ -2733,7 +2879,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
// set a better sync aggregate
scb := make([]byte, 64)
for i := range 5 {
for i := 0; i < 5; i++ {
scb[i] = 0x01
}
oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
@@ -3111,9 +3257,7 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
}
// Execute postBlockProcess
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(cfg)
service.cfg.ForkChoiceStore.Unlock()
// Check error expectation
if tt.expectError {

View File

@@ -156,15 +156,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}
// This processes fork choice attestations from the pool to account for validator votes and fork choice.

View File

@@ -117,9 +117,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -179,9 +177,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

View File

@@ -17,7 +17,6 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
@@ -131,10 +130,12 @@ func TestService_ReceiveBlock(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
// Hacky sleep, should use a better way to be able to resolve the race
// between event being sent out and processed.
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
{
@@ -215,16 +216,18 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
wg.Go(func() {
wg.Add(1)
go func() {
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
})
wg.Done()
}()
wg.Wait()
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
// Verify fork choice has processed the block. (Genesis block and the new block)
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
}
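The hunk above also trades the manual wg.Add(1) / go func / wg.Done() pattern for sync.WaitGroup.Go, added in Go 1.25, which spawns the goroutine and manages the counter internally. A minimal sketch of the two equivalent forms (the payload function is illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Manual form: bump the counter, spawn the goroutine, decrement when done.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("manual goroutine")
	}()

	// Go 1.25+ form: WaitGroup.Go wraps Add(1), the go statement, and Done.
	wg.Go(func() {
		fmt.Println("wg.Go goroutine")
	})

	wg.Wait()
}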
@@ -264,10 +267,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}
@@ -511,9 +514,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
s.cfg.StateNotifier = notifier
s.executePostFinalizationTasks(s.ctx, headState)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) == 1
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -552,9 +554,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
s.cfg.StateNotifier = notifier
s.executePostFinalizationTasks(s.ctx, headState)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) == 1
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -597,13 +598,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
// Wait for the light client bootstrap to be saved (runs in goroutine)
var b interfaces.LightClientBootstrap
require.Eventually(t, func() bool {
var err error
b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
return err == nil && b != nil
}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")
// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
require.NoError(t, err)
require.NotNil(t, b)
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
require.NoError(t, err)
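Several hunks in this file replace fixed time.Sleep waits on the mock notifier with testify's require.Eventually, which polls a condition until it returns true or the timeout expires. A minimal sketch of the polling pattern, including capturing a value computed inside the condition as the light client hunk does (the channel and timings are illustrative, not taken from these tests):

package chain_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyPolling(t *testing.T) {
	events := make(chan string, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		events <- "finalized"
	}()

	// Poll every 10ms for up to 2s instead of sleeping a fixed amount.
	var got string
	require.Eventually(t, func() bool {
		select {
		case got = <-events:
			return true
		default:
			return false
		}
	}, 2*time.Second, 10*time.Millisecond, "expected at least one event")
	require.Equal(t, "finalized", got)
}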

View File

@@ -14,7 +14,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -471,35 +470,30 @@ func (s *Service) removeStartupState() {
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
if err != nil {
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
log.WithError(err).Error("Could not update subscription status to all data subnets")
}
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement
// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
}
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = cfg.NumberOfCustodyGroups
}
// Safely compute the fulu fork slot.
@@ -516,23 +510,12 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
}
}
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}
if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}
return earliestAvailableSlot, actualCustodyGroupCount, nil
return earliestAvailableSlot, custodyGroupCount, nil
}
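With the +/- markers stripped from this view, the two versions of updateCustodyInfoInDB are interleaved above, and the custody-count selection is the part most easily lost. A rough side-by-side sketch of the two selection rules as they appear in the hunk, with the values passed in as parameters rather than read from flags/params/peerdas as the real code does (treat this as an illustration of the flag precedence, not a verbatim copy):

package main

import "fmt"

// One side of the hunk: a single subscribe-all-data-subnets flag either keeps the
// base custody requirement or custodies every group.
func custodyCountSubscribeAll(custodyRequirement, numberOfCustodyGroups uint64, subscribeAll bool) uint64 {
	custodyGroupCount := custodyRequirement
	if subscribeAll {
		custodyGroupCount = numberOfCustodyGroups
	}
	return custodyGroupCount
}

// The other side: separate supernode / semi-supernode flags, where a semi-supernode
// custodies at least the minimum needed for reconstruction (minToReconstruct stands in
// for the peerdas.MinimumCustodyGroupCountToReconstruct call shown in the diff).
func custodyCountSupernode(custodyRequirement, numberOfCustodyGroups, minToReconstruct uint64, supernode, semiSupernode bool) uint64 {
	targetCustodyGroupCount := custodyRequirement
	if supernode {
		targetCustodyGroupCount = numberOfCustodyGroups
	}
	if semiSupernode {
		targetCustodyGroupCount = max(custodyRequirement, minToReconstruct)
	}
	return targetCustodyGroupCount
}

func main() {
	fmt.Println(custodyCountSubscribeAll(4, 128, true))         // 128
	fmt.Println(custodyCountSupernode(4, 128, 64, false, true)) // 64
}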
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {

View File

@@ -412,7 +412,8 @@ func BenchmarkHasBlockDB(b *testing.B) {
r, err := blk.Block.HashTreeRoot()
require.NoError(b, err)
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
}
}
@@ -431,7 +432,8 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
}
}
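The benchmark hunks above (and the deposit-tree benchmarks later in this compare) trade the b.ResetTimer() / for i := 0; i < b.N; i++ shape for testing.B.Loop, added in Go 1.24, which keeps iterating until the benchmark has run long enough and excludes per-benchmark setup before the loop from the timing. A minimal sketch of the two forms (the benchmarked work is illustrative):

package bench

import "testing"

func work() int {
	s := 0
	for i := 0; i < 1000; i++ {
		s += i
	}
	return s
}

// Classic form: reset the timer after setup, then iterate b.N times.
func BenchmarkWorkClassic(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		work()
	}
}

// Go 1.24+ form: b.Loop() handles the iteration count and timer bookkeeping.
func BenchmarkWorkLoop(b *testing.B) {
	for b.Loop() {
		work()
	}
}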
@@ -603,6 +605,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
numberOfColumns = uint64(128)
)
params.SetupTestConfigCleanup(t)
@@ -610,6 +613,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
cfg.NumberOfColumns = numberOfColumns
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
@@ -640,7 +644,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
@@ -678,7 +682,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
gFlags.SubscribeAllDataSubnets = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
@@ -693,121 +697,4 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
})
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
})
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Mock a high custody requirement (simulating many validators)
// We need to override the custody requirement calculation
// For this test, we'll verify the logic by checking if custodyRequirement > 64
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
// This would require a different test setup with actual validators
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
// With low validator requirements (4), should use semi-supernode minimum (64)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}

View File

@@ -3,10 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
testonly = True,
srcs = [
"log.go",
"mock.go",
],
srcs = ["mock.go"],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing",
visibility = [
"//beacon-chain:__subpackages__",

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package testing
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/blockchain/testing")

View File

@@ -30,6 +30,7 @@ import (
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var ErrNilState = errors.New("nil state")
@@ -105,7 +106,7 @@ type EventFeedWrapper struct {
subscribed chan struct{} // this channel is closed once a subscription is made
}
func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
select {
case <-w.subscribed:
break // already closed
@@ -115,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
return w.feed.Subscribe(channel)
}
func (w *EventFeedWrapper) Send(value any) int {
func (w *EventFeedWrapper) Send(value interface{}) int {
return w.feed.Send(value)
}
@@ -266,7 +267,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
if err := s.DB.SaveBlock(ctx, block); err != nil {
return err
}
log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
}
s.Root = signingRoot[:]
s.Block = block
@@ -295,7 +296,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
if err := s.DB.SaveBlock(ctx, b); err != nil {
return err
}
log.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
}
s.Root = signingRoot[:]
s.Block = b
@@ -327,7 +328,7 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
if err := s.DB.SaveBlock(ctx, block); err != nil {
return err
}
log.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
}
s.Root = signingRoot[:]
s.Block = block
@@ -584,11 +585,11 @@ func (s *ChainService) UpdateHead(ctx context.Context, slot primitives.Slot) {
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, slot, bytesutil.ToBytes32(s.Root), [32]byte{}, [32]byte{}, ojc, ojc)
if err != nil {
log.WithError(err).Error("Could not update head")
logrus.WithError(err).Error("Could not update head")
}
err = s.ForkChoiceStore.InsertNode(ctx, st, root)
if err != nil {
log.WithError(err).Error("Could not insert node to forkchoice")
logrus.WithError(err).Error("Could not insert node to forkchoice")
}
}

View File

@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package builder
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/builder")
var log = logrus.WithField("prefix", "builder")

View File

@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)
valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := range reg {
for i := 0; i < len(reg); i++ {
r := reg[i]
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
if !exists {

View File

@@ -16,7 +16,6 @@ go_library(
"doc.go",
"error.go",
"interfaces.go",
"log.go",
"payload_id.go",
"proposer_indices.go",
"proposer_indices_disabled.go", # keep

View File

@@ -17,7 +17,7 @@ import (
func TestBalanceCache_AddGetBalance(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {
func TestBalanceCache_BalanceKey(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := range blockRoots {
for i := 0; i < len(blockRoots); i++ {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b

View File

@@ -9,6 +9,7 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/attestation"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
type attGroup struct {

View File

@@ -17,6 +17,7 @@ import (
lru "github.com/hashicorp/golang-lru"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
log "github.com/sirupsen/logrus"
)
const (
@@ -50,7 +51,7 @@ type CommitteeCache struct {
}
// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
func committeeKeyFn(obj any) (string, error) {
func committeeKeyFn(obj interface{}) (string, error) {
info, ok := obj.(*Committees)
if !ok {
return "", ErrNotCommittee

View File

@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
k, err := committeeKeyFn(c)
require.NoError(t, err)
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}
for range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))

View File

@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}
// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(_ any, _ bool) error {
func popProcessNoopFunc(_ interface{}, _ bool) error {
return nil
}
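This hunk, like the keyFn and Subscribe/Send hunks earlier in the compare, only changes the spelling of the empty interface: any has been a predeclared alias for interface{} since Go 1.18, so the two signatures are identical and callers are unaffected. A tiny sketch of the equivalence (names are illustrative):

package main

import "fmt"

// The two parameter types below are the same type; only the spelling differs.
func describeAny(v any) string           { return fmt.Sprintf("%T", v) }
func describeEmpty(v interface{}) string { return fmt.Sprintf("%T", v) }

func main() {
	fmt.Println(describeAny(42))     // int
	fmt.Println(describeEmpty("hi")) // string
}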

View File

@@ -8,7 +8,6 @@ go_library(
"deposit_pruner.go",
"deposit_tree.go",
"deposit_tree_snapshot.go",
"log.go",
"merkle_tree.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/cache/depositsnapshot",

View File

@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}
var ctrs []*ethpb.DepositContainer
for i := range 2000 {
for i := 0; i < 2000; i++ {
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
}
@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for b.Loop() {
for i := 0; i < b.N; i++ {
dt := NewDepositTree()
for range totalDeposits {
for j := 0; j < totalDeposits; j++ {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for b.Loop() {
for i := 0; i < b.N; i++ {
dt, err := trie.NewTrie(33)
require.NoError(b, err)
for range totalDeposits {
for j := 0; j < totalDeposits; j++ {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
}
b.ReportAllocs()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = tr.HashTreeRoot()
require.NoError(b, err)
}
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
}
b.ReportAllocs()
for b.Loop() {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err = dt.HashTreeRoot()
require.NoError(b, err)
}

View File

@@ -20,6 +20,7 @@ var (
Name: "beacondb_all_deposits_eip4881",
Help: "The number of total deposits in memory",
})
log = logrus.WithField("prefix", "cache")
)
// InsertDeposit into the database. If deposit or block number are nil

View File

@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
size := ds.depositCount
index := len(ds.finalized)
root := trie.ZeroHashes[0]
for i := range DepositContractDepth {
for i := 0; i < DepositContractDepth; i++ {
if (size & 1) == 1 {
if index == 0 {
break

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package depositsnapshot
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/cache/depositsnapshot")

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package cache
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/cache")

View File

@@ -47,13 +47,15 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {
c.Enable()
wg := new(sync.WaitGroup)
wg.Go(func() {
wg.Add(1)
go func() {
// Get call will only terminate when
// it is not longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
})
wg.Done()
}()
c.MarkNotInProgress(r)
wg.Wait()

View File

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/tools/cache"
)
@@ -235,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
// The key is the `currentSyncCommitteeRoot` within the field.
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
func keyFn(obj any) (string, error) {
func keyFn(obj interface{}) (string, error) {
info, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return "", errNotSyncCommitteeIndexPosition

View File

@@ -12,12 +12,12 @@ import (
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
c := newSyncSubnetIDs()
for i := range 20 {
for i := 0; i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
for i := range uint64(20) {
for i := uint64(0); i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
c := newSyncSubnetIDs()
for i := range 20 {
for i := 0; i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
coms := c.GetAllSubnets(50)
assert.Equal(t, 0, len(coms))
for i := range uint64(20) {
for i := uint64(0); i < 20; i++ {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)

View File

@@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
)
const (
@@ -66,7 +67,7 @@ func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (Tra
val, ok := item.(TrackedValidator)
if !ok {
log.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
return TrackedValidator{}, false
}
@@ -112,7 +113,7 @@ func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
for cacheKey := range items {
index, err := fromCacheKey(cacheKey)
if err != nil {
log.WithError(err).Error("Failed to get validator index from cache key")
logrus.WithError(err).Error("Failed to get validator index from cache key")
continue
}

View File

@@ -8,7 +8,6 @@ go_library(
"deposit.go",
"epoch_precompute.go",
"epoch_spec.go",
"log.go",
"reward.go",
"sync_committee.go",
"transition.go",

View File

@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
st := &ethpb.BeaconStateAltair{}
b := &ethpb.SignedBeaconBlockAltair{Block: &ethpb.BeaconBlockAltair{}}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(st)
fuzzer.Fuzz(b)
if b.Block == nil {

View File

@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
require.NoError(t, err)
for i := range syncBits {
for i := 0; i < len(syncBits); i++ {
if syncBits.BitAt(uint64(i)) {
pk := bytesutil.ToBytes48(committeeKeys[i])
require.DeepEqual(t, true, votedMap[pk])

View File

@@ -195,7 +195,10 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}
return &ethpb.Validator{
PublicKey: pubKey,

View File

@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
state := &ethpb.BeaconStateAltair{}
deposits := make([]*ethpb.Deposit, 100)
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
for i := range deposits {
fuzzer.Fuzz(deposits[i])
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
deposit := &ethpb.Deposit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
deposit := &ethpb.Deposit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
state := &ethpb.BeaconStateAltair{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafeAltair(state)

View File

@@ -122,8 +122,11 @@ func ProcessInactivityScores(
}
if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
score := min(recoveryRate, v.InactivityScore)
if score > v.InactivityScore {
score = v.InactivityScore
}
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
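The hunk above, like the GetValidatorFromDeposit change later in this compare, collapses an explicit clamp-then-subtract branch into the min builtin available since Go 1.21; as the diff comment notes, the point is to avoid underflow when the recovery rate exceeds the current score. A minimal sketch of the clamped decrement (field and variable names are simplified):

package main

import "fmt"

// decayScore reduces score by rate but never below zero, mirroring the
// min-based form in the hunk above.
func decayScore(score, rate uint64) uint64 {
	return score - min(rate, score)
}

func main() {
	fmt.Println(decayScore(10, 3)) // 7
	fmt.Println(decayScore(2, 5))  // 0: the decrement is clamped, no underflow
}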
@@ -239,7 +242,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
}
balances := beaconState.Balances()
for i := range numOfVals {
for i := 0; i < numOfVals; i++ {
vals[i].BeforeEpochTransitionBalance = balances[i]
// Compute the post balance of the validator after accounting for the

View File

@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
log "github.com/sirupsen/logrus"
)
// ProcessSyncCommitteeUpdates processes sync client committee updates for the beacon state.

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package altair
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/altair")

View File

@@ -21,7 +21,7 @@ import (
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
helpers.ClearCache()
getState := func(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
func TestSyncCommittee_CanGet(t *testing.T) {
getState := func(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
validators[i] = &ethpb.Validator{
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
func getState(t *testing.T, count uint64) state.BeaconState {
validators := make([]*ethpb.Validator, count)
for i := range validators {
for i := 0; i < len(validators); i++ {
blsKey, err := bls.RandKey()
require.NoError(t, err)
validators[i] = &ethpb.Validator{

View File

@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
r, err := helpers.BlockRootAtSlot(s, 0)
require.NoError(t, err)
var pendingAtts []*ethpb.PendingAttestation
for i := range 3 {
for i := 0; i < 3; i++ {
pendingAtts = append(pendingAtts, &ethpb.PendingAttestation{
Data: &ethpb.AttestationData{
CommitteeIndex: primitives.CommitteeIndex(i),

View File

@@ -26,7 +26,6 @@ go_library(
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",

View File

@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
}
indices := indexedAtt.GetAttestingIndices()
var pubkeys []bls.PublicKey
for i := range indices {
for i := 0; i < len(indices); i++ {
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
if err != nil {

View File

@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
func TestConvertToIndexed_OK(t *testing.T) {
helpers.ClearCache()
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
}
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
list := bitfield.Bitlist{0b11111}
var atts []ethpb.Att
for range uint64(1000) {
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 1,
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
atts = []ethpb.Att{}
list = bitfield.Bitlist{0b10000}
for range uint64(1000) {
for i := uint64(0); i < 1000; i++ {
atts = append(atts, &ethpb.Attestation{
Data: &ethpb.AttestationData{
CommitteeIndex: 1,
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
validators := make([]*ethpb.Validator, numOfValidators)
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
require.NoError(t, err)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: keys[i].PublicKey().Marshal(),

View File

@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
att := &ethpb.Attestation{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(att)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
state := &ethpb.BeaconState{}
block := &ethpb.SignedBeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
var p []byte
var s []byte
var d []byte
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(&ba)
fuzzer.Fuzz(&pubkey)
fuzzer.Fuzz(&sig)
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
e := &ethpb.Eth1Data{}
state, err := state_native.InitializeFromProtoUnsafePhase0(&ethpb.BeaconState{})
require.NoError(t, err)
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := ProcessEth1DataInBlock(t.Context(), state, e)
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
eth1data := &ethpb.Eth1Data{}
eth1data2 := &ethpb.Eth1Data{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(eth1data2)
AreEth1DataEqual(eth1data, eth1data2)
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
eth1data := &ethpb.Eth1Data{}
var stateVotes []*ethpb.Eth1Data
for i := range 100000 {
for i := 0; i < 100000; i++ {
fuzzer.Fuzz(eth1data)
fuzzer.Fuzz(&stateVotes)
s, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
block := &ethpb.BeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(block)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
state := &ethpb.BeaconState{}
b := &ethpb.SignedBeaconBlock{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
blockBody := &ethpb.BeaconBlockBody{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(blockBody)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
state := &ethpb.BeaconState{}
p := &ethpb.ProposerSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(p)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
proposerSlashing := &ethpb.ProposerSlashing{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(proposerSlashing)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
state := &ethpb.BeaconState{}
a := &ethpb.AttesterSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(a)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
state := &ethpb.BeaconState{}
attesterSlashing := &ethpb.AttesterSlashing{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(attesterSlashing)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
attestationData := &ethpb.AttestationData{}
attestationData2 := &ethpb.AttestationData{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attestationData)
fuzzer.Fuzz(attestationData2)
IsSlashableAttestationData(attestationData, attestationData2)
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
attesterSlashing := &ethpb.AttesterSlashing{}
for range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(attesterSlashing)
SlashableAttesterIndices(attesterSlashing)
}
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
state := &ethpb.BeaconState{}
b := &ethpb.SignedBeaconBlock{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(b)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
state := &ethpb.BeaconState{}
idxAttestation := &ethpb.IndexedAttestation{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(idxAttestation)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
deposit := &ethpb.Deposit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
state := &ethpb.BeaconState{}
e := &ethpb.SignedVoluntaryExit{}
ctx := t.Context()
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
fuzzer := gofuzz.NewWithSeed(0)
state := &ethpb.BeaconState{}
e := &ethpb.SignedVoluntaryExit{}
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(state)
fuzzer.Fuzz(e)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
fork := &ethpb.Fork{}
var slot primitives.Slot
for i := range 10000 {
for i := 0; i < 10000; i++ {
fuzzer.Fuzz(ve)
fuzzer.Fuzz(rawVal)
fuzzer.Fuzz(fork)

View File

@@ -60,7 +60,7 @@ func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb
voteCount := uint64(0)
for _, vote := range beaconState.Eth1DataVotes() {
if AreEth1DataEqual(vote, data) {
if AreEth1DataEqual(vote, data.Copy()) {
voteCount++
}
}
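
The only difference between the paired lines in this hunk is whether the comparison receives data directly or a copy of it. A minimal sketch of that pattern using hypothetical stand-in types (not prysm's ethpb definitions); because the comparison is pure, both calls yield the same result and the copy only shields the caller's value from mutation:

package main

import "fmt"

// eth1Data is a hypothetical stand-in for ethpb.Eth1Data.
type eth1Data struct {
	DepositRoot  string
	DepositCount uint64
}

// Copy returns a shallow copy, mirroring the data.Copy() call in the hunk.
func (d *eth1Data) Copy() *eth1Data {
	if d == nil {
		return nil
	}
	c := *d
	return &c
}

// areEqual is a pure comparison: passing data or data.Copy() gives the same answer.
func areEqual(a, b *eth1Data) bool {
	return a != nil && b != nil && *a == *b
}

func main() {
	votes := []*eth1Data{{"root", 1}, {"root", 1}, {"other", 2}}
	target := &eth1Data{"root", 1}
	count := 0
	for _, v := range votes {
		if areEqual(v, target.Copy()) {
			count++
		}
	}
	fmt.Println("votes for target:", count) // 2
}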


@@ -19,7 +19,7 @@ import (
func FakeDeposits(n uint64) []*ethpb.Eth1Data {
deposits := make([]*ethpb.Eth1Data, n)
for i := range n {
for i := uint64(0); i < n; i++ {
deposits[i] = &ethpb.Eth1Data{
DepositCount: 1,
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
}
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
for range period {
for i := uint64(0); i < period; i++ {
processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
require.NoError(t, err)
require.Equal(t, true, processedState.Version() == version.Phase0)


@@ -27,7 +27,7 @@ func init() {
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
func TestProcessBlockHeader_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := range validators {
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
@@ -290,3 +290,52 @@ func TestProcessBlockHeader_OK(t *testing.T) {
}
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
}
func TestBlockSignatureSet_OK(t *testing.T) {
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
for i := 0; i < len(validators); i++ {
validators[i] = &ethpb.Validator{
PublicKey: make([]byte, 32),
WithdrawalCredentials: make([]byte, 32),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
Slashed: true,
}
}
state, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, state.SetValidators(validators))
require.NoError(t, state.SetSlot(10))
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(&ethpb.BeaconBlockHeader{
Slot: 9,
ProposerIndex: 0,
})))
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
require.NoError(t, err)
currentEpoch := time.CurrentEpoch(state)
priv, err := bls.RandKey()
require.NoError(t, err)
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
block := util.NewBeaconBlock()
block.Block.Slot = 10
block.Block.ProposerIndex = pID
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
block.Block.ParentRoot = latestBlockSignedRoot[:]
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
require.NoError(t, err)
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
require.NoError(t, err)
validators[proposerIdx].Slashed = false
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
require.NoError(t, err)
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
require.NoError(t, err)
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
}


@@ -1,9 +1,5 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package blocks
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/blocks")
var log = logrus.WithField("prefix", "blocks")

Some files were not shown because too many files have changed in this diff.