Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: 81 commits, potuz_patc ... 7185c4fb39
Commits in this compare (SHA1):

7185c4fb39, 0db74365e0, 6f90101364, 49e1763ec2, c2527c82cd, d4ea8fafd6, 07d1d6bdf9, f938da99d9, 9deec69cc7, 2767f08f4d,
d46c620783, dd05e44ef3, 9da36a5de6, 7950a24926, ea51253be9, 2ac30f5ce6, 7418c00ad6, 66342655fd, 18eca953c1, 8191bb5711,
d4613aee0c, 9fcc1a7a77, 75dea214ac, 4374e709cb, be300f80bd, 096cba5b2d, d5127233e4, 3d35cc20ec, 1e658530a7, b360794c9c,
0fc9ab925a, dda5ee3334, 14c67376c3, 9c8b68a66d, a3210157e2, 1536d59e30, 11e46a4560, 5a2e51b894, d20ec4c7a1, 7a70abbd15,
a2b84c9320, edef17e41d, 85c5d31b5b, fa056c2d21, 61de11e2c4, 2773bdef89, 2a23dc7f4a, f97622b054, 08d0f42725, 74c8a25354,
a466c6db9c, 4da6c4291f, 2d242a8d09, 6be1541e57, b845222ce7, 5bbdebee22, 26100e074d, 768fa0e5a1, 11bb8542a4, b78c2c354b,
55e2001a0b, c093283b1b, 5449fd0352, 3d7f7b588b, 2f067c4164, 81266f60af, 207f36065a, eb9feabd6f, bc0868e232, 35c1ab5e88,
21bb6f5258, 4914882e97, 2302ef918a, 76f3083090, 2fd6bd8150, f77b78943a, 7ba60d93f2, b94904b784, 1af12d841d, e1b98a4ca1,
eae15697da
.github/PULL_REQUEST_TEMPLATE.md (vendored): 3 changes

@@ -34,4 +34,5 @@ Fixes #

- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
- [ ] I have added a description with sufficient context for reviewers to understand this PR.
- [ ] I have tested that my changes work as expected and I added a testing plan to the PR description (if applicable).
BUILD.bazel: 20 changes

@@ -193,10 +193,30 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httpwriter:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
"//tools/analyzers/maligned:go_default_library",
"//tools/analyzers/modernize/any:go_default_library",
"//tools/analyzers/modernize/appendclipped:go_default_library",
"//tools/analyzers/modernize/bloop:go_default_library",
"//tools/analyzers/modernize/fmtappendf:go_default_library",
"//tools/analyzers/modernize/forvar:go_default_library",
"//tools/analyzers/modernize/mapsloop:go_default_library",
"//tools/analyzers/modernize/minmax:go_default_library",
#"//tools/analyzers/modernize/newexpr:go_default_library", # Disabled until go 1.26.
"//tools/analyzers/modernize/omitzero:go_default_library",
"//tools/analyzers/modernize/rangeint:go_default_library",
"//tools/analyzers/modernize/reflecttypefor:go_default_library",
"//tools/analyzers/modernize/slicescontains:go_default_library",
#"//tools/analyzers/modernize/slicesdelete:go_default_library", # Disabled, see https://go.dev/issue/73686
"//tools/analyzers/modernize/slicessort:go_default_library",
"//tools/analyzers/modernize/stringsbuilder:go_default_library",
"//tools/analyzers/modernize/stringscutprefix:go_default_library",
"//tools/analyzers/modernize/stringsseq:go_default_library",
"//tools/analyzers/modernize/testingcontext:go_default_library",
"//tools/analyzers/modernize/waitgroup:go_default_library",
"//tools/analyzers/nop:go_default_library",
"//tools/analyzers/nopanic:go_default_library",
"//tools/analyzers/properpermissions:go_default_library",
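Each of the `go_default_library` entries registered with `nogo` above is a package that exports a `golang.org/x/tools/go/analysis.Analyzer`. As a rough, hypothetical sketch of what such a package looks like (illustrative only, not Prysm's actual `logcapitalization` analyzer):

```go
// Package logcap is an illustrative nogo-style analyzer.
package logcap

import (
	"go/ast"
	"go/token"

	"golang.org/x/tools/go/analysis"
)

var Analyzer = &analysis.Analyzer{
	Name: "logcap",
	Doc:  "reports log messages that do not start with a capital letter",
	Run:  run,
}

func run(pass *analysis.Pass) (any, error) {
	for _, f := range pass.Files {
		ast.Inspect(f, func(n ast.Node) bool {
			call, ok := n.(*ast.CallExpr)
			if !ok || len(call.Args) == 0 {
				return true
			}
			sel, ok := call.Fun.(*ast.SelectorExpr)
			if !ok {
				return true
			}
			// Only look at calls that look like logger methods.
			name := sel.Sel.Name
			if name != "Info" && name != "Warn" && name != "Error" {
				return true
			}
			lit, ok := call.Args[0].(*ast.BasicLit)
			if !ok || lit.Kind != token.STRING || len(lit.Value) < 3 {
				return true
			}
			// lit.Value includes the surrounding quotes, so index 1 is the first character.
			if c := lit.Value[1]; c >= 'a' && c <= 'z' {
				pass.Reportf(lit.Pos(), "log message should start with a capital letter")
			}
			return true
		})
	}
	return nil, nil
}
```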
CHANGELOG.md: 168 changes
@@ -4,6 +4,172 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
|
||||
|
||||
## [v7.1.0](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.1.0) - 2025-12-10
|
||||
|
||||
This release includes several key features and fixes. If you are running v7.0.0, you should update to v7.0.1 or later and remove the flag `--disable-last-epoch-targets`.
|
||||
|
||||
Release highlights:
|
||||
|
||||
- Backfill is now supported in Fulu. Backfill from checkpoint sync now supports data columns. Run with `--enable-backfill` when using checkpoint sync.
|
||||
- A new node configuration that custodies enough data columns to reconstruct blobs: use the `--semi-supernode` flag to custody at least 50% of the data columns (see the sketch below these highlights).
|
||||
- Critical fixes in attestation processing.
|
||||
|
||||
A post mortem doc with full details on the mainnet attestation processing issue from December 4th is expected in the coming days.
|
||||
|
||||
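The `--semi-supernode` mode mentioned above relies on the data column extension being erasure-coded, so holding at least half of the columns is enough to reconstruct all of them (and therefore the blobs). A rough sketch of the arithmetic, assuming the Fulu total of 128 data columns (the constant name here is illustrative, not Prysm's):

```go
package main

import "fmt"

const numberOfColumns = 128 // Fulu data column count (illustrative constant).

func main() {
	// A full supernode custodies every column; a semi-supernode custodies half.
	semiSupernodeCustody := numberOfColumns / 2 // 64 columns

	// Reconstruction needs at least 50% of the columns, so a semi-supernode
	// can still rebuild every column (and the blobs) from what it stores.
	reconstructionThreshold := numberOfColumns / 2
	fmt.Println(semiSupernodeCustody >= reconstructionThreshold) // true
}
```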
### Added
|
||||
|
||||
- add fulu support to light client processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15995)
|
||||
- Record data column gossip KZG batch verification latency in both the pooled worker and fallback paths so the `beacon_kzg_verification_data_column_batch_milliseconds` histogram reflects gossip traffic, annotated with `path` labels to distinguish the sources. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16018)
|
||||
- Implement Gloas state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15611)
|
||||
- Add initial configs for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
|
||||
- Add kv functions for the state-diff feature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15903)
|
||||
- Add supported version for fork versions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16030)
|
||||
- prometheus metric `gossip_attestation_verification_milliseconds` to track attestation gossip topic validation latency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15785)
|
||||
- Integrate state-diff into `State()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16033)
|
||||
- Implement Gloas fork support in consensus-types/blocks with factory methods, getters, setters, and proto handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15618)
|
||||
- Integrate state-diff into `HasState()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16045)
|
||||
- Added `--semi-supernode` flag to custody half of a supernode's data column requirements while still allowing reconstruction for blob retrieval. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16029)
|
||||
- Data column backfill. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
|
||||
- Backfill metrics for columns: backfill_data_column_sidecar_downloaded, backfill_data_column_sidecar_downloaded_bytes, backfill_batch_columns_download_ms, backfill_batch_columns_verify_ms. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
|
||||
- prometheus summary `gossip_data_column_sidecar_arrival_milliseconds` to track data column sidecar arrival latency since slot start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16099)
|
||||
|
||||
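The arrival-latency summary in the last entry above is a standard Prometheus instrument. A minimal sketch of how such a metric could be registered and observed with the Go client (the helper name and the slot-start source are assumptions, not Prysm's actual wiring):

```go
package metrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Illustrative declaration; the real metric lives in Prysm's sync package.
var dataColumnArrivalLatency = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "gossip_data_column_sidecar_arrival_milliseconds",
	Help: "Latency between the start of the slot and sidecar arrival, in milliseconds.",
})

// ObserveArrival records how late a sidecar arrived relative to its slot start.
func ObserveArrival(slotStart time.Time) {
	dataColumnArrivalLatency.Observe(float64(time.Since(slotStart).Milliseconds()))
}
```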
### Changed
|
||||
|
||||
- Improve readability in slashing import and remove duplicated code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15957)
|
||||
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
|
||||
- Changed `--subscribe-all-data-subnets` flag to `--supernode` and aliased `--subscribe-all-data-subnets` for existing users. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16012)
|
||||
- Use explicit slot component timing configs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15999)
|
||||
- Downgraded log level from INFO to DEBUG on PrepareBeaconProposer updated fee recipients. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
|
||||
- Change the logging behaviour of `Updated fee recipients` to log only the count of validators at DEBUG level and all validator indices at TRACE level. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15998)
|
||||
- Stop emitting payload attribute events during late block handling when we are not proposing the next slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16026)
|
||||
- Initialize the `ExecutionRequests` field in gossip block map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16047)
|
||||
- Avoid redundant WithHttpEndpoint when JWT is provided. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16032)
|
||||
- Removed dead slot parameter from blobCacheEntry.filter. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16021)
|
||||
- Added log prefix to the `genesis` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
|
||||
- Added log prefix to the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
|
||||
- `WithGenesisValidatorsRoot`: Use camelCase for log field param. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
|
||||
- Move `Origin checkpoint found in db` from WARN to INFO, since it is the expected behaviour. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16075)
|
||||
- backfill metrics that changed name and/or histogram buckets: backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_time_waiting -> backfill_batch_waiting_ms, backfill_batch_time_roundtrip -> backfill_batch_roundtrip_ms, backfill_blocks_bytes_downloaded -> backfill_blocks_downloaded_bytes, backfill_batch_time_verify -> backfill_batch_verify_ms, backfill_batch_blocks_time_download -> backfill_batch_blocks_download_ms, backfill_batch_blobs_time_download -> backfill_batch_blobs_download_ms, backfill_blobs_bytes_downloaded -> backfill_blocks_downloaded_bytes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15580)
|
||||
- Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
|
||||
- `blobsDataFromStoredDataColumns`: Ask the user to use the `--supernode` flag and shorten the error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16097)
|
||||
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)
|
||||
|
||||
### Removed
|
||||
|
||||
- Remove validator cross-client from end-to-end tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16025)
|
||||
- `NUMBER_OF_COLUMNS` configuration (not in the specification any more, replaced by a preset). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
|
||||
- `MAX_CELLS_IN_EXTENDED_MATRIX` configuration (not in the specification any more). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16073)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Nil check for block if it doesn't exist in the DB in fetchOriginSidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16006)
|
||||
- Fix proposals progress bar count [#16020](https://github.com/OffchainLabs/prysm/pull/16020). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16020)
|
||||
- Move `BlockGossipReceived` event to the end of gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16031)
|
||||
- Fix state diff repetitive anchor slot bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16037)
|
||||
- Check the JWT secret length is exactly 256 bits (32 bytes) as per Engine API specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15939)
|
||||
- http_error_count now matches the other cases by listing the endpoint name rather than the actual URL requested. This improves metrics cardinality. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16055)
|
||||
- Fix array out of bounds in static analyzer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16058)
|
||||
- fixes E2E tests to be able to start from Electra genesis fork or future forks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16048)
|
||||
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
|
||||
|
||||
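One of the fixes above checks that the Engine API JWT secret is exactly 256 bits (32 bytes). A hedged sketch of such a validation for a hex-encoded secret (hypothetical helper, not the actual Prysm code):

```go
package jwtcheck

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// ParseJWTSecret decodes a hex-encoded Engine API JWT secret and rejects
// anything that is not exactly 256 bits (32 bytes) long.
func ParseJWTSecret(hexSecret string) ([]byte, error) {
	s := strings.TrimPrefix(strings.TrimSpace(hexSecret), "0x")
	secret, err := hex.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("jwt secret is not valid hex: %w", err)
	}
	if len(secret) != 32 {
		return nil, fmt.Errorf("jwt secret must be 32 bytes, got %d", len(secret))
	}
	return secret, nil
}
```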
## [v7.0.1](https://github.com/prysmaticlabs/prysm/compare/v7.0.0...v7.0.1) - 2025-12-08
|
||||
|
||||
This patch release contains 4 cherry-picked changes to address the mainnet attestation processing issue from 2025-12-04. Operators are encouraged to update to this release as soon as practical. As of this release, the feature flag `--disable-last-epoch-targets` has been deprecated and can be safely removed from your node configuration.
|
||||
|
||||
A post mortem doc with full details is expected to be published later this week.
|
||||
|
||||
### Changed
|
||||
|
||||
- Move the "Not enough connected peers" (for a given subnet) from WARN to DEBUG. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16087)
|
||||
- Use dependent root instead of target when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15996)
|
||||
- Introduced flag `--ignore-unviable-attestations` (replaces and deprecates `--disable-last-epoch-targets`) to drop attestations whose target state is not viable; default remains to process them unless explicitly enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16094)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Use head state to validate attestations for old blocks if they are compatible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16095)
|
||||
|
||||
|
||||
## [v7.0.0](https://github.com/prysmaticlabs/prysm/compare/v6.1.4...v7.0.0) - 2025-11-10
|
||||
|
||||
This is our initial mainnet release for the Ethereum mainnet Fulu fork on December 3rd, 2025. All operators MUST update to v7.0.0 or later release prior to the fulu fork epoch `411392`. See the [Ethereum Foundation blog post](https://blog.ethereum.org/2025/11/06/fusaka-mainnet-announcement) for more information on Fulu.
|
||||
|
||||
Other than the mainnet fulu fork schedule, there are a few callouts in this release:
|
||||
- `by-epoch` blob storage format is the default for new installations. Users that haven't migrated will see a warning to migrate to the new format. Existing deployments may set `--blob-storage-layout=by-epoch` to perform the migration.
|
||||
- Several deprecated flags have been deleted! Please review the "removed" section of this changelog carefully. If you are referencing a removed flag, Prysm will not start! All of these flags had no effect for at least one release.
|
||||
- Several deprecated API endpoints have been deleted. Please review the "removed" section of this changelog carefully.
|
||||
- Backfill is not supported in Fulu. This is expected to be fixed in the next release and should be delivered prior to the mainnet activation fork.
|
||||
- The builder default gas limit is raised from `45000000` (45 MGas) to `60000000` (60 MGas).
|
||||
- Several bug fixes and improvements.
|
||||
|
||||
### Added
|
||||
|
||||
- Allow custom headers in validator client HTTP requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15884)
|
||||
- Metric to track data columns recovered from execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15924)
|
||||
- Metrics: Add count of peers per direction and type (inbound/outbound), (TCP/QUIC). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
|
||||
- `p2p_subscribed_topic_peer_total`: Reset to avoid dangling values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
|
||||
- Add `p2p_minimum_peers_per_subnet` metric. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15922)
|
||||
- Added GeneralizedIndicesFromPath function to calculate the GIs for a given sszInfo object and a PathElement. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15873)
|
||||
- Add Gloas protobuf definitions with spec tests and SSZ serialization support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15601)
|
||||
- Fulu fork epoch for mainnet configurations set for December 3, 2025, 09:49:11pm UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
|
||||
- Added BPO schedules for December 9, 2025, 02:21:11pm UTC and January 7, 2026, 01:01:11am UTC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
|
||||
|
||||
### Changed
|
||||
|
||||
- Updated consensus spec tests to v1.6.0-beta.1 with new hashes and URL template. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15918)
|
||||
- Use the `by-epoch` blob storage layout by default and log a warning to users who continue to use the flat layout, encouraging them to switch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15904)
|
||||
- Update go-netroute to `v0.3.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15934)
|
||||
- Introduced Path type for SSZ-QL queries and updated PathElement (removed Length field, kept Index) enforcing that len queries are terminal (at most one per path). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
|
||||
- Changed length query syntax from `block.payload.len(transactions)` to `len(block.payload.transactions)`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15935)
|
||||
- Update `go-netroute` to `v0.4.0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15949)
|
||||
- Updated consensus spec tests to v1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15960)
|
||||
- Updated go bitfield from prysmaticlabs to offchainlabs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15968)
|
||||
- Bump builder default gas limit from `45000000` (45 MGas) to `60000000` (60 MGas). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15979)
|
||||
- Use head state for block pubsub validation when possible. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15972)
|
||||
- updated consensus spec to 1.6.0 from 1.6.0-beta.2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15975)
|
||||
- Upgrade Prysm v6 to v7. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15989)
|
||||
- Use head state readonly when possible to validate data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15977)
|
||||
|
||||
### Removed
|
||||
|
||||
- log mentioning removed flag `--show-deposit-data`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15926)
|
||||
- Remove Beacon API endpoints that were deprecated in Electra: `GET /eth/v1/beacon/deposit_snapshot`, `GET /eth/v1/beacon/blocks/{block_id}/attestations`, `GET /eth/v1/beacon/pool/attestations`, `POST /eth/v1/beacon/pool/attestations`, `GET /eth/v1/beacon/pool/attester_slashings`, `POST /eth/v1/beacon/pool/attester_slashings`, `GET /eth/v1/validator/aggregate_attestation`, `POST /eth/v1/validator/aggregate_and_proofs`, `POST /eth/v1/beacon/blocks`, `POST /eth/v1/beacon/blinded_blocks`, `GET /eth/v1/builder/states/{state_id}/expected_withdrawals`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15962)
|
||||
- Deprecated flag `--enable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-build-block-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-reorg-late-blocks` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-optional-engine-methods` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-aggregate-parallel` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-eip-4881` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-verbose-sig-verification` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-debug-rpc-endpoints` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--beacon-rpc-gateway-provider` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-grpc-gateway` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-committee-aware-packing` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--interop-genesis-time` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--interop-num-validators` has been removed (from beacon-chain only; still available in validator client). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--enable-quic` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--attest-timely` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--disable-experimental-state` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
- Deprecated flag `--p2p-metadata` has been removed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15986)
|
||||
|
||||
### Fixed
|
||||
|
||||
- Remove `Reading static P2P private key from a file.` log if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15913)
|
||||
- `blobSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
|
||||
- `dataColumnSidecarByRootRPCHandler`: Do not serve a sidecar if the corresponding block is not available. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15933)
|
||||
- Fix incorrect version used when sending attestation version in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15950)
|
||||
- Changed the behavior of topic subscriptions such that only topics that require the active validator count will compute that value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
|
||||
- Added a Mutex to the computation of active validator count during topic subscription to avoid a race condition where multiple goroutines are computing the same work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15955)
|
||||
- `RODataColumnsVerifier.ValidProposerSignature`: Ensure the expensive signature verification is only performed once for concurrent requests for the same signature data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15954)
|
||||
- use filepath for path operations (clean, join, etc.) to ensure correct behavior on Windows. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15953)
|
||||
- Fix #15969: Handle addition overflow in `/eth/v1/beacon/rewards/attestations/{epoch}`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15970)
|
||||
- `SidecarProposerExpected`: Add the slot in the single flight key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15976)
|
||||
- Ensures the rate limitation is respected for by root blob and data column sidecars requests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15981)
|
||||
- Use head only if it's compatible with the target for attestation validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15965)
|
||||
- Backfill disabled if checkpoint sync origin is after fulu fork due to lack of DataColumnSidecar support in backfill. To track the availability of fulu-compatible backfill please watch https://github.com/OffchainLabs/prysm/issues/15982. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15987)
|
||||
- `SidecarProposerExpected`: Use the correct value of proposer index in the singleflight group. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15993)
|
||||
|
||||
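The fix above for `/eth/v1/beacon/rewards/attestations/{epoch}` handles addition overflow. A minimal sketch of overflow-checked addition on unsigned 64-bit values (illustrative only; the endpoint's actual types and helper names may differ):

```go
package rewards

import (
	"errors"
	"math/bits"
)

// addChecked returns a+b, or an error if the sum overflows uint64.
func addChecked(a, b uint64) (uint64, error) {
	sum, carry := bits.Add64(a, b, 0)
	if carry != 0 {
		return 0, errors.New("uint64 overflow when summing rewards")
	}
	return sum, nil
}
```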
## [v6.1.4](https://github.com/prysmaticlabs/prysm/compare/v6.1.3...v6.1.4) - 2025-10-24
|
||||
|
||||
This release includes a bug fix affecting block proposals in rare cases, along with an important update for Windows users running post-Fusaka fork.
|
||||
@@ -3820,4 +3986,4 @@ There are no security updates in this release.
|
||||
|
||||
# Older than v2.0.0
|
||||
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
|
||||
WORKSPACE: 20 changes
@@ -205,6 +205,26 @@ prysm_image_deps()
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
|
||||
|
||||
# Override golang.org/x/tools to use v0.38.0 instead of v0.30.0
|
||||
# This is necessary as this dependency is required by rules_go and they do not accept dependency
|
||||
# update PRs. Instead, they ask downstream projects to override the dependency. To generate the
|
||||
# patches or update this dependency again, check out the rules_go repo then run the releaser tool.
|
||||
# bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools
|
||||
# Copy the patches and http_archive updates from rules_go here.
|
||||
http_archive(
|
||||
name = "org_golang_x_tools",
|
||||
patch_args = ["-p1"],
|
||||
patches = [
|
||||
"//third_party:org_golang_x_tools-deletegopls.patch",
|
||||
"//third_party:org_golang_x_tools-gazelle.patch",
|
||||
],
|
||||
sha256 = "8509908cd7fc35aa09ff49d8494e4fd25bab9e6239fbf57e0d8344f6bec5802b",
|
||||
strip_prefix = "tools-0.38.0",
|
||||
urls = [
|
||||
"https://github.com/golang/tools/archive/refs/tags/v0.38.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
|
||||
@@ -56,7 +56,7 @@ func ParseAccept(header string) []mediaRange {
|
||||
}
|
||||
|
||||
var out []mediaRange
|
||||
for _, field := range strings.Split(header, ",") {
|
||||
for field := range strings.SplitSeq(header, ",") {
|
||||
if r, ok := parseMediaRange(field); ok {
|
||||
out = append(out, r)
|
||||
}
|
||||
|
||||
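The hunk above swaps `strings.Split` for `strings.SplitSeq`, the iterator form added in Go 1.24 that yields substrings lazily instead of allocating a slice. A small standalone example (assuming a toolchain new enough to provide `strings.SplitSeq`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	header := "text/html, application/json;q=0.9, */*;q=0.1"

	// SplitSeq yields each field without building an intermediate slice.
	for field := range strings.SplitSeq(header, ",") {
		fmt.Println(strings.TrimSpace(field))
	}
}
```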
@@ -421,7 +421,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
|
||||
func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
|
||||
vs := make([]*structs.SignedValidatorRegistration, len(svr))
|
||||
for i := 0; i < len(svr); i++ {
|
||||
for i := range svr {
|
||||
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
|
||||
}
|
||||
body, err := json.Marshal(vs)
|
||||
|
||||
@@ -121,7 +121,7 @@ func (s *Uint64String) UnmarshalText(t []byte) error {
|
||||
|
||||
// MarshalText returns a byte representation of the text from Uint64String.
|
||||
func (s Uint64String) MarshalText() ([]byte, error) {
|
||||
return []byte(fmt.Sprintf("%d", s)), nil
|
||||
return fmt.Appendf(nil, "%d", s), nil
|
||||
}
|
||||
|
||||
// VersionResponse is a JSON representation of a field in the builder API header response.
|
||||
|
||||
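`fmt.Appendf` (Go 1.19+) formats directly into a byte slice, which is what the `MarshalText` change above takes advantage of instead of going through `fmt.Sprintf` and a string-to-byte-slice conversion. A tiny example:

```go
package main

import "fmt"

func main() {
	var s uint64 = 42

	// Appendf appends the formatted text to the provided slice (nil here),
	// avoiding the extra allocation of []byte(fmt.Sprintf(...)).
	b := fmt.Appendf(nil, "%d", s)
	fmt.Println(string(b)) // "42"
}
```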
@@ -15,7 +15,7 @@ import (
|
||||
func LogRequests(
|
||||
ctx context.Context,
|
||||
method string, req,
|
||||
reply interface{},
|
||||
reply any,
|
||||
cc *grpc.ClientConn,
|
||||
invoker grpc.UnaryInvoker,
|
||||
opts ...grpc.CallOption,
|
||||
|
||||
@@ -14,5 +14,5 @@ type GetForkScheduleResponse struct {
|
||||
}
|
||||
|
||||
type GetSpecResponse struct {
|
||||
Data interface{} `json:"data"`
|
||||
Data any `json:"data"`
|
||||
}
|
||||
|
||||
@@ -93,9 +93,9 @@ func TestToggleMultipleTimes(t *testing.T) {
|
||||
|
||||
v := New()
|
||||
pre := !v.IsSet()
|
||||
for i := 0; i < 100; i++ {
|
||||
for i := range 100 {
|
||||
v.SetTo(false)
|
||||
for j := 0; j < i; j++ {
|
||||
for range i {
|
||||
pre = v.Toggle()
|
||||
}
|
||||
|
||||
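Several of these hunks replace classic counting loops with Go 1.22's range-over-integer form, which iterates from 0 to n-1. For example:

```go
package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ { ... }
	for i := range 3 {
		fmt.Println("index", i)
	}

	// When the index is unused, the loop variable can be dropped entirely.
	count := 0
	for range 3 {
		count++
	}
	fmt.Println("count", count) // 3
}
```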
@@ -149,7 +149,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.Set()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Reader
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.IsSet()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -165,7 +165,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.UnSet()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -173,7 +173,7 @@ func TestRace(t *testing.T) {
|
||||
|
||||
// Reader And Writer
|
||||
go func() {
|
||||
for i := 0; i < repeat; i++ {
|
||||
for range repeat {
|
||||
v.Toggle()
|
||||
wg.Done()
|
||||
}
|
||||
@@ -198,8 +198,8 @@ func ExampleAtomicBool() {
|
||||
func BenchmarkMutexRead(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
_ = v
|
||||
m.RUnlock()
|
||||
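The benchmark hunks switch from the classic `for i := 0; i < b.N; i++` pattern to `b.Loop()`, added in Go 1.24; it decides how many iterations to run and keeps setup outside the loop out of the measured time. A minimal sketch:

```go
package bench

import "testing"

func BenchmarkCopy(b *testing.B) {
	src := make([]byte, 1024)
	dst := make([]byte, 1024)

	// b.Loop reports whether the benchmark should keep running and keeps
	// the setup above out of the measurement.
	for b.Loop() {
		copy(dst, src)
	}
}
```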
@@ -208,16 +208,16 @@ func BenchmarkMutexRead(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueRead(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.Load() != nil
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
_ = v.IsSet()
|
||||
}
|
||||
}
|
||||
@@ -227,8 +227,8 @@ func BenchmarkAtomicBoolRead(b *testing.B) {
|
||||
func BenchmarkMutexWrite(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.RLock()
|
||||
v = true
|
||||
m.RUnlock()
|
||||
@@ -239,16 +239,16 @@ func BenchmarkMutexWrite(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicValueWrite(b *testing.B) {
|
||||
var v atomic.Value
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Store(true)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Set()
|
||||
}
|
||||
}
|
||||
@@ -258,8 +258,8 @@ func BenchmarkAtomicBoolWrite(b *testing.B) {
|
||||
func BenchmarkMutexCAS(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
if !v {
|
||||
v = true
|
||||
@@ -270,8 +270,8 @@ func BenchmarkMutexCAS(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.SetToIf(false, true)
|
||||
}
|
||||
}
|
||||
@@ -281,8 +281,8 @@ func BenchmarkAtomicBoolCAS(b *testing.B) {
|
||||
func BenchmarkMutexToggle(b *testing.B) {
|
||||
var m sync.RWMutex
|
||||
var v bool
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
m.Lock()
|
||||
v = !v
|
||||
m.Unlock()
|
||||
@@ -291,8 +291,8 @@ func BenchmarkMutexToggle(b *testing.B) {
|
||||
|
||||
func BenchmarkAtomicBoolToggle(b *testing.B) {
|
||||
v := New()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
for b.Loop() {
|
||||
v.Toggle()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ const (
|
||||
|
||||
func init() {
|
||||
input = make([][]byte, benchmarkElements)
|
||||
for i := 0; i < benchmarkElements; i++ {
|
||||
for i := range benchmarkElements {
|
||||
input[i] = make([]byte, benchmarkElementSize)
|
||||
_, err := rand.Read(input[i])
|
||||
if err != nil {
|
||||
@@ -35,7 +35,7 @@ func hash(input [][]byte) [][]byte {
|
||||
output := make([][]byte, len(input))
|
||||
for i := range input {
|
||||
copy(output, input)
|
||||
for j := 0; j < benchmarkHashRuns; j++ {
|
||||
for range benchmarkHashRuns {
|
||||
hash := sha256.Sum256(output[i])
|
||||
output[i] = hash[:]
|
||||
}
|
||||
@@ -44,15 +44,15 @@ func hash(input [][]byte) [][]byte {
|
||||
}
|
||||
|
||||
func BenchmarkHash(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
hash(input)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkHashMP(b *testing.B) {
|
||||
output := make([][]byte, len(input))
|
||||
for i := 0; i < b.N; i++ {
|
||||
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
for b.Loop() {
|
||||
workerResults, err := async.Scatter(len(input), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
|
||||
return hash(input[offset : offset+entries]), nil
|
||||
})
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
// Debounce events fired over a channel by a specified duration, ensuring no events
|
||||
// are handled until a certain interval of time has passed.
|
||||
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan interface{}, handler func(interface{})) {
|
||||
func Debounce(ctx context.Context, interval time.Duration, eventsChan <-chan any, handler func(any)) {
|
||||
var timer *time.Timer
|
||||
defer func() {
|
||||
if timer != nil {
|
||||
|
||||
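Many of the hunks in this range only replace `interface{}` with `any`, the alias introduced in Go 1.18; the two are interchangeable, so the change is purely cosmetic. For instance:

```go
package main

import "fmt"

// any is defined by the language as: type any = interface{}
func describe(v any) string {
	return fmt.Sprintf("%T: %v", v, v)
}

func main() {
	fmt.Println(describe(42))      // int: 42
	fmt.Println(describe("hello")) // string: hello
}
```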
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
func TestDebounce_NoEvents(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
@@ -26,7 +26,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(&timesHandled, 1)
|
||||
})
|
||||
wg.Done()
|
||||
@@ -38,7 +38,7 @@ func TestDebounce_NoEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_CtxClosing(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
@@ -62,7 +62,7 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
})
|
||||
}()
|
||||
go func() {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(&timesHandled, 1)
|
||||
})
|
||||
wg.Done()
|
||||
@@ -74,14 +74,14 @@ func TestDebounce_CtxClosing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(&timesHandled, 1)
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
eventsChan <- struct{}{}
|
||||
}
|
||||
// We should expect 100 rapid fire changes to only have caused
|
||||
@@ -92,14 +92,14 @@ func TestDebounce_SingleHandlerInvocation(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDebounce_MultipleHandlerInvocation(t *testing.T) {
|
||||
eventsChan := make(chan interface{}, 100)
|
||||
eventsChan := make(chan any, 100)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
interval := time.Second
|
||||
timesHandled := int32(0)
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event interface{}) {
|
||||
go async.Debounce(ctx, interval, eventsChan, func(event any) {
|
||||
atomic.AddInt32(&timesHandled, 1)
|
||||
})
|
||||
for i := 0; i < 100; i++ {
|
||||
for range 100 {
|
||||
eventsChan <- struct{}{}
|
||||
}
|
||||
require.Equal(t, int32(0), atomic.LoadInt32(&timesHandled), "Events must prevent from handler execution")
|
||||
|
||||
@@ -93,9 +93,7 @@ func ExampleSubscriptionScope() {
|
||||
// Run a subscriber in the background.
|
||||
divsub := app.SubscribeResults('/', divs)
|
||||
mulsub := app.SubscribeResults('*', muls)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
wg.Go(func() {
|
||||
defer fmt.Println("subscriber exited")
|
||||
defer divsub.Unsubscribe()
|
||||
defer mulsub.Unsubscribe()
|
||||
@@ -111,7 +109,7 @@ func ExampleSubscriptionScope() {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
})
|
||||
|
||||
// Interact with the app.
|
||||
app.Calc('/', 22, 11)
|
||||
|
||||
@@ -26,7 +26,7 @@ func ExampleNewSubscription() {
|
||||
// Create a subscription that sends 10 integers on ch.
|
||||
ch := make(chan int)
|
||||
sub := event.NewSubscription(func(quit <-chan struct{}) error {
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
select {
|
||||
case ch <- i:
|
||||
case <-quit:
|
||||
|
||||
@@ -3,6 +3,6 @@ package event
|
||||
// SubscriberSender is an abstract representation of an *event.Feed
|
||||
// to use in describing types that accept or return an *event.Feed.
|
||||
type SubscriberSender interface {
|
||||
Subscribe(channel interface{}) Subscription
|
||||
Send(value interface{}) (nsent int)
|
||||
Subscribe(channel any) Subscription
|
||||
Send(value any) (nsent int)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ var errInts = errors.New("error in subscribeInts")
|
||||
|
||||
func subscribeInts(max, fail int, c chan<- int) Subscription {
|
||||
return NewSubscription(func(quit <-chan struct{}) error {
|
||||
for i := 0; i < max; i++ {
|
||||
for i := range max {
|
||||
if i >= fail {
|
||||
return errInts
|
||||
}
|
||||
@@ -50,7 +50,7 @@ func TestNewSubscriptionError(t *testing.T) {
|
||||
channel := make(chan int)
|
||||
sub := subscribeInts(10, 2, channel)
|
||||
loop:
|
||||
for want := 0; want < 10; want++ {
|
||||
for want := range 10 {
|
||||
select {
|
||||
case got := <-channel:
|
||||
require.Equal(t, want, got)
|
||||
|
||||
@@ -107,15 +107,13 @@ func TestLockUnlock(_ *testing.T) {
|
||||
|
||||
func TestLockUnlock_CleansUnused(t *testing.T) {
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
wg.Go(func() {
|
||||
lock := NewMultilock("dog", "cat", "owl")
|
||||
lock.Lock()
|
||||
assert.Equal(t, 3, len(locks.list))
|
||||
lock.Unlock()
|
||||
|
||||
wg.Done()
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
// We expect that unlocking completely cleared the locks list
|
||||
// given all 3 lock keys were unused at time of unlock.
|
||||
|
||||
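These hunks adopt `sync.WaitGroup.Go`, added in Go 1.25, which wraps the usual `Add(1)` / `go func() { defer wg.Done(); ... }()` boilerplate. A small example:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	for i := range 3 {
		// Go increments the counter, runs the function in a new goroutine,
		// and calls Done when it returns.
		wg.Go(func() {
			fmt.Println("worker", i)
		})
	}
	wg.Wait()
}
```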
@@ -9,14 +9,14 @@ import (
|
||||
// WorkerResults are the results of a scatter worker.
|
||||
type WorkerResults struct {
|
||||
Offset int
|
||||
Extent interface{}
|
||||
Extent any
|
||||
}
|
||||
|
||||
// Scatter scatters a computation across multiple goroutines.
|
||||
// This breaks the task in to a number of chunks and executes those chunks in parallel with the function provided.
|
||||
// Results returned are collected and presented as a set of WorkerResults, which can be reassembled by the calling function.
|
||||
// Any error that occurs in the workers will be passed back to the calling function.
|
||||
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (interface{}, error)) ([]*WorkerResults, error) {
|
||||
func Scatter(inputLen int, sFunc func(int, int, *sync.RWMutex) (any, error)) ([]*WorkerResults, error) {
|
||||
if inputLen <= 0 {
|
||||
return nil, errors.New("input length must be greater than 0")
|
||||
}
|
||||
|
||||
@@ -46,9 +46,9 @@ func TestDouble(t *testing.T) {
|
||||
inValues[i] = i
|
||||
}
|
||||
outValues := make([]int, test.inValues)
|
||||
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (interface{}, error) {
|
||||
workerResults, err := async.Scatter(len(inValues), func(offset int, entries int, _ *sync.RWMutex) (any, error) {
|
||||
extent := make([]int, entries)
|
||||
for i := 0; i < entries; i++ {
|
||||
for i := range entries {
|
||||
extent[i] = inValues[offset+i] * 2
|
||||
}
|
||||
return extent, nil
|
||||
@@ -72,8 +72,8 @@ func TestDouble(t *testing.T) {
|
||||
func TestMutex(t *testing.T) {
|
||||
totalRuns := 1048576
|
||||
val := 0
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
|
||||
for range entries {
|
||||
mu.Lock()
|
||||
val++
|
||||
mu.Unlock()
|
||||
@@ -90,8 +90,8 @@ func TestMutex(t *testing.T) {
|
||||
func TestError(t *testing.T) {
|
||||
totalRuns := 1024
|
||||
val := 0
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (interface{}, error) {
|
||||
for i := 0; i < entries; i++ {
|
||||
_, err := async.Scatter(totalRuns, func(offset int, entries int, mu *sync.RWMutex) (any, error) {
|
||||
for range entries {
|
||||
mu.Lock()
|
||||
val++
|
||||
if val == 1011 {
|
||||
|
||||
@@ -323,14 +323,17 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
var ok bool
|
||||
e := slots.ToEpoch(slot)
|
||||
stateEpoch := slots.ToEpoch(st.Slot())
|
||||
if e == stateEpoch {
|
||||
fuluAndNextEpoch := st.Version() >= version.Fulu && e == stateEpoch+1
|
||||
if e == stateEpoch || fuluAndNextEpoch {
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
st = st.Copy()
|
||||
if slot > st.Slot() {
|
||||
// At this point either we know we are proposing on a future slot or we need to still compute the
|
||||
// right proposer index pre-Fulu, either way we need to copy the state to process it.
|
||||
st = st.Copy()
|
||||
var err error
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, slot)
|
||||
if err != nil {
|
||||
@@ -338,7 +341,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
|
||||
return emptyAttri
|
||||
}
|
||||
}
|
||||
if e > stateEpoch {
|
||||
if e > stateEpoch && !fuluAndNextEpoch {
|
||||
emptyAttri := payloadattribute.EmptyWithVersion(st.Version())
|
||||
val, ok = s.trackedProposer(st, slot)
|
||||
if !ok {
|
||||
|
||||
@@ -1053,40 +1053,3 @@ func TestKZGCommitmentToVersionedHashes(t *testing.T) {
|
||||
require.Equal(t, vhs[0].String(), vh0)
|
||||
require.Equal(t, vhs[1].String(), vh1)
|
||||
}
|
||||
|
||||
func TestComputePayloadAttribute(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, Index: 0})
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
proposingSlot: slot,
|
||||
headRoot: [32]byte{},
|
||||
}
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, params.BeaconConfig().EthBurnAddressHex, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()).String())
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
service.cfg.TrackedValidatorsCache.Set(cache.TrackedValidator{Active: true, FeeRecipient: primitives.ExecutionAddress(suggestedAddr), Index: 0})
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
require.NoError(t, service.computePayloadAttributes(cfg, fcu))
|
||||
require.Equal(t, false, fcu.attributes.IsEmpty())
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(fcu.attributes.SuggestedFeeRecipient()))
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
payloadattribute "github.com/OffchainLabs/prysm/v7/consensus-types/payload-attribute"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -53,58 +54,53 @@ type fcuConfig struct {
|
||||
}
|
||||
|
||||
// sendFCU handles the logic to notify the engine of a forckhoice update
|
||||
// for the first time when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions when the
|
||||
// incoming block is late, preparing payload attributes in this case while it
|
||||
// only sends a message with empty attributes for early blocks.
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if !s.isNewHead(cfg.headRoot) {
|
||||
return nil
|
||||
// when processing an incoming block during regular sync. It
|
||||
// always updates the shuffling caches and handles epoch transitions .
|
||||
func (s *Service) sendFCU(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
if cfg.postState.Version() < version.Fulu {
|
||||
// update the caches to compute the right proposer index
|
||||
// this function is called under a forkchoice lock which we need to release.
|
||||
s.ForkChoicer().Unlock()
|
||||
s.updateCachesPostBlockProcessing(cfg)
|
||||
s.ForkChoicer().Lock()
|
||||
}
|
||||
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not get forkchoice update argument")
|
||||
return
|
||||
}
|
||||
// If head has not been updated and attributes are nil, we can skip the FCU.
|
||||
if !s.isNewHead(cfg.headRoot) && (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) {
|
||||
return
|
||||
}
|
||||
// If we are proposing and we aim to reorg the block, we have already sent FCU with attributes on lateBlockTasks
|
||||
if fcuArgs.attributes != nil && !fcuArgs.attributes.IsEmpty() && s.shouldOverrideFCU(cfg.headRoot, s.CurrentSlot()+1) {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
go s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
return s.forkchoiceUpdateWithExecution(cfg.ctx, fcuArgs)
|
||||
}
|
||||
|
||||
// sendFCUWithAttributes computes the payload attributes and sends an FCU message
|
||||
// to the engine if needed
|
||||
func (s *Service) sendFCUWithAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
|
||||
defer cancel()
|
||||
cfg.ctx = slotCtx
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
defer s.cfg.ForkChoiceStore.RUnlock()
|
||||
if err := s.computePayloadAttributes(cfg, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not compute payload attributes")
|
||||
return
|
||||
}
|
||||
if fcuArgs.attributes.IsEmpty() {
|
||||
return
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(cfg.ctx, fcuArgs); err != nil {
|
||||
log.WithError(err).Error("Could not update forkchoice with payload attributes for proposal")
|
||||
if s.isNewHead(fcuArgs.headRoot) {
|
||||
if err := s.saveHead(cfg.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
|
||||
}
|
||||
}
|
||||
|
||||
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It decides whether a new call to FCU should be made.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) error {
|
||||
// fockchoiceUpdateWithExecution is a wrapper around notifyForkchoiceUpdate. It gets a forkchoice lock and calls the engine.
|
||||
// The caller of this function should NOT have a lock in forkchoice store.
|
||||
func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuConfig) {
|
||||
_, span := trace.StartSpan(ctx, "beacon-chain.blockchain.forkchoiceUpdateWithExecution")
|
||||
defer span.End()
|
||||
// Note: Use the service context here to avoid the parent context being ended during a forkchoice update.
|
||||
ctx = trace.NewContext(s.ctx, span)
|
||||
s.ForkChoicer().Lock()
|
||||
defer s.ForkChoicer().Unlock()
|
||||
_, err := s.notifyForkchoiceUpdate(ctx, args)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify forkchoice update")
|
||||
log.WithError(err).Error("Could not notify forkchoice update")
|
||||
}
|
||||
|
||||
if err := s.saveHead(ctx, args.headRoot, args.headBlock, args.headState); err != nil {
|
||||
log.WithError(err).Error("Could not save head")
|
||||
}
|
||||
|
||||
// Only need to prune attestations from pool if the head has changed.
|
||||
s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
|
||||
return nil
|
||||
}
|
||||
|
||||
// shouldOverrideFCU checks whether the incoming block is still subject to being
|
||||
|
||||
@@ -97,7 +97,7 @@ func TestService_forkchoiceUpdateWithExecution_exceptionalCases(t *testing.T) {
|
||||
headBlock: wsb,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
|
||||
payloadID, has := service.cfg.PayloadIDCache.PayloadID(2, [32]byte{2})
|
||||
require.Equal(t, true, has)
|
||||
@@ -151,7 +151,7 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin
|
||||
headRoot: r,
|
||||
proposingSlot: service.CurrentSlot() + 1,
|
||||
}
|
||||
require.NoError(t, service.forkchoiceUpdateWithExecution(ctx, args))
|
||||
service.forkchoiceUpdateWithExecution(ctx, args)
|
||||
}
|
||||
|
||||
func TestShouldOverrideFCU(t *testing.T) {
|
||||
|
||||
@@ -23,6 +23,7 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"kzg_test.go",
|
||||
"trusted_setup_test.go",
|
||||
"validation_test.go",
|
||||
],
|
||||
|
||||
@@ -34,12 +34,6 @@ type Bytes48 = ckzg4844.Bytes48
|
||||
// Bytes32 is a 32-byte array.
|
||||
type Bytes32 = ckzg4844.Bytes32
|
||||
|
||||
// CellsAndProofs represents the Cells and Proofs corresponding to a single blob.
|
||||
type CellsAndProofs struct {
|
||||
Cells []Cell
|
||||
Proofs []Proof
|
||||
}
|
||||
|
||||
// BlobToKZGCommitment computes a KZG commitment from a given blob.
|
||||
func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
|
||||
var kzgBlob kzg4844.Blob
|
||||
@@ -65,7 +59,7 @@ func ComputeCells(blob *Blob) ([]Cell, error) {
|
||||
|
||||
cells := make([]Cell, len(ckzgCells))
|
||||
for i := range ckzgCells {
|
||||
cells[i] = Cell(ckzgCells[i])
|
||||
copy(cells[i][:], ckzgCells[i][:])
|
||||
}
|
||||
|
||||
return cells, nil
|
||||
@@ -78,22 +72,35 @@ func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
|
||||
|
||||
proof, err := kzg4844.ComputeBlobProof(&kzgBlob, kzg4844.Commitment(commitment))
|
||||
if err != nil {
|
||||
return [48]byte{}, err
|
||||
return Proof{}, err
|
||||
}
|
||||
return Proof(proof), nil
|
||||
var result Proof
|
||||
copy(result[:], proof[:])
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndKZGProofs computes the cells and cells KZG proofs from a given blob.
|
||||
func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
|
||||
func ComputeCellsAndKZGProofs(blob *Blob) ([]Cell, []Proof, error) {
|
||||
var ckzgBlob ckzg4844.Blob
|
||||
copy(ckzgBlob[:], blob[:])
|
||||
|
||||
ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(&ckzgBlob)
|
||||
if err != nil {
|
||||
return CellsAndProofs{}, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
|
||||
if len(ckzgCells) != len(ckzgProofs) {
|
||||
return nil, nil, errors.New("mismatched cells and proofs length")
|
||||
}
|
||||
|
||||
cells := make([]Cell, len(ckzgCells))
|
||||
proofs := make([]Proof, len(ckzgProofs))
|
||||
for i := range ckzgCells {
|
||||
copy(cells[i][:], ckzgCells[i][:])
|
||||
copy(proofs[i][:], ckzgProofs[i][:])
|
}

return cells, proofs, nil
}

// VerifyCellKZGProofBatch verifies the KZG proofs for a given slice of commitments, cells indices, cells and proofs.

@@ -103,44 +110,57 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
ckzgCells := make([]ckzg4844.Cell, len(cells))

for i := range cells {
ckzgCells[i] = ckzg4844.Cell(cells[i])
copy(ckzgCells[i][:], cells[i][:])
}
return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// RecoverCells recovers the complete cells from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
func RecoverCells(cellIndices []uint64, partialCells []Cell) ([]Cell, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
copy(ckzgPartialCells[i][:], partialCells[i][:])
}

ckzgCells, err := ckzg4844.RecoverCells(cellIndices, ckzgPartialCells)
if err != nil {
return nil, errors.Wrap(err, "recover cells")
}

cells := make([]Cell, len(ckzgCells))
for i := range ckzgCells {
copy(cells[i][:], ckzgCells[i][:])
}

return cells, nil
}

// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) ([]Cell, []Proof, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
for i := range partialCells {
copy(ckzgPartialCells[i][:], partialCells[i][:])
}

ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
if err != nil {
return CellsAndProofs{}, errors.Wrap(err, "recover cells and KZG proofs")
return nil, nil, errors.Wrap(err, "recover cells and KZG proofs")
}

return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// makeCellsAndProofs converts cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
if len(ckzgCells) != len(ckzgProofs) {
return CellsAndProofs{}, errors.New("different number of cells/proofs")
return nil, nil, errors.New("mismatched cells and proofs length")
}

cells := make([]Cell, 0, len(ckzgCells))
proofs := make([]Proof, 0, len(ckzgProofs))

cells := make([]Cell, len(ckzgCells))
proofs := make([]Proof, len(ckzgProofs))
for i := range ckzgCells {
cells = append(cells, Cell(ckzgCells[i]))
proofs = append(proofs, Proof(ckzgProofs[i]))
copy(cells[i][:], ckzgCells[i][:])
copy(proofs[i][:], ckzgProofs[i][:])
}

return CellsAndProofs{
Cells: cells,
Proofs: proofs,
}, nil
return cells, proofs, nil
}
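A minimal sketch of how a caller might consume the new slice-based signatures above (hypothetical helper, assuming this package's Blob, Cell and Proof types and a trusted setup already loaded via Start(); error handling abbreviated):

// recoverFromHalf computes cells for a blob, keeps the first half of the
// columns, and recovers the full set of cells and proofs from them.
func recoverFromHalf(blob *Blob) ([]Cell, []Proof, error) {
	cells, _, err := ComputeCellsAndKZGProofs(blob)
	if err != nil {
		return nil, nil, err
	}
	half := len(cells) / 2
	indices := make([]uint64, half)
	partial := make([]Cell, half)
	for i := range half {
		indices[i] = uint64(i)
		partial[i] = cells[i]
	}
	// New signature: two slices plus an error, no CellsAndProofs wrapper.
	return RecoverCellsAndKZGProofs(indices, partial)
}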
236
beacon-chain/blockchain/kzg/kzg_test.go
Normal file

@@ -0,0 +1,236 @@
package kzg

import (
"testing"

"github.com/OffchainLabs/prysm/v7/crypto/random"
"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestComputeCells(t *testing.T) {
require.NoError(t, Start())

t.Run("valid blob", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, err := ComputeCells(&blob)
require.NoError(t, err)
require.Equal(t, 128, len(cells))
})
}

func TestComputeBlobKZGProof(t *testing.T) {
require.NoError(t, Start())

t.Run("valid blob and commitment", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)

proof, err := ComputeBlobKZGProof(&blob, commitment)
require.NoError(t, err)
require.Equal(t, BytesPerProof, len(proof))
require.NotEqual(t, Proof{}, proof, "proof should not be empty")
})
}

func TestComputeCellsAndKZGProofs(t *testing.T) {
require.NoError(t, Start())

t.Run("valid blob returns matching cells and proofs", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)
require.Equal(t, 128, len(cells))
require.Equal(t, 128, len(proofs))
require.Equal(t, len(cells), len(proofs), "cells and proofs should have matching lengths")
})
}

func TestVerifyCellKZGProofBatch(t *testing.T) {
require.NoError(t, Start())

t.Run("valid proof batch", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)

cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

// Verify a subset of cells
cellIndices := []uint64{0, 1, 2, 3, 4}
selectedCells := make([]Cell, len(cellIndices))
commitmentsBytes := make([]Bytes48, len(cellIndices))
proofsBytes := make([]Bytes48, len(cellIndices))

for i, idx := range cellIndices {
selectedCells[i] = cells[idx]
copy(commitmentsBytes[i][:], commitment[:])
copy(proofsBytes[i][:], proofs[idx][:])
}

valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
require.NoError(t, err)
require.Equal(t, true, valid)
})

t.Run("invalid proof should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)

cells, _, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

// Use invalid proofs
cellIndices := []uint64{0}
selectedCells := []Cell{cells[0]}
commitmentsBytes := make([]Bytes48, 1)
copy(commitmentsBytes[0][:], commitment[:])

// Create an invalid proof
invalidProof := Bytes48{}
proofsBytes := []Bytes48{invalidProof}

valid, err := VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, selectedCells, proofsBytes)
require.NotNil(t, err)
require.Equal(t, false, valid)
})
}

func TestRecoverCells(t *testing.T) {
require.NoError(t, Start())

t.Run("recover from partial cells", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, err := ComputeCells(&blob)
require.NoError(t, err)

// Use half of the cells
partialIndices := make([]uint64, 64)
partialCells := make([]Cell, 64)
for i := range 64 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}

recoveredCells, err := RecoverCells(partialIndices, partialCells)
require.NoError(t, err)
require.Equal(t, 128, len(recoveredCells))

// Verify recovered cells match original
for i := range cells {
require.Equal(t, cells[i], recoveredCells[i])
}
})

t.Run("insufficient cells should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, err := ComputeCells(&blob)
require.NoError(t, err)

// Use only 32 cells (less than 50% required)
partialIndices := make([]uint64, 32)
partialCells := make([]Cell, 32)
for i := range 32 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}

_, err = RecoverCells(partialIndices, partialCells)
require.NotNil(t, err)
})
}

func TestRecoverCellsAndKZGProofs(t *testing.T) {
require.NoError(t, Start())

t.Run("recover cells and proofs from partial cells", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

// Use half of the cells
partialIndices := make([]uint64, 64)
partialCells := make([]Cell, 64)
for i := range 64 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}

recoveredCells, recoveredProofs, err := RecoverCellsAndKZGProofs(partialIndices, partialCells)
require.NoError(t, err)
require.Equal(t, 128, len(recoveredCells))
require.Equal(t, 128, len(recoveredProofs))
require.Equal(t, len(recoveredCells), len(recoveredProofs), "recovered cells and proofs should have matching lengths")

// Verify recovered cells match original
for i := range cells {
require.Equal(t, cells[i], recoveredCells[i])
require.Equal(t, proofs[i], recoveredProofs[i])
}
})

t.Run("insufficient cells should fail", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

cells, err := ComputeCells(&blob)
require.NoError(t, err)

// Use only 32 cells (less than 50% required)
partialIndices := make([]uint64, 32)
partialCells := make([]Cell, 32)
for i := range 32 {
partialIndices[i] = uint64(i)
partialCells[i] = cells[i]
}

_, _, err = RecoverCellsAndKZGProofs(partialIndices, partialCells)
require.NotNil(t, err)
})
}

func TestBlobToKZGCommitment(t *testing.T) {
require.NoError(t, Start())

t.Run("valid blob", func(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])

commitment, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
require.Equal(t, 48, len(commitment))

// Verify commitment is deterministic
commitment2, err := BlobToKZGCommitment(&blob)
require.NoError(t, err)
require.Equal(t, commitment, commitment2)
})
}
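The two RecoverCells sub-tests encode the recovery threshold: with 128 cells per extended blob, reconstruction needs at least half of them, which is why 64 cells succeed and 32 fail. A hedged guard a caller might run before attempting recovery (the 128 constant mirrors the tests above and is an assumption here, not an exported package constant):

// canRecover reports whether enough distinct cell indices are available to
// attempt recovery. Sketch only; assumes 128 cells per extended blob.
func canRecover(cellIndices []uint64) bool {
	const cellsPerExtBlob = 128
	seen := make(map[uint64]struct{}, len(cellIndices))
	for _, idx := range cellIndices {
		if idx < cellsPerExtBlob {
			seen[idx] = struct{}{}
		}
	}
	return len(seen) >= cellsPerExtBlob/2
}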
@@ -70,7 +70,7 @@ func TestVerifyBlobKZGProofBatch(t *testing.T) {
commitments := make([][]byte, blobCount)
proofs := make([][]byte, blobCount)

for i := 0; i < blobCount; i++ {
for i := range blobCount {
blob := random.GetRandBlob(int64(i))
commitment, proof, err := GenerateCommitmentAndProof(blob)
require.NoError(t, err)

@@ -203,13 +203,13 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
require.NoError(t, err)

// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

// Create flattened cell proofs (like execution client format)
cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
cellProofs[i] = proofs[i][:]
}

blobs := [][]byte{blob[:]}

@@ -236,7 +236,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
require.NoError(t, err)

// Compute cells and proofs
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

blobs[i] = blob[:]

@@ -244,7 +244,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {

// Add cell proofs for this blob
for j := range numberOfColumns {
allCellProofs = append(allCellProofs, cellsAndProofs.Proofs[j][:])
allCellProofs = append(allCellProofs, proofs[j][:])
}
}

@@ -319,7 +319,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
randBlob := random.GetRandBlob(123)
var blob Blob
copy(blob[:], randBlob[:])
cellsAndProofs, err := ComputeCellsAndKZGProofs(&blob)
_, proofs, err := ComputeCellsAndKZGProofs(&blob)
require.NoError(t, err)

// Generate wrong commitment from different blob

@@ -331,7 +331,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {

cellProofs := make([][]byte, numberOfColumns)
for i := range numberOfColumns {
cellProofs[i] = cellsAndProofs.Proofs[i][:]
cellProofs[i] = proofs[i][:]
}

blobs := [][]byte{blob[:]}

@@ -432,8 +432,8 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[1] = make([]byte, 32) // Wrong size

// Add cell proofs for both blobs
for i := 0; i < blobCount; i++ {
for j := uint64(0); j < numberOfColumns; j++ {
for range blobCount {
for range numberOfColumns {
allCellProofs = append(allCellProofs, make([]byte, 48))
}
}

@@ -450,7 +450,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments := make([][]byte, blobCount)
var allCellProofs [][]byte

for i := 0; i < blobCount; i++ {
for i := range blobCount {
randBlob := random.GetRandBlob(int64(i))
var blob Blob
copy(blob[:], randBlob[:])

@@ -461,7 +461,7 @@ func TestVerifyCellKZGProofBatchFromBlobData(t *testing.T) {
commitments[i] = commitment[:]

// Add cell proofs - make some invalid in the second blob
for j := uint64(0); j < numberOfColumns; j++ {
for j := range numberOfColumns {
if i == 1 && j == 64 {
// Invalid proof size in middle of second blob's proofs
allCellProofs = append(allCellProofs, make([]byte, 20))
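The hunks above migrate existing call sites from the removed CellsAndProofs struct to the two-slice return. A sketch of the resulting call-site shape (hypothetical helper; numberOfColumns and the flattened [][]byte format follow the tests above):

// flattenCellProofs returns the per-column cell proofs for a blob in the
// flattened byte-slice format used by the tests. Sketch only.
func flattenCellProofs(blob *Blob, numberOfColumns int) ([][]byte, error) {
	// Before: cellsAndProofs.Proofs[i][:] indexed into a struct field.
	// After: the proofs slice is returned directly.
	_, proofs, err := ComputeCellsAndKZGProofs(blob)
	if err != nil {
		return nil, err
	}
	cellProofs := make([][]byte, numberOfColumns)
	for i := range numberOfColumns {
		cellProofs[i] = proofs[i][:]
	}
	return cellProofs, nil
}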
@@ -22,10 +22,7 @@ import (
// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
return nil
}
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
if c.Epoch+1 < headEpoch || c.Epoch == 0 {
return nil
}
// Only use head state if the head state is compatible with the target checkpoint.

@@ -33,11 +30,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
if err != nil {
return nil
}
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), c.Epoch)
// headEpoch - 1 equals c.Epoch if c is from the previous epoch and equals c.Epoch - 1 if c is from the current epoch.
// We don't use the smaller c.Epoch - 1 because forkchoice would not have the data to answer that.
headDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(headRoot), headEpoch-1)
if err != nil {
return nil
}
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), c.Epoch)
targetDependent, err := s.cfg.ForkChoiceStore.DependentRootForEpoch([32]byte(c.Root), headEpoch-1)
if err != nil {
return nil
}

@@ -46,14 +45,18 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
}

// If the head state alone is enough, we can return it directly read only.
if c.Epoch == headEpoch {
if c.Epoch <= headEpoch {
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
}
return st
}
// Otherwise we need to advance the head state to the start of the target epoch.
// At this point we can only have c.Epoch > headEpoch.
if !s.cfg.ForkChoiceStore.IsCanonical([32]byte(c.Root)) {
return nil
}
// Advance the head state to the start of the target epoch.
// This point can only be reached if c.Root == headRoot and c.Epoch > headEpoch.
slot, err := slots.EpochStart(c.Epoch)
if err != nil {
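The reworked getRecentPreState reuses the head state for a checkpoint only when both chains share the dependent root at headEpoch-1, since matching dependent roots imply the same shuffling. A condensed sketch of that decision with simplified, hypothetical types (epochs as plain uint64, the lookup passed as a function):

// headStateCompatible sketches the compatibility rule used above: the head
// state may serve checkpoint c when head and c share the dependent root at
// headEpoch-1. Hypothetical signature, simplified from the forkchoice store API.
func headStateCompatible(dependentRootForEpoch func(root [32]byte, epoch uint64) ([32]byte, error),
	headRoot, cRoot [32]byte, headEpoch uint64) bool {
	hd, err := dependentRootForEpoch(headRoot, headEpoch-1)
	if err != nil {
		return false
	}
	td, err := dependentRootForEpoch(cRoot, headEpoch-1)
	if err != nil {
		return false
	}
	return hd == td
}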
@@ -170,17 +170,141 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 31,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}))
}

func TestService_GetRecentPreState_Epoch_0(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}

func TestService_GetRecentPreState_Old_Checkpoint(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}

func TestService_GetRecentPreState_Same_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}

// Create a fork 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'U'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'T'},
block: headBlock,
slot: 64,
state: s,
}
require.NotNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}

func TestService_GetRecentPreState_Different_DependentRoots(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}

// Create a fork 30 <-- 31 <-- 32 <--- 64
// \---------33
// With the same dependent root at epoch 0 for a checkpoint at epoch 2
st, blk, err := prepareForkchoiceState(ctx, 30, [32]byte(ckRoot), [32]byte{}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 31, [32]byte{'S'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 32, [32]byte{'T'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 64, [32]byte{'U'}, blk.Root(), [32]byte{}, cp0, cp0)
require.NoError(t, err)
headBlock := blk
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
st, blk, err = prepareForkchoiceState(ctx, 33, [32]byte{'V'}, [32]byte(ckRoot), [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
cpRoot := blk.Root()

service.head = &head{
root: [32]byte{'U'},
block: headBlock,
state: s,
slot: 64,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{Epoch: 2, Root: cpRoot[:]}))
}

func TestService_GetRecentPreState_Different(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()
s, err := util.NewBeaconState()
require.NoError(t, err)
ckRoot := bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)
cp0 := &ethpb.Checkpoint{Epoch: 0, Root: ckRoot}
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, blk, err := prepareForkchoiceState(ctx, 33, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blk))
service.head = &head{
root: [32]byte(ckRoot),
state: s,
block: blk,
slot: 33,
}
require.IsNil(t, service.getRecentPreState(ctx, &ethpb.Checkpoint{}))
}

func TestService_GetAttPreState_Concurrency(t *testing.T) {
service, _ := minimalTestService(t)
ctx := t.Context()

@@ -209,16 +333,14 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
var wg sync.WaitGroup
errChan := make(chan error, 1000)

for i := 0; i < 1000; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for range 1000 {
wg.Go(func() {
cp1 := &ethpb.Checkpoint{Epoch: 1, Root: ckRoot}
_, err := service.getAttPreState(ctx, cp1)
if err != nil {
errChan <- err
}
}()
})
}

go func() {
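The concurrency test above swaps the manual wg.Add / defer wg.Done bookkeeping for sync.WaitGroup.Go, added in Go 1.25, which registers and completes the goroutine itself. A standalone sketch of the same pattern:

package main

import (
	"fmt"
	"sync"
)

// doWork is a placeholder for the real call made inside each goroutine.
func doWork() error { return nil }

func main() {
	var wg sync.WaitGroup
	errChan := make(chan error, 1000)
	for range 1000 {
		// wg.Go increments the counter, runs the function in a new goroutine,
		// and marks it done when the function returns (Go 1.25+).
		wg.Go(func() {
			if err := doWork(); err != nil {
				errChan <- err
			}
		})
	}
	wg.Wait()
	close(errChan)
	for err := range errChan {
		fmt.Println("worker error:", err)
	}
}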
@@ -66,9 +66,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}

@@ -105,14 +102,17 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}
s.sendFCU(cfg, fcuArgs)

// Pre-Fulu the caches are updated when computing the payload attributes
if cfg.postState.Version() >= version.Fulu {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
cfg.ctx = ctx
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}

@@ -134,7 +134,7 @@ func getStateVersionAndPayload(st state.BeaconState) (int, interfaces.ExecutionD
return preStateVersion, preStateHeader, nil
}

func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.onBlockBatch")
defer span.End()

@@ -295,18 +295,10 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
arg := &fcuConfig{
headState: preState,
headRoot: lastBR,
headBlock: lastB,
}
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}

func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityStore, roBlock consensusblocks.ROBlock) error {
func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.AvailabilityChecker, roBlock consensusblocks.ROBlock) error {
blockVersion := roBlock.Version()
block := roBlock.Block()
slot := block.Slot()

@@ -330,6 +322,7 @@ func (s *Service) areSidecarsAvailable(ctx context.Context, avs das.Availability
return nil
}

// the caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {

@@ -359,7 +352,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
if e > 0 {
e = e - 1
}
s.ForkChoicer().RLock()
target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
s.ForkChoicer().RUnlock()
if err != nil {
log.WithError(err).Error("Could not update proposer index state-root map")
return nil

@@ -372,7 +367,7 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
}

// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
// caches. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()

@@ -634,9 +629,7 @@ func missingDataColumnIndices(store *filesystem.DataColumnStorage, root [fieldpa
return nil, nil
}

numberOfColumns := params.BeaconConfig().NumberOfColumns

if uint64(len(expected)) > numberOfColumns {
if len(expected) > fieldparams.NumberOfColumns {
return nil, errMaxDataColumnsExceeded
}

@@ -817,11 +810,10 @@ func (s *Service) areDataColumnsAvailable(
}

case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missing))
var missingIndices any = "all"
missingIndicesCount := len(missing)

if missingIndicesCount < numberOfColumns {
if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

@@ -915,8 +907,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if currentSlot == s.HeadSlot() {
return
}
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
// return early if we are in init sync
if !s.inRegularSync() {
return

@@ -929,14 +919,32 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
}
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
// Before Fulu we need to process the next slot to find out if we are proposing.
if lastState.Version() < version.Fulu {
// Copy all the field tries in our cached state in the event of late
// blocks.
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
} else {
// After Fulu, we can update the caches asynchronously after sending FCU to the engine
defer func() {
go func() {
ctx, cancel := context.WithTimeout(s.ctx, slotDeadline)
defer cancel()
lastState.CopyAllTries()
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("Could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("Could not update epoch boundary caches")
}
}()
}()
}
// return early if we already started building a block for the current
// head root

@@ -948,13 +956,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
headBlock, err := s.headBlock()
if err != nil {
log.WithError(err).WithField("head_root", headRoot).Error("Unable to retrieve head block to fire payload attributes event")
}
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
go s.firePayloadAttributesEvent(s.cfg.StateNotifier.StateFeed(), headBlock, headRoot, s.CurrentSlot()+1)
return
}

@@ -973,6 +974,8 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
headBlock: headBlock,
attributes: attribute,
}
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
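postBlockProcess and lateBlockTasks now split cache maintenance by fork: pre-Fulu the next-slot and epoch-boundary caches are refreshed synchronously because they feed payload attributes, while post-Fulu they are refreshed in a background goroutine bounded by the slot deadline, after the FCU has been sent. A reduced sketch of that pattern; refreshCaches is a hypothetical callback and `log` is assumed to be the package-level logrus entry used throughout these files:

// refreshCachesAsync runs a cache refresh in the background, bounded by the
// slot deadline, and only logs failures (there is no caller to return them to).
func refreshCachesAsync(parent context.Context, deadline time.Duration, refreshCaches func(context.Context) error) {
	go func() {
		ctx, cancel := context.WithTimeout(parent, deadline)
		defer cancel()
		if err := refreshCaches(ctx); err != nil {
			log.WithError(err).Error("Could not refresh post-block caches")
		}
	}()
}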
@@ -42,14 +42,8 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if err := s.getFCUArgsEarlyBlock(cfg, fcuArgs); err != nil {
return err
}
if !s.inRegularSync() {
return nil
}
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(s.genesisTime, slot) {
return nil
}
return s.computePayloadAttributes(cfg, fcuArgs)
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {

@@ -173,26 +167,19 @@ func (s *Service) processLightClientUpdates(cfg *postBlockProcessConfig) {

// updateCachesPostBlockProcessing updates the next slot cache and handles the epoch
// boundary in order to compute the right proposer indices after processing
// state transition. This function is called on late blocks while still locked,
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
// state transition. The caller of this function must not hold a lock in forkchoice store.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) {
slot := cfg.postState.Slot()
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
log.WithError(err).Error("Could not update next slot state cache")
return
}
if !slots.IsEpochEnd(slot) {
return nil
return
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
if err := s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:]); err != nil {
log.WithError(err).Error("Could not handle epoch boundary")
}
}

@@ -202,20 +189,6 @@ func reportProcessingTime(startTime time.Time) {
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
}

// computePayloadAttributes modifies the passed FCU arguments to
// contain the right payload attributes with the tracked proposer. It gets
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
}
fcuArgs.attributes = s.getPayloadAttribute(cfg.ctx, fcuArgs.headState, fcuArgs.proposingSlot, cfg.headRoot[:])
return nil
}

// getBlockPreState returns the pre state of an incoming block. It uses the parent root of the block
// to retrieve the state in DB. It verifies the pre state's validity and the incoming block
// is in the correct time window.
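Note the shape change on updateCachesPostBlockProcessing: it no longer returns an error because it is now invoked from fire-and-forget goroutines, so failures are logged instead of propagated. Sketched with placeholder names (cfgT and the two helpers are hypothetical):

// Before (sketch): the helper surfaced errors to a synchronous caller.
//   func (s *Service) updateCaches(cfg *cfgT) error { ... }
// After (sketch): callers run it via `go ...`, so failures are only logged.
func (s *Service) updateCaches(cfg *cfgT) {
	if err := s.updateNextSlotCache(cfg); err != nil {
		log.WithError(err).Error("Could not update next slot state cache")
		return
	}
	if err := s.handleBoundary(cfg); err != nil {
		log.WithError(err).Error("Could not handle epoch boundary")
	}
}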
@@ -147,7 +147,7 @@ func TestStore_OnBlockBatch(t *testing.T) {
bState := st.Copy()

var blks []consensusblocks.ROBlock
for i := 0; i < 97; i++ {
for i := range 97 {
b, err := util.GenerateFullBlock(bState, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)

@@ -738,7 +738,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -788,7 +790,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)

@@ -816,25 +820,9 @@ func TestOnBlock_NilBlock(t *testing.T) {
service, tr := minimalTestService(t)
signed := &consensusblocks.SignedBeaconBlock{}
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
service.cfg.ForkChoiceStore.Lock()
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
require.Equal(t, true, IsInvalidBlock(err))
}

func TestOnBlock_InvalidSignature(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

gs, keys := util.DeterministicGenesisState(t, 32)
require.NoError(t, service.saveGenesisData(ctx, gs))

blk, err := util.GenerateFullBlock(gs, keys, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
blk.Signature = []byte{'a'} // Mutate the signature.
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
_, err = service.validateStateTransition(ctx, preState, wsb)
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, true, IsInvalidBlock(err))
}

@@ -866,7 +854,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
require.NoError(t, err)
}

@@ -1323,7 +1313,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
require.NoError(t, err)

logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
for range 10 {
fc := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
require.NoError(t, err)

@@ -1339,7 +1329,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1351,7 +1343,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1363,7 +1357,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1375,7 +1371,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
lock.Lock()
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
lock.Unlock()
wg.Done()
}()

@@ -1400,197 +1398,6 @@ func Test_verifyBlkFinalizedSlot_invalidBlock(t *testing.T) {
require.Equal(t, true, IsInvalidBlock(err))
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns
// INVALID from FCU, with LVH block 17. No head is viable. We check
// that the node is optimistic and that we can actually import a block on top of
// 17 and recover.
func TestStore_NoViableHead_FCU(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.SlotsPerEpoch = 6
config.AltairForkEpoch = 1
config.BellatrixForkEpoch = 2
params.OverrideBeaconConfig(config)

mockEngine := &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrAcceptedSyncingPayloadStatus}
service, tr := minimalTestService(t, WithExecutionEngineCaller(mockEngine))
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")

parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
}

for i := 6; i < 12; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockAltair(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}

for i := 12; i < 18; i++ {
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(i))
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), jc.Epoch)

// import a block that justifies the second epoch
driftGenesisTime(service, 18, 0)
validHeadState, err := service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlockBellatrix(validHeadState, keys, util.DefaultBlockGenConfig(), 18)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
firstInvalidRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

sjc := validHeadState.CurrentJustifiedCheckpoint()
require.Equal(t, primitives.Epoch(0), sjc.Epoch)
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrAcceptedSyncingPayloadStatus, ErrForkchoiceUpdated: execution.ErrInvalidPayloadStatus, ForkChoiceUpdatedResp: lvh}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 19, 0)
st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err = util.GenerateFullBlockBellatrix(st, keys, util.DefaultBlockGenConfig(), 19)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
// Check that forkchoice's head is the last invalid block imported. The
// store's headroot is the previous head (since the invalid block did
// not finish importing) one and that the node is optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
headRoot, err := service.HeadRoot(ctx)
require.NoError(t, err)
require.Equal(t, firstInvalidRoot, bytesutil.ToBytes32(headRoot))
optimistic, err := service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, true, optimistic)

// import another block based on the last valid head state
mockEngine = &mockExecution.EngineClient{}
service.cfg.ExecutionEngineCaller = mockEngine
driftGenesisTime(service, 20, 0)
b, err = util.GenerateFullBlockBellatrix(validHeadState, keys, &util.BlockGenConfig{}, 20)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)

preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()
require.Equal(t, jc.Epoch, sjc.Epoch)
require.Equal(t, jc.Root, bytesutil.ToBytes32(sjc.Root))
optimistic, err = service.IsOptimistic(ctx)
require.NoError(t, err)
require.Equal(t, false, optimistic)
}

// See the description in #10777 and #10782 for the full setup
// We sync optimistically a chain of blocks. Block 17 is the last block in Epoch
// 2. Block 18 justifies block 12 (the first in Epoch 2) and Block 19 returns

@@ -1642,7 +1449,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -1662,8 +1471,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 12; i < 18; i++ {

@@ -1684,8 +1494,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we haven't justified the second epoch yet
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()

@@ -1708,7 +1519,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1718,6 +1531,10 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
lvh := b.Block.Body.ExecutionPayload.ParentHash
// check our head
require.Equal(t, firstInvalidRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
isBlock18OptimisticAfterImport, err := service.IsOptimisticForRoot(ctx, firstInvalidRoot)
require.NoError(t, err)
require.Equal(t, true, isBlock18OptimisticAfterImport)
time.Sleep(20 * time.Millisecond) // wait for async forkchoice update to be processed

// import another block to find out that it was invalid
mockEngine = &mockExecution.EngineClient{ErrNewPayload: execution.ErrInvalidPayloadStatus, NewPayloadResp: lvh}

@@ -1768,7 +1585,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// Check the newly imported block is head, it justified the right
// checkpoint and the node is no longer optimistic

@@ -1835,7 +1654,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -1856,8 +1677,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

// import the merge block

@@ -1877,7 +1699,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -1906,8 +1730,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}
// Check that we have justified the second epoch
jc := service.cfg.ForkChoiceStore.JustifiedCheckpoint()

@@ -1949,7 +1774,7 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.Equal(t, true, optimistic)

// Check that the invalid blocks are not in database
for i := 0; i < 19-13; i++ {
for i := range 19 - 13 {
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, invalidRoots[i]))
}

@@ -1975,7 +1800,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
service.cfg.ForkChoiceStore.Unlock()
// Check that the head is still INVALID and the node is still optimistic
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
optimistic, err = service.IsOptimistic(ctx)

@@ -2000,7 +1827,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
require.NoError(t, err)

@@ -2028,7 +1857,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
sjc = service.CurrentJustifiedCheckpt()

@@ -2072,7 +1903,6 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")

for i := 1; i < 6; i++ {
t.Log(i)
driftGenesisTime(service, primitives.Slot(i), 0)
st, err := service.HeadState(ctx)
require.NoError(t, err)

@@ -2089,7 +1919,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

for i := 6; i < 12; i++ {

@@ -2109,8 +1941,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
}

// import the merge block

@@ -2130,7 +1963,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
service.cfg.ForkChoiceStore.Unlock()
require.NoError(t, err)
// save the post state and the payload Hash of this block since it will
// be the LVH

@@ -2161,7 +1996,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -2282,7 +2119,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2348,7 +2187,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2495,7 +2336,8 @@ func TestMissingBlobIndices(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMissingDataColumnIndices(t *testing.T) {
|
||||
countPlusOne := params.BeaconConfig().NumberOfColumns + 1
|
||||
const countPlusOne = fieldparams.NumberOfColumns + 1
|
||||
|
||||
tooManyColumns := make(map[uint64]bool, countPlusOne)
|
||||
for i := range countPlusOne {
|
||||
tooManyColumns[uint64(i)] = true
|
||||
@@ -2630,7 +2472,10 @@ func TestRollbackBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2731,7 +2576,9 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
@@ -2765,7 +2612,10 @@ func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false})
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
require.ErrorContains(t, "context canceled", err)
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
@@ -2805,6 +2655,10 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
require.NoError(t, s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, [32]byte{1, 2}))
|
||||
|
||||
for _, testVersion := range version.All()[1:] {
|
||||
if testVersion == version.Gloas {
|
||||
// TODO(16027): Unskip light client tests for Gloas
|
||||
continue
|
||||
}
|
||||
t.Run(version.String(testVersion), func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t, testVersion)
|
||||
|
||||
@@ -2879,7 +2733,7 @@ func TestProcessLightClientUpdate(t *testing.T) {
|
||||
|
||||
// set a better sync aggregate
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
for i := range 5 {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
@@ -3257,7 +3111,9 @@ func Test_postBlockProcess_EventSending(t *testing.T) {
|
||||
}
|
||||
|
||||
// Execute postBlockProcess
|
||||
service.cfg.ForkChoiceStore.Lock()
|
||||
err = service.postBlockProcess(cfg)
|
||||
service.cfg.ForkChoiceStore.Unlock()
|
||||
|
||||
// Check error expectation
|
||||
if tt.expectError {
|
||||
|
||||
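A recurring pattern in the test hunks above is that `postBlockProcess` is now called with the fork-choice store lock already held by the caller. The sketch below is a minimal, self-contained illustration of that caller-held-lock contract; the type and function names are stand-ins, not Prysm's actual API.

```go
// Sketch of the caller-held-lock contract the updated tests follow:
// the block-processing routine assumes the fork-choice store is already
// locked, so callers (and tests) wrap the call in Lock/Unlock.
package main

import (
	"fmt"
	"sync"
)

type forkChoiceStore struct {
	mu    sync.Mutex
	nodes int
}

func (s *forkChoiceStore) Lock()   { s.mu.Lock() }
func (s *forkChoiceStore) Unlock() { s.mu.Unlock() }

// processBlock mutates the store and assumes the caller holds the lock,
// mirroring how the tests wrap postBlockProcess with Lock/Unlock.
func processBlock(s *forkChoiceStore) error {
	s.nodes++
	return nil
}

func main() {
	s := &forkChoiceStore{}
	s.Lock()
	err := processBlock(s)
	s.Unlock()
	fmt.Println(err, s.nodes) // <nil> 1
}
```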
@@ -156,13 +156,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
go s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs)
}
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
if err := s.forkchoiceUpdateWithExecution(s.ctx, fcuArgs); err != nil {
log.WithError(err).Error("Could not update forkchoice")
if err := s.saveHead(s.ctx, fcuArgs.headRoot, fcuArgs.headBlock, fcuArgs.headState); err != nil {
log.WithError(err).Error("Could not save head")
}
s.pruneAttsFromPool(s.ctx, fcuArgs.headState, fcuArgs.headBlock)
}

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
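Per the hunk above, the forkchoice update is no longer fired off in a goroutine: it is called synchronously, returns an error, and on failure the head is still saved and the attestation pool pruned. A hedged sketch of that control flow, with placeholder names standing in for the real service methods:

```go
// Minimal sketch of the new UpdateHead fallback path: a failed FCU call is
// logged, the head is still persisted, and attestations are pruned.
package main

import (
	"context"
	"errors"
	"log"
)

type fcuArgs struct{ headRoot [32]byte }

func forkchoiceUpdateWithExecution(ctx context.Context, args fcuArgs) error {
	return errors.New("engine unavailable") // simulate a failed FCU call
}

func saveHead(ctx context.Context, root [32]byte) error { return nil }
func pruneAttsFromPool(ctx context.Context)             {}

func updateHead(ctx context.Context, args fcuArgs) {
	if err := forkchoiceUpdateWithExecution(ctx, args); err != nil {
		log.Printf("could not update forkchoice: %v", err)
		if err := saveHead(ctx, args.headRoot); err != nil {
			log.Printf("could not save head: %v", err)
		}
		pruneAttsFromPool(ctx)
	}
}

func main() { updateHead(context.Background(), fcuArgs{}) }
```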
@@ -117,7 +117,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -177,7 +179,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
roblock, err := blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
service.cfg.ForkChoiceStore.Lock()
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
service.cfg.ForkChoiceStore.Unlock()
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)
@@ -39,8 +39,8 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)

// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
type BlockReceiver interface {
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error
HasBlock(ctx context.Context, root [32]byte) bool
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
BlockBeingSynced([32]byte) bool
@@ -69,7 +69,7 @@ type SlashingReceiver interface {
// 1. Validate block, apply state transition and update checkpoints
// 2. Apply fork choice to the processed block
// 3. Save latest head info
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
defer span.End()
// Return early if the block is blacklisted
@@ -242,7 +242,7 @@ func (s *Service) validateExecutionAndConsensus(
return postState, isValidPayload, nil
}

func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block blocks.ROBlock) (time.Duration, error) {
func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, block blocks.ROBlock) (time.Duration, error) {
var err error
start := time.Now()
if avs != nil {
@@ -332,7 +332,7 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
// ReceiveBlockBatch processes the whole block batch at once, assuming the block batch is linear ,transitioning
// the state, performing batch verification of all collected signatures and then performing the appropriate
// actions for a block post-transition.
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error {
func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityChecker) error {
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlockBatch")
defer span.End()
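The hunks above replace `das.AvailabilityStore` with `das.AvailabilityChecker` in the block-receipt path, which reads like an interface-narrowing change: block receipt only needs to check data availability, not persist sidecars. The sketch below illustrates that idea; the method names are assumptions for illustration, not the exact Prysm `das` package API.

```go
// Hedged sketch of narrowing a store interface to a read-only checker
// for consumers that only verify availability.
package main

import (
	"context"
	"fmt"
)

type roBlock struct{ slot uint64 }

// availabilityChecker is the read-only capability a block receiver needs.
type availabilityChecker interface {
	IsDataAvailable(ctx context.Context, block roBlock) error
}

// availabilityStore would additionally persist sidecars (writer capability).
type availabilityStore interface {
	availabilityChecker
	Persist(ctx context.Context, block roBlock) error
}

type alwaysAvailable struct{}

func (alwaysAvailable) IsDataAvailable(context.Context, roBlock) error { return nil }

// receiveBlock depends only on the narrow checker interface.
func receiveBlock(ctx context.Context, avs availabilityChecker, b roBlock) error {
	if avs == nil {
		return nil
	}
	return avs.IsDataAvailable(ctx, b)
}

func main() {
	fmt.Println(receiveBlock(context.Background(), alwaysAvailable{}, roBlock{slot: 1}))
}
```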
@@ -216,13 +216,11 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
wg.Go(func() {
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
wg.Done()
}()
})
wg.Wait()
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
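The test above swaps the manual Add/Done/goroutine pattern for `sync.WaitGroup.Go`, which wraps the `Add(1)`/`Done` bookkeeping (available in recent Go releases, 1.25+ as far as I know). A minimal before/after sketch:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Before: explicit bookkeeping.
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("old style")
	}()

	// After: WaitGroup.Go handles Add/Done internally.
	wg.Go(func() {
		fmt.Println("new style")
	})

	wg.Wait()
}
```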
@@ -14,6 +14,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -470,30 +471,35 @@ func (s *Service) removeStartupState() {
// UpdateCustodyInfoInDB updates the custody information in the database.
// It returns the (potentially updated) custody group count and the earliest available slot.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSubscribedToAllDataSubnets := flags.Get().SubscribeAllDataSubnets
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode

cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement

// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSubscribedToAllDataSubnets, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSubscribedToAllDataSubnets)
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
log.WithError(err).Error("Could not update subscription status to all data subnets")
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}

// Warn the user if the node was previously subscribed to all data subnets and is not any more.
if wasSubscribedToAllDataSubnets && !isSubscribedToAllDataSubnets {
log.Warnf(
"Because the flag `--%s` was previously used, the node will still subscribe to all data subnets.",
flags.SubscribeAllDataSubnets.Name,
)
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement

// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
}

// Compute the custody group count.
custodyGroupCount := custodyRequirement
if isSubscribedToAllDataSubnets {
custodyGroupCount = cfg.NumberOfCustodyGroups
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}

targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}

// Safely compute the fulu fork slot.
@@ -510,12 +516,23 @@ func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot,
}
}

earliestAvailableSlot, custodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, custodyGroupCount)
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}

return earliestAvailableSlot, custodyGroupCount, nil
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}

if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}

return earliestAvailableSlot, actualCustodyGroupCount, nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.BeaconDB.HasBlock(ctx, r), "Block is not in DB")
|
||||
}
|
||||
}
|
||||
@@ -432,8 +431,7 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
require.Equal(b, true, s.cfg.ForkChoiceStore.HasNode(r), "Block is not in fork choice store")
|
||||
}
|
||||
}
|
||||
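The benchmarks above move from the classic `b.N` counter loop to `b.Loop()` (added in Go 1.24), which keeps the timing bookkeeping inside the testing package and makes the explicit `b.ResetTimer()` unnecessary. A minimal standalone benchmark showing both shapes:

```go
package bench

import "testing"

func work() int {
	sum := 0
	for i := 0; i < 1000; i++ {
		sum += i
	}
	return sum
}

// Old style: manual counter plus timer reset.
func BenchmarkWorkOld(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = work()
	}
}

// New style: b.Loop() decides how many iterations to run.
func BenchmarkWorkNew(b *testing.B) {
	for b.Loop() {
		_ = work()
	}
}
```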
@@ -605,7 +603,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
numberOfColumns = uint64(128)
)

params.SetupTestConfigCleanup(t)
@@ -613,7 +610,6 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
cfg.NumberOfColumns = numberOfColumns
params.OverrideBeaconConfig(cfg)

ctx := t.Context()
@@ -644,7 +640,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {

resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

@@ -682,7 +678,7 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SubscribeAllDataSubnets = true
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

@@ -697,4 +693,121 @@ func TestUpdateCustodyInfoInDB(t *testing.T) {
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})

t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)

// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})

t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups

// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)

// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
})

t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)

slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups

// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
})

t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)

// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)

// Mock a high custody requirement (simulating many validators)
// We need to override the custody requirement calculation
// For this test, we'll verify the logic by checking if custodyRequirement > 64
// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
// This would require a different test setup with actual validators
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
// With low validator requirements (4), should use semi-supernode minimum (64)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}
@@ -106,7 +106,7 @@ type EventFeedWrapper struct {
subscribed chan struct{} // this channel is closed once a subscription is made
}

func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
func (w *EventFeedWrapper) Subscribe(channel any) event.Subscription {
select {
case <-w.subscribed:
break // already closed
@@ -116,7 +116,7 @@ func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
return w.feed.Subscribe(channel)
}

func (w *EventFeedWrapper) Send(value interface{}) int {
func (w *EventFeedWrapper) Send(value any) int {
return w.feed.Send(value)
}

@@ -275,7 +275,7 @@ func (s *ChainService) ReceiveBlockInitialSync(ctx context.Context, block interf
}

// ReceiveBlockBatch processes blocks in batches from initial-sync.
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBlock, _ das.AvailabilityChecker) error {
if s.State == nil {
return ErrNilState
}
@@ -305,7 +305,7 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []blocks.ROBl
}

// ReceiveBlock mocks ReceiveBlock method in chain service.
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityStore) error {
func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, _ [32]byte, _ das.AvailabilityChecker) error {
if s.ReceiveBlockMockErr != nil {
return s.ReceiveBlockMockErr
}

@@ -166,7 +166,7 @@ func (s *Service) RegisterValidator(ctx context.Context, reg []*ethpb.SignedVali
indexToRegistration := make(map[primitives.ValidatorIndex]*ethpb.ValidatorRegistrationV1)

valid := make([]*ethpb.SignedValidatorRegistrationV1, 0)
for i := 0; i < len(reg); i++ {
for i := range reg {
r := reg[i]
nx, exists := s.cfg.headFetcher.HeadPublicKeyToValidatorIndex(bytesutil.ToBytes48(r.Message.Pubkey))
if !exists {
beacon-chain/cache/active_balance_test.go (4 changes, vendored)
@@ -17,7 +17,7 @@ import (

func TestBalanceCache_AddGetBalance(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(blockRoots); i++ {
for i := range blockRoots {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
@@ -61,7 +61,7 @@ func TestBalanceCache_AddGetBalance(t *testing.T) {

func TestBalanceCache_BalanceKey(t *testing.T) {
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot)
for i := 0; i < len(blockRoots); i++ {
for i := range blockRoots {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(i))
blockRoots[i] = b
beacon-chain/cache/committee.go (2 changes, vendored)
@@ -51,7 +51,7 @@ type CommitteeCache struct {
}

// committeeKeyFn takes the seed as the key to retrieve shuffled indices of a committee in a given epoch.
func committeeKeyFn(obj interface{}) (string, error) {
func committeeKeyFn(obj any) (string, error) {
info, ok := obj.(*Committees)
if !ok {
return "", ErrNotCommittee
beacon-chain/cache/committee_fuzz_test.go (6 changes, vendored)
@@ -14,7 +14,7 @@ func TestCommitteeKeyFuzz_OK(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}

for i := 0; i < 100000; i++ {
for range 100000 {
fuzzer.Fuzz(c)
k, err := committeeKeyFn(c)
require.NoError(t, err)
@@ -27,7 +27,7 @@ func TestCommitteeCache_FuzzCommitteesByEpoch(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}

for i := 0; i < 100000; i++ {
for range 100000 {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
_, err := cache.Committee(t.Context(), 0, c.Seed, 0)
@@ -42,7 +42,7 @@ func TestCommitteeCache_FuzzActiveIndices(t *testing.T) {
fuzzer := fuzz.NewWithSeed(0)
c := &Committees{}

for i := 0; i < 100000; i++ {
for range 100000 {
fuzzer.Fuzz(c)
require.NoError(t, cache.AddCommitteeShuffledList(t.Context(), c))
beacon-chain/cache/common.go (2 changes, vendored)
@@ -17,6 +17,6 @@ func trim(queue *cache.FIFO, maxSize uint64) {
}

// popProcessNoopFunc is a no-op function that never returns an error.
func popProcessNoopFunc(_ interface{}, _ bool) error {
func popProcessNoopFunc(_ any, _ bool) error {
return nil
}
@@ -769,7 +769,7 @@ func TestFinalizedDeposits_ReturnsTrieCorrectly(t *testing.T) {
}

var ctrs []*ethpb.DepositContainer
for i := 0; i < 2000; i++ {
for i := range 2000 {
ctrs = append(ctrs, generateCtr(uint64(10+(i/2)), int64(i)))
}

@@ -948,9 +948,9 @@ func rootCreator(rn byte) []byte {
func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for i := 0; i < b.N; i++ {
for b.Loop() {
dt := NewDepositTree()
for j := 0; j < totalDeposits; j++ {
for range totalDeposits {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -959,10 +959,10 @@ func BenchmarkDepositTree_InsertNewImplementation(b *testing.B) {
func BenchmarkDepositTree_InsertOldImplementation(b *testing.B) {
totalDeposits := 10000
input := bytesutil.ToBytes32([]byte("foo"))
for i := 0; i < b.N; i++ {
for b.Loop() {
dt, err := trie.NewTrie(33)
require.NoError(b, err)
for j := 0; j < totalDeposits; j++ {
for range totalDeposits {
err := dt.Insert(input[:], 0)
require.NoError(b, err)
}
@@ -980,8 +980,8 @@ func BenchmarkDepositTree_HashTreeRootNewImplementation(b *testing.B) {
}

b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {

for b.Loop() {
_, err = tr.HashTreeRoot()
require.NoError(b, err)
}
@@ -999,8 +999,8 @@ func BenchmarkDepositTree_HashTreeRootOldImplementation(b *testing.B) {
}

b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {

for b.Loop() {
_, err = dt.HashTreeRoot()
require.NoError(b, err)
}

@@ -20,7 +20,7 @@ func (ds *DepositTreeSnapshot) CalculateRoot() ([32]byte, error) {
size := ds.depositCount
index := len(ds.finalized)
root := trie.ZeroHashes[0]
for i := 0; i < DepositContractDepth; i++ {
for i := range DepositContractDepth {
if (size & 1) == 1 {
if index == 0 {
break
beacon-chain/cache/skip_slot_cache_test.go (6 changes, vendored)
@@ -47,15 +47,13 @@ func TestSkipSlotCache_DisabledAndEnabled(t *testing.T) {

c.Enable()
wg := new(sync.WaitGroup)
wg.Add(1)
go func() {
wg.Go(func() {
// Get call will only terminate when
// it is not longer in progress.
obj, err := c.Get(ctx, r)
require.NoError(t, err)
require.IsNil(t, obj)
wg.Done()
}()
})

c.MarkNotInProgress(r)
wg.Wait()
beacon-chain/cache/sync_committee.go (2 changes, vendored)
@@ -236,7 +236,7 @@ func (s *SyncCommitteeCache) UpdatePositionsInCommittee(syncCommitteeBoundaryRoo
// Given the `syncCommitteeIndexPosition` object, this returns the key of the object.
// The key is the `currentSyncCommitteeRoot` within the field.
// Error gets returned if input does not comply with `currentSyncCommitteeRoot` object.
func keyFn(obj interface{}) (string, error) {
func keyFn(obj any) (string, error) {
info, ok := obj.(*syncCommitteeIndexPosition)
if !ok {
return "", errNotSyncCommitteeIndexPosition
beacon-chain/cache/sync_subnet_ids_test.go (8 changes, vendored)
@@ -12,12 +12,12 @@ import (
func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
c := newSyncSubnetIDs()

for i := 0; i < 20; i++ {
for i := range 20 {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}

for i := uint64(0); i < 20; i++ {
for i := range uint64(20) {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

idxs, _, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -34,7 +34,7 @@ func TestSyncSubnetIDsCache_Roundtrip(t *testing.T) {
func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
c := newSyncSubnetIDs()

for i := 0; i < 20; i++ {
for i := range 20 {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}
c.AddSyncCommitteeSubnets(pubkey[:], 100, []uint64{uint64(i)}, 0)
}
@@ -42,7 +42,7 @@ func TestSyncSubnetIDsCache_ValidateCurrentEpoch(t *testing.T) {
coms := c.GetAllSubnets(50)
assert.Equal(t, 0, len(coms))

for i := uint64(0); i < 20; i++ {
for i := range uint64(20) {
pubkey := [fieldparams.BLSPubkeyLength]byte{byte(i)}

_, jEpoch, ok, _ := c.GetSyncCommitteeSubnets(pubkey[:], 100)
@@ -461,7 +461,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
st := ðpb.BeaconStateAltair{}
|
||||
b := ðpb.SignedBeaconBlockAltair{Block: ðpb.BeaconBlockAltair{}}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(st)
|
||||
fuzzer.Fuzz(b)
|
||||
if b.Block == nil {
|
||||
|
||||
@@ -240,7 +240,7 @@ func TestProcessSyncCommittee_processSyncAggregate(t *testing.T) {
|
||||
proposerIndex, err := helpers.BeaconProposerIndex(t.Context(), beaconState)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(syncBits); i++ {
|
||||
for i := range syncBits {
|
||||
if syncBits.BitAt(uint64(i)) {
|
||||
pk := bytesutil.ToBytes48(committeeKeys[i])
|
||||
require.DeepEqual(t, true, votedMap[pk])
|
||||
|
||||
@@ -195,10 +195,7 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
if params.BeaconConfig().MaxEffectiveBalance < effectiveBalance {
effectiveBalance = params.BeaconConfig().MaxEffectiveBalance
}
effectiveBalance := min(params.BeaconConfig().MaxEffectiveBalance, amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement))

return &ethpb.Validator{
PublicKey: pubKey,
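The refactor above folds the manual cap into the `min` builtin: round the deposit down to the nearest effective-balance increment, then clamp at the maximum effective balance. A standalone check that both forms agree (the increment and maximum below are illustrative, not necessarily the active network config):

```go
package main

import "fmt"

const (
	effectiveBalanceIncrement = uint64(1_000_000_000)  // 1 ETH in Gwei (illustrative)
	maxEffectiveBalance       = uint64(32_000_000_000) // 32 ETH in Gwei (illustrative)
)

// effectiveBalanceOld mirrors the removed if-based clamp.
func effectiveBalanceOld(amount uint64) uint64 {
	eb := amount - (amount % effectiveBalanceIncrement)
	if maxEffectiveBalance < eb {
		eb = maxEffectiveBalance
	}
	return eb
}

// effectiveBalanceNew mirrors the min-based form from the diff.
func effectiveBalanceNew(amount uint64) uint64 {
	return min(maxEffectiveBalance, amount-(amount%effectiveBalanceIncrement))
}

func main() {
	for _, amt := range []uint64{5_500_000_000, 32_000_000_000, 40_123_456_789} {
		fmt.Println(amt, effectiveBalanceOld(amt) == effectiveBalanceNew(amt))
	}
}
```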
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessPreGenesisDeposit_10000(t *testing.T) {
|
||||
deposit := ðpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
@@ -56,7 +56,7 @@ func TestFuzzProcessPreGenesisDeposit_Phase0_10000(t *testing.T) {
|
||||
deposit := ðpb.Deposit{}
|
||||
ctx := t.Context()
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -74,7 +74,7 @@ func TestFuzzProcessDeposit_Phase0_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -92,7 +92,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateAltair{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeAltair(state)
|
||||
|
||||
@@ -122,11 +122,8 @@ func ProcessInactivityScores(
}

if !helpers.IsInInactivityLeak(prevEpoch, finalizedEpoch) {
score := recoveryRate
// Prevents underflow below 0.
if score > v.InactivityScore {
score = v.InactivityScore
}
score := min(recoveryRate, v.InactivityScore)
v.InactivityScore -= score
}
inactivityScores[i] = v.InactivityScore
@@ -242,7 +239,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
}

balances := beaconState.Balances()
for i := 0; i < numOfVals; i++ {
for i := range numOfVals {
vals[i].BeforeEpochTransitionBalance = balances[i]

// Compute the post balance of the validator after accounting for the
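Same idea for inactivity-score recovery: `min(recoveryRate, score)` caps the decrement so the unsigned score can never underflow. A small equivalence check between the old and new forms (the recovery rate value is illustrative):

```go
package main

import "fmt"

const recoveryRate = uint64(16)

// decrementOld mirrors the removed explicit underflow guard.
func decrementOld(score uint64) uint64 {
	dec := recoveryRate
	if dec > score {
		dec = score
	}
	return score - dec
}

// decrementNew mirrors the min-based form from the diff.
func decrementNew(score uint64) uint64 {
	return score - min(recoveryRate, score)
}

func main() {
	for _, s := range []uint64{0, 7, 16, 100} {
		fmt.Println(s, decrementOld(s) == decrementNew(s))
	}
}
```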
@@ -21,7 +21,7 @@ import (
|
||||
func TestSyncCommitteeIndices_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64, vers int) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -113,7 +113,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MinDepositAmount,
|
||||
@@ -147,7 +147,7 @@ func TestSyncCommitteeIndices_DifferentPeriods(t *testing.T) {
|
||||
func TestSyncCommittee_CanGet(t *testing.T) {
|
||||
getState := func(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -394,7 +394,7 @@ func Test_ValidateSyncMessageTime(t *testing.T) {
|
||||
|
||||
func getState(t *testing.T, count uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, count)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
blsKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -33,7 +33,7 @@ func TestTranslateParticipation(t *testing.T) {
|
||||
r, err := helpers.BlockRootAtSlot(s, 0)
|
||||
require.NoError(t, err)
|
||||
var pendingAtts []*ethpb.PendingAttestation
|
||||
for i := 0; i < 3; i++ {
|
||||
for i := range 3 {
|
||||
pendingAtts = append(pendingAtts, ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: primitives.CommitteeIndex(i),
|
||||
|
||||
@@ -257,7 +257,7 @@ func VerifyIndexedAttestation(ctx context.Context, beaconState state.ReadOnlyBea
|
||||
}
|
||||
indices := indexedAtt.GetAttestingIndices()
|
||||
var pubkeys []bls.PublicKey
|
||||
for i := 0; i < len(indices); i++ {
|
||||
for i := range indices {
|
||||
pubkeyAtIdx := beaconState.PubkeyAtIndex(primitives.ValidatorIndex(indices[i]))
|
||||
pk, err := bls.PublicKeyFromBytes(pubkeyAtIdx[:])
|
||||
if err != nil {
|
||||
|
||||
@@ -317,7 +317,7 @@ func TestVerifyAttestationNoVerifySignature_Electra(t *testing.T) {
|
||||
func TestConvertToIndexed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -373,7 +373,7 @@ func TestVerifyIndexedAttestation_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -481,7 +481,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
sig := keys[0].Sign([]byte{'t', 'e', 's', 't'})
|
||||
list := bitfield.Bitlist{0b11111}
|
||||
var atts []ethpb.Att
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -498,7 +498,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
|
||||
atts = []ethpb.Att{}
|
||||
list = bitfield.Bitlist{0b10000}
|
||||
for i := uint64(0); i < 1000; i++ {
|
||||
for range uint64(1000) {
|
||||
atts = append(atts, ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
CommitteeIndex: 1,
|
||||
@@ -524,7 +524,7 @@ func TestVerifyAttestations_HandlesPlannedFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -588,7 +588,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
@@ -707,7 +707,7 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, numOfValidators)
|
||||
_, keys, err := util.DeterministicDepositsAndKeys(numOfValidators)
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: keys[i].PublicKey().Marshal(),
|
||||
|
||||
@@ -21,7 +21,7 @@ func TestFuzzProcessAttestationNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
att := ðpb.Attestation{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(att)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -37,7 +37,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
block := ðpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) {
|
||||
var p []byte
|
||||
var s []byte
|
||||
var d []byte
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(&ba)
|
||||
fuzzer.Fuzz(&pubkey)
|
||||
fuzzer.Fuzz(&sig)
|
||||
@@ -83,7 +83,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) {
|
||||
e := ðpb.Eth1Data{}
|
||||
state, err := state_native.InitializeFromProtoUnsafePhase0(ðpb.BeaconState{})
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := ProcessEth1DataInBlock(t.Context(), state, e)
|
||||
@@ -98,7 +98,7 @@ func TestFuzzareEth1DataEqual_10000(_ *testing.T) {
|
||||
eth1data := ðpb.Eth1Data{}
|
||||
eth1data2 := ðpb.Eth1Data{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(eth1data2)
|
||||
AreEth1DataEqual(eth1data, eth1data2)
|
||||
@@ -110,7 +110,7 @@ func TestFuzzEth1DataHasEnoughSupport_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
eth1data := ðpb.Eth1Data{}
|
||||
var stateVotes []*ethpb.Eth1Data
|
||||
for i := 0; i < 100000; i++ {
|
||||
for i := range 100000 {
|
||||
fuzzer.Fuzz(eth1data)
|
||||
fuzzer.Fuzz(&stateVotes)
|
||||
s, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
@@ -129,7 +129,7 @@ func TestFuzzProcessBlockHeaderNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
block := ðpb.BeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(block)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -145,7 +145,7 @@ func TestFuzzProcessRandao_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
b := ðpb.SignedBeaconBlock{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -168,7 +168,7 @@ func TestFuzzProcessRandaoNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
blockBody := ðpb.BeaconBlockBody{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(blockBody)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -186,7 +186,7 @@ func TestFuzzProcessProposerSlashings_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
p := ðpb.ProposerSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(p)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -203,7 +203,7 @@ func TestFuzzVerifyProposerSlashing_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
proposerSlashing := ðpb.ProposerSlashing{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(proposerSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -219,7 +219,7 @@ func TestFuzzProcessAttesterSlashings_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
a := ðpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(a)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -237,7 +237,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
attesterSlashing := ðpb.AttesterSlashing{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -253,7 +253,7 @@ func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) {
|
||||
attestationData := ðpb.AttestationData{}
|
||||
attestationData2 := ðpb.AttestationData{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attestationData)
|
||||
fuzzer.Fuzz(attestationData2)
|
||||
IsSlashableAttestationData(attestationData, attestationData2)
|
||||
@@ -264,7 +264,7 @@ func TestFuzzslashableAttesterIndices_10000(_ *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
attesterSlashing := ðpb.AttesterSlashing{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for range 10000 {
|
||||
fuzzer.Fuzz(attesterSlashing)
|
||||
SlashableAttesterIndices(attesterSlashing)
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func TestFuzzProcessAttestationsNoVerify_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
b := ðpb.SignedBeaconBlock{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(b)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -298,7 +298,7 @@ func TestFuzzVerifyIndexedAttestationn_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
idxAttestation := ðpb.IndexedAttestation{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(idxAttestation)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -313,7 +313,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
deposit := ðpb.Deposit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -329,7 +329,7 @@ func TestFuzzProcessVoluntaryExits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconState{}
|
||||
e := ðpb.SignedVoluntaryExit{}
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -346,7 +346,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
state := ðpb.BeaconState{}
|
||||
e := ðpb.SignedVoluntaryExit{}
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(e)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
@@ -366,7 +366,7 @@ func TestFuzzVerifyExit_10000(t *testing.T) {
|
||||
fork := ðpb.Fork{}
|
||||
var slot primitives.Slot
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(ve)
|
||||
fuzzer.Fuzz(rawVal)
|
||||
fuzzer.Fuzz(fork)
|
||||
|
||||
@@ -60,7 +60,7 @@ func Eth1DataHasEnoughSupport(beaconState state.ReadOnlyBeaconState, data *ethpb
voteCount := uint64(0)

for _, vote := range beaconState.Eth1DataVotes() {
if AreEth1DataEqual(vote, data.Copy()) {
if AreEth1DataEqual(vote, data) {
voteCount++
}
}
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
func FakeDeposits(n uint64) []*ethpb.Eth1Data {
|
||||
deposits := make([]*ethpb.Eth1Data, n)
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
deposits[i] = ðpb.Eth1Data{
|
||||
DepositCount: 1,
|
||||
DepositRoot: bytesutil.PadTo([]byte("root"), 32),
|
||||
@@ -175,7 +175,7 @@ func TestProcessEth1Data_SetsCorrectly(t *testing.T) {
|
||||
}
|
||||
|
||||
period := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))
|
||||
for i := uint64(0); i < period; i++ {
|
||||
for range period {
|
||||
processedState, err := blocks.ProcessEth1DataInBlock(t.Context(), beaconState, b.Block.Body.Eth1Data)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, processedState.Version() == version.Phase0)
|
||||
|
||||
@@ -27,7 +27,7 @@ func init() {
|
||||
|
||||
func TestProcessBlockHeader_ImproperBlockSlot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -104,7 +104,7 @@ func TestProcessBlockHeader_WrongProposerSig(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -148,7 +148,7 @@ func TestProcessBlockHeader_DifferentSlots(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -189,7 +189,7 @@ func TestProcessBlockHeader_PreviousBlockRootNotSignedRoot(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 48),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -233,7 +233,7 @@ func TestProcessBlockHeader_SlashedProposer(t *testing.T) {
|
||||
|
||||
func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
@@ -290,52 +290,3 @@ func TestProcessBlockHeader_OK(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, true, proto.Equal(nsh, expected), "Expected %v, received %v", expected, nsh)
|
||||
}
|
||||
|
||||
func TestBlockSignatureSet_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, 32),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
Slashed: true,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(10))
|
||||
require.NoError(t, state.SetLatestBlockHeader(util.HydrateBeaconHeader(ðpb.BeaconBlockHeader{
|
||||
Slot: 9,
|
||||
ProposerIndex: 0,
|
||||
})))
|
||||
|
||||
latestBlockSignedRoot, err := state.LatestBlockHeader().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
currentEpoch := time.CurrentEpoch(state)
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pID, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 10
|
||||
block.Block.ProposerIndex = pID
|
||||
block.Block.Body.RandaoReveal = bytesutil.PadTo([]byte{'A', 'B', 'C'}, 96)
|
||||
block.Block.ParentRoot = latestBlockSignedRoot[:]
|
||||
block.Signature, err = signing.ComputeDomainAndSign(state, currentEpoch, block.Block, params.BeaconConfig().DomainBeaconProposer, priv)
|
||||
require.NoError(t, err)
|
||||
proposerIdx, err := helpers.BeaconProposerIndex(t.Context(), state)
|
||||
require.NoError(t, err)
|
||||
validators[proposerIdx].Slashed = false
|
||||
validators[proposerIdx].PublicKey = priv.PublicKey().Marshal()
|
||||
err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx])
|
||||
require.NoError(t, err)
|
||||
set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Block signature set returned a set which was unable to be verified")
|
||||
}
|
||||
|
||||
@@ -90,6 +90,9 @@ func IsExecutionEnabled(st state.ReadOnlyBeaconState, body interfaces.ReadOnlyBe
|
||||
if st == nil || body == nil {
|
||||
return false, errors.New("nil state or block body")
|
||||
}
|
||||
if st.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
if IsPreBellatrixVersion(st.Version()) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -260,11 +260,12 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
|
||||
|
||||
func Test_IsExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
want bool
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header interfaces.ExecutionData
|
||||
useAltairSt bool
|
||||
useCapellaSt bool
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "use older than bellatrix state",
|
||||
@@ -331,6 +332,17 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
}(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "capella state always enabled",
|
||||
payload: emptyPayload(),
|
||||
header: func() interfaces.ExecutionData {
|
||||
h, err := emptyPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
return h
|
||||
}(),
|
||||
useCapellaSt: true,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -342,6 +354,8 @@ func Test_IsExecutionEnabled(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
if tt.useAltairSt {
|
||||
st, _ = util.DeterministicGenesisStateAltair(t, 1)
|
||||
} else if tt.useCapellaSt {
|
||||
st, _ = util.DeterministicGenesisStateCapella(t, 1)
|
||||
}
|
||||
got, err := blocks.IsExecutionEnabled(st, body)
|
||||
require.NoError(t, err)
|
||||
@@ -851,8 +865,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
	require.NoError(b, err)
	require.NoError(b, st.SetLatestExecutionPayloadHeader(h))

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
	for b.Loop() {
		_, err := blocks.IsMergeTransitionComplete(st)
		require.NoError(b, err)
	}

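The benchmark hunks replace the manual b.N loop plus b.ResetTimer with testing.B.Loop, added in Go 1.24. A minimal sketch of the idiom, independent of the Prysm code above:

```go
package bench_test

import "testing"

func BenchmarkSum(b *testing.B) {
	data := make([]int, 1024) // setup before the loop is excluded from timing

	// b.Loop resets the timer on its first call and counts one iteration per
	// pass, replacing b.ResetTimer() and `for i := 0; i < b.N; i++`.
	for b.Loop() {
		sum := 0
		for _, v := range data {
			sum += v
		}
		_ = sum
	}
}
```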
@@ -122,24 +122,6 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
|
||||
return nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.
|
||||
func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState,
|
||||
proposerIndex primitives.ValidatorIndex,
|
||||
sig []byte,
|
||||
rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) {
|
||||
currentEpoch := slots.ToEpoch(beaconState.Slot())
|
||||
domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposer, err := beaconState.ValidatorAtIndex(proposerIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerPubKey := proposer.PublicKey
|
||||
return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc)
|
||||
}
|
||||
|
||||
// RandaoSignatureBatch retrieves the relevant randao specific signature batch object
|
||||
// from a block and its corresponding state.
|
||||
func RandaoSignatureBatch(
|
||||
|
||||
@@ -28,7 +28,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
|
||||
ActivationEpoch: primitives.Epoch(0),
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: []byte(fmt.Sprintf("val_%d", i)),
|
||||
PublicKey: fmt.Appendf(nil, "val_%d", i),
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawalCredentials: wd,
|
||||
}
|
||||
|
||||
@@ -278,12 +278,12 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
		if uint64(curEpoch) < e {
			continue
		}
		bal, err := st.PendingBalanceToWithdraw(srcIdx)
		hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
		if err != nil {
			log.WithError(err).Error("Failed to fetch pending balance to withdraw")
			continue
		}
		if bal > 0 {
		if hasBal {
			continue
		}

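The consolidation-request path only needs to know whether any pending balance exists, not its total, so the exact-sum accessor is swapped for a boolean one. An illustration of why an existence check can return early, using made-up types rather than Prysm's actual state accessors:

```go
// Illustrative only: pendingWithdrawal and the function below are invented
// to show the short-circuit; the real accessor lives on the beacon state.
type pendingWithdrawal struct {
	ValidatorIndex uint64
	Amount         uint64
}

func hasPendingBalanceToWithdraw(pending []pendingWithdrawal, idx uint64) bool {
	for _, w := range pending {
		if w.ValidatorIndex == idx && w.Amount > 0 {
			return true // stop at the first match instead of summing everything
		}
	}
	return false
}
```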
@@ -16,7 +16,7 @@ func TestFuzzProcessDeposits_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposits := make([]*ethpb.Deposit, 100)
|
||||
ctx := t.Context()
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
for i := range deposits {
|
||||
fuzzer.Fuzz(deposits[i])
|
||||
@@ -36,7 +36,7 @@ func TestFuzzProcessDeposit_10000(t *testing.T) {
|
||||
state := ðpb.BeaconStateElectra{}
|
||||
deposit := ðpb.Deposit{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(state)
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(state)
|
||||
|
||||
@@ -95,7 +95,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 10; i++ {
|
||||
for i := range primitives.ValidatorIndex(10) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/10, b)
|
||||
@@ -122,7 +122,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 2; i++ {
|
||||
for i := range primitives.ValidatorIndex(2) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
|
||||
@@ -149,7 +149,7 @@ func TestProcessPendingDeposits(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
// Validators 0..4 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 4; i++ {
|
||||
for i := range primitives.ValidatorIndex(4) {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing)/5, b)
|
||||
@@ -528,7 +528,7 @@ func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
for i := range numVals {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
|
||||
@@ -56,7 +56,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -82,7 +82,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -108,7 +108,7 @@ func TestProcessRegistryUpdates(t *testing.T) {
|
||||
Slot: 5 * params.BeaconConfig().SlotsPerEpoch,
|
||||
FinalizedCheckpoint: ð.Checkpoint{Epoch: finalizedEpoch, Root: make([]byte, fieldparams.RootLength)},
|
||||
}
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ð.Validator{
|
||||
EffectiveBalance: params.BeaconConfig().EjectionBalance - 1,
|
||||
ExitEpoch: 10,
|
||||
@@ -157,7 +157,7 @@ func Benchmark_ProcessRegistryUpdates_MassEjection(b *testing.B) {
|
||||
st, err := util.NewBeaconStateElectra()
|
||||
require.NoError(b, err)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
for b.Loop() {
|
||||
b.StopTimer()
|
||||
if err := st.SetValidators(genValidators(100000)); err != nil {
|
||||
panic(err)
|
||||
|
||||
@@ -329,10 +329,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) (state.BeaconState, er
		balance := bals[idx]

		if balance+downwardThreshold < val.EffectiveBalance() || val.EffectiveBalance()+upwardThreshold < balance {
			effectiveBal := maxEffBalance
			if effectiveBal > balance-balance%effBalanceInc {
				effectiveBal = balance - balance%effBalanceInc
			}
			effectiveBal := min(maxEffBalance, balance-balance%effBalanceInc)
			if effectiveBal != val.EffectiveBalance() {
				newVal = val.Copy()
				newVal.EffectiveBalance = effectiveBal

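The four-line clamp collapses into Go 1.21's min built-in. A small standalone check of the equivalence with illustrative Gwei values (the constants here are examples, not Prysm's config):

```go
package main

import "fmt"

// effectiveBalance rounds a balance down to the nearest increment and caps
// it at the maximum effective balance, exactly what the replaced branch did.
func effectiveBalance(balance, maxEffBalance, effBalanceInc uint64) uint64 {
	return min(maxEffBalance, balance-balance%effBalanceInc)
}

func main() {
	// 33.1 ETH balance, 32 ETH cap, 1 ETH increment (values for illustration).
	fmt.Println(effectiveBalance(33_100_000_000, 32_000_000_000, 1_000_000_000)) // 32000000000
	// Below the cap the result is just the rounded-down balance.
	fmt.Println(effectiveBalance(17_900_000_000, 32_000_000_000, 1_000_000_000)) // 17000000000
}
```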
@@ -14,7 +14,7 @@ func TestFuzzFinalUpdates_10000(t *testing.T) {
|
||||
fuzzer := gofuzz.NewWithSeed(0)
|
||||
base := ðpb.BeaconState{}
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
for i := range 10000 {
|
||||
fuzzer.Fuzz(base)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(base)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -218,7 +218,7 @@ func TestProcessRegistryUpdates_EligibleToActivate_Cancun(t *testing.T) {
|
||||
cfg.ChurnLimitQuotient = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
for i := uint64(0); i < 10; i++ {
|
||||
for range uint64(10) {
|
||||
base.Validators = append(base.Validators, ðpb.Validator{
|
||||
ActivationEligibilityEpoch: finalizedEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
@@ -314,28 +314,28 @@ func TestProcessRegistryUpdates_CanExits(t *testing.T) {
|
||||
|
||||
func buildState(t testing.TB, slot primitives.Slot, validatorCount uint64) state.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
s, err := util.NewBeaconState()
|
||||
|
||||
@@ -19,7 +19,7 @@ func TestProcessJustificationAndFinalizationPreCompute_ConsecutiveEpochs(t *test
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -56,7 +56,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyCurrentEpoch(t *te
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -93,7 +93,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
e := params.BeaconConfig().FarFutureEpoch
|
||||
a := params.BeaconConfig().MaxEffectiveBalance
|
||||
blockRoots := make([][]byte, params.BeaconConfig().SlotsPerEpoch*2+1)
|
||||
for i := 0; i < len(blockRoots); i++ {
|
||||
for i := range blockRoots {
|
||||
blockRoots[i] = []byte{byte(i)}
|
||||
}
|
||||
base := ðpb.BeaconState{
|
||||
@@ -128,7 +128,7 @@ func TestProcessJustificationAndFinalizationPreCompute_JustifyPrevEpoch(t *testi
|
||||
func TestUnrealizedCheckpoints(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
|
||||
@@ -42,7 +42,7 @@ func ProcessRewardsAndPenaltiesPrecompute(
|
||||
return nil, errors.Wrap(err, "could not get proposer attestation delta")
|
||||
}
|
||||
validatorBals := state.Balances()
|
||||
for i := 0; i < numOfVals; i++ {
|
||||
for i := range numOfVals {
|
||||
vp[i].BeforeEpochTransitionBalance = validatorBals[i]
|
||||
|
||||
// Compute the post balance of the validator after accounting for the
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestProcessRewardsAndPenaltiesPrecompute(t *testing.T) {
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -63,7 +63,7 @@ func TestAttestationDeltas_ZeroEpoch(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
@@ -99,7 +99,7 @@ func TestAttestationDeltas_ZeroInclusionDelay(t *testing.T) {
|
||||
base := buildState(e+2, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
var emptyRoot [32]byte
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{
|
||||
@@ -131,7 +131,7 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
validatorCount := uint64(2048)
|
||||
base := buildState(e+3, validatorCount)
|
||||
atts := make([]*ethpb.PendingAttestation, 3)
|
||||
for i := 0; i < len(atts); i++ {
|
||||
for i := range atts {
|
||||
atts[i] = ðpb.PendingAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, fieldparams.RootLength)},
|
||||
@@ -176,28 +176,28 @@ func TestProcessRewardsAndPenaltiesPrecompute_SlashedInactivePenalty(t *testing.
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return ðpb.BeaconState{
|
||||
|
||||
@@ -17,5 +17,5 @@ type Event struct {
	// Type is the type of event.
	Type EventType
	// Data is event-specific data.
	Data interface{}
	Data any
}

@@ -54,7 +54,7 @@ func TestAttestation_ComputeSubnetForAttestation(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -5,7 +5,7 @@ package helpers

import (
	"context"
	"fmt"
	"sort"
	"slices"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
@@ -515,9 +515,7 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
	// used for failing verify signature fallback.
	sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
	copy(sortedIndices, shuffledIndices)
	sort.Slice(sortedIndices, func(i, j int) bool {
		return sortedIndices[i] < sortedIndices[j]
	})
	slices.Sort(sortedIndices)

	if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
		ShuffledIndices: shuffledIndices,

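Here sort.Slice with a comparator gives way to the generic slices.Sort from the Go 1.21 standard library, which sorts by the element type's natural ordering. A self-contained comparison:

```go
package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	a := []uint64{5, 2, 9, 1}
	b := append([]uint64(nil), a...)

	// Comparator-based form: the ordering lives in the closure.
	sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })

	// Go 1.21+ generic form: uses the natural ordering of the element type.
	slices.Sort(b)

	fmt.Println(a, b) // [1 2 5 9] [1 2 5 9]
}
```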
@@ -29,7 +29,7 @@ func TestComputeCommittee_WithoutCache(t *testing.T) {
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -122,7 +122,7 @@ func TestCommitteeAssignments_NoProposerForSlot0(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
activationEpoch = 3
|
||||
@@ -151,7 +151,7 @@ func TestCommitteeAssignments_CanRetrieve(t *testing.T) {
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, len(validators))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -234,7 +234,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
// First 2 epochs only half validators are activated.
|
||||
var activationEpoch primitives.Epoch
|
||||
if i >= len(validators)/2 {
|
||||
@@ -266,7 +266,7 @@ func TestCommitteeAssignments_CannotRetrieveOlderThanSlotsPerHistoricalRoot(t *t
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -287,7 +287,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
|
||||
// Initialize test with 256 validators, each slot and each index gets 4 validators.
|
||||
validators := make([]*ethpb.Validator, 4*params.BeaconConfig().SlotsPerEpoch)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
@@ -323,7 +323,7 @@ func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
func TestVerifyAttestationBitfieldLengths_OK(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, 2*params.BeaconConfig().SlotsPerEpoch)
|
||||
activeRoots := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -489,7 +489,7 @@ func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
|
||||
|
||||
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 300000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -512,8 +512,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -523,7 +522,7 @@ func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 3000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -546,8 +545,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
for b.Loop() {
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -557,7 +555,7 @@ func BenchmarkComputeCommittee3000000_WithPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 128000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -576,8 +574,8 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -592,7 +590,7 @@ func BenchmarkComputeCommittee128000_WithOutPreCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 1000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -611,8 +609,8 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -627,7 +625,7 @@ func BenchmarkComputeCommittee1000000_WithOutCache(b *testing.B) {
|
||||
|
||||
func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
validators := make([]*ethpb.Validator, 4000000)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -646,8 +644,8 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
|
||||
i := uint64(0)
|
||||
index := uint64(0)
|
||||
b.ResetTimer()
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
||||
for b.Loop() {
|
||||
i++
|
||||
_, err := helpers.ComputeCommittee(indices, seed, index, params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
@@ -663,7 +661,7 @@ func BenchmarkComputeCommittee4000000_WithOutCache(b *testing.B) {
|
||||
func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
committeeSize := uint64(16)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(committeeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -688,7 +686,7 @@ func TestBeaconCommitteeFromState_UpdateCacheForPreviousEpoch(t *testing.T) {
|
||||
|
||||
func TestPrecomputeProposerIndices_Ok(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -732,7 +730,7 @@ func TestAttestationCommitteesFromState(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -768,7 +766,7 @@ func TestAttestationCommitteesFromCache(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -934,7 +932,7 @@ func TestInitializeProposerLookahead_RegressionTest(t *testing.T) {
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, state, epoch)
|
||||
require.NoError(t, err)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
for epochOffset := primitives.Epoch(0); epochOffset < 2; epochOffset++ {
|
||||
for epochOffset := range primitives.Epoch(2) {
|
||||
targetEpoch := epoch + epochOffset
|
||||
|
||||
activeIndices, err := helpers.ActiveValidatorIndices(ctx, state, targetEpoch)
|
||||
|
||||
@@ -16,7 +16,7 @@ import (
|
||||
|
||||
func TestRandaoMix_OK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -52,7 +52,7 @@ func TestRandaoMix_OK(t *testing.T) {
|
||||
|
||||
func TestRandaoMix_CopyOK(t *testing.T) {
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
@@ -96,7 +96,7 @@ func TestGenerateSeed_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
randaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector)
|
||||
for i := 0; i < len(randaoMixes); i++ {
|
||||
for i := range randaoMixes {
|
||||
intInBytes := make([]byte, 32)
|
||||
binary.LittleEndian.PutUint64(intInBytes, uint64(i))
|
||||
randaoMixes[i] = intInBytes
|
||||
|
||||
@@ -239,28 +239,28 @@ func TestIsInInactivityLeak(t *testing.T) {
|
||||
|
||||
func buildState(slot primitives.Slot, validatorCount uint64) *ethpb.BeaconState {
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
validatorBalances := make([]uint64, len(validators))
|
||||
for i := 0; i < len(validatorBalances); i++ {
|
||||
for i := range validatorBalances {
|
||||
validatorBalances[i] = params.BeaconConfig().MaxEffectiveBalance
|
||||
}
|
||||
latestActiveIndexRoots := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestActiveIndexRoots); i++ {
|
||||
for i := range latestActiveIndexRoots {
|
||||
latestActiveIndexRoots[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
latestRandaoMixes := make(
|
||||
[][]byte,
|
||||
params.BeaconConfig().EpochsPerHistoricalVector,
|
||||
)
|
||||
for i := 0; i < len(latestRandaoMixes); i++ {
|
||||
for i := range latestRandaoMixes {
|
||||
latestRandaoMixes[i] = params.BeaconConfig().ZeroHash[:]
|
||||
}
|
||||
return ðpb.BeaconState{
|
||||
|
||||
@@ -23,7 +23,7 @@ var maxShuffleListSize uint64 = 1 << 40
|
||||
func SplitIndices(l []uint64, n uint64) [][]uint64 {
|
||||
var divided [][]uint64
|
||||
var lSize = uint64(len(l))
|
||||
for i := uint64(0); i < n; i++ {
|
||||
for i := range n {
|
||||
start := slice.SplitOffset(lSize, n, i)
|
||||
end := slice.SplitOffset(lSize, n, i+1)
|
||||
divided = append(divided, l[start:end])
|
||||
@@ -103,10 +103,7 @@ func ComputeShuffledIndex(index primitives.ValidatorIndex, indexCount uint64, se
	pivot := hash8Int % indexCount
	flip := (pivot + indexCount - uint64(index)) % indexCount
	// Consider every pair only once by picking the highest pair index to retrieve randomness.
	position := uint64(index)
	if flip > position {
		position = flip
	}
	position := max(flip, uint64(index))
	// Add position except its last byte to []buf for randomness,
	// it will be used later to select a bit from the resulting hash.
	binary.LittleEndian.PutUint64(posBuffer[:8], position>>8)

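Here the three-line branch collapses into the max built-in (Go 1.21+). The reason for taking the larger of index and flip is that both members of a swap pair then draw randomness from the same position; a tiny check of that symmetry:

```go
package main

import "fmt"

// position mirrors the swap-or-not step above: each pair (index, flip)
// uses its higher member as the randomness position, so the choice is
// the same whichever member the shuffle is currently visiting.
func position(index, flip uint64) uint64 {
	return max(flip, index)
}

func main() {
	fmt.Println(position(3, 10), position(10, 3)) // 10 10
}
```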
@@ -30,7 +30,7 @@ func TestShuffleList_OK(t *testing.T) {
|
||||
var list1 []primitives.ValidatorIndex
|
||||
seed1 := [32]byte{1, 128, 12}
|
||||
seed2 := [32]byte{2, 128, 12}
|
||||
for i := 0; i < 10; i++ {
|
||||
for i := range 10 {
|
||||
list1 = append(list1, primitives.ValidatorIndex(i))
|
||||
}
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestSplitIndices_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
numValidators := uint64(64000)
|
||||
for i := uint64(0); i < numValidators; i++ {
|
||||
for i := range numValidators {
|
||||
l = append(l, i)
|
||||
}
|
||||
split := SplitIndices(l, uint64(params.BeaconConfig().SlotsPerEpoch))
|
||||
@@ -104,7 +104,7 @@ func BenchmarkIndexComparison(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
b.Run(fmt.Sprintf("Indexwise_ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
// Simulate a list-shuffle by running shuffle-index listSize times.
|
||||
for j := primitives.ValidatorIndex(0); uint64(j) < listSize; j++ {
|
||||
_, err := ShuffledIndex(j, listSize, seed)
|
||||
@@ -120,11 +120,11 @@ func BenchmarkShuffleList(b *testing.B) {
|
||||
seed := [32]byte{123, 42}
|
||||
for _, listSize := range listSizes {
|
||||
testIndices := make([]primitives.ValidatorIndex, listSize)
|
||||
for i := uint64(0); i < listSize; i++ {
|
||||
for i := range listSize {
|
||||
testIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
b.Run(fmt.Sprintf("ShuffleList_%d", listSize), func(ib *testing.B) {
|
||||
for i := 0; i < ib.N; i++ {
|
||||
for ib.Loop() {
|
||||
_, err := ShuffleList(testIndices, seed)
|
||||
assert.NoError(b, err)
|
||||
}
|
||||
@@ -161,12 +161,12 @@ func TestSplitIndicesAndOffset_OK(t *testing.T) {
|
||||
|
||||
var l []uint64
|
||||
validators := uint64(64000)
|
||||
for i := uint64(0); i < validators; i++ {
|
||||
for i := range validators {
|
||||
l = append(l, i)
|
||||
}
|
||||
chunks := uint64(6)
|
||||
split := SplitIndices(l, chunks)
|
||||
for i := uint64(0); i < chunks; i++ {
|
||||
for i := range chunks {
|
||||
if !reflect.DeepEqual(split[i], l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)]) {
|
||||
t.Errorf("Want: %v got: %v", l[slice.SplitOffset(uint64(len(l)), chunks, i):slice.SplitOffset(uint64(len(l)), chunks, i+1)], split[i])
|
||||
break
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestCurrentPeriodPositions(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
Pubkeys: make([][]byte, params.BeaconConfig().SyncCommitteeSize),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -56,7 +56,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -87,7 +87,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -116,7 +116,7 @@ func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -144,7 +144,7 @@ func TestIsNextEpochSyncCommittee_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -175,7 +175,7 @@ func TestIsNextEpochSyncCommittee_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -203,7 +203,7 @@ func TestIsNextEpochSyncCommittee_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -231,7 +231,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -262,7 +262,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -304,7 +304,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -332,7 +332,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCache(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -363,7 +363,7 @@ func TestNextEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -391,7 +391,7 @@ func TestNextEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
@@ -449,7 +449,7 @@ func TestIsCurrentEpochSyncCommittee_SameBlockRoot(t *testing.T) {
|
||||
syncCommittee := ðpb.SyncCommittee{
|
||||
AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength),
|
||||
}
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = ðpb.Validator{
|
||||
|
||||
@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
}
|
||||
|
||||
if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
log.WithError(err).Error("Could not update committee cache")
|
||||
}
|
||||
|
||||
return indices, nil
|
||||
|
||||
@@ -184,7 +184,7 @@ func TestBeaconProposerIndex_OK(t *testing.T) {
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -241,7 +241,7 @@ func TestBeaconProposerIndex_BadState(t *testing.T) {
|
||||
c.MinGenesisActiveValidatorCount = 16384
|
||||
params.OverrideBeaconConfig(c)
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount/8)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -270,7 +270,7 @@ func TestComputeProposerIndex_Compatibility(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().MinGenesisActiveValidatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -322,7 +322,7 @@ func TestActiveValidatorCount_Genesis(t *testing.T) {
|
||||
|
||||
c := 1000
|
||||
validators := make([]*ethpb.Validator, c)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -357,7 +357,7 @@ func TestChurnLimit_OK(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
validators := make([]*ethpb.Validator, test.validatorCount)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
for i := range validators {
|
||||
validators[i] = ðpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
@@ -861,7 +861,7 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
|
||||
|
||||
validators := make([]*ethpb.Validator, 4)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < 4; i++ {
|
||||
for i := range uint64(4) {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -270,7 +270,7 @@ func genState(t *testing.T, valCount, avgBalance uint64) state.BeaconState {
|
||||
|
||||
validators := make([]*ethpb.Validator, valCount)
|
||||
balances := make([]uint64, len(validators))
|
||||
for i := uint64(0); i < valCount; i++ {
|
||||
for i := range valCount {
|
||||
validators[i] = ðpb.Validator{
|
||||
PublicKey: make([]byte, params.BeaconConfig().BLSPubkeyLength),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
|
||||
@@ -43,13 +43,15 @@ go_test(
|
||||
"das_core_test.go",
|
||||
"info_test.go",
|
||||
"p2p_interface_test.go",
|
||||
"reconstruction_helpers_test.go",
|
||||
"reconstruction_test.go",
|
||||
"semi_supernode_test.go",
|
||||
"utils_test.go",
|
||||
"validator_test.go",
|
||||
"verification_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
	"math"
	"slices"

	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/hash"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
@@ -96,8 +97,7 @@ func ComputeColumnsForCustodyGroup(custodyGroup uint64) ([]uint64, error) {
		return nil, ErrCustodyGroupTooLarge
	}

	numberOfColumns := cfg.NumberOfColumns

	numberOfColumns := uint64(fieldparams.NumberOfColumns)
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups

	columns := make([]uint64, 0, columnsPerGroup)
@@ -112,8 +112,9 @@
// ComputeCustodyGroupForColumn computes the custody group for a given column.
// It is the reciprocal function of ComputeColumnsForCustodyGroup.
func ComputeCustodyGroupForColumn(columnIndex uint64) (uint64, error) {
	const numberOfColumns = fieldparams.NumberOfColumns

	cfg := params.BeaconConfig()
	numberOfColumns := cfg.NumberOfColumns
	numberOfCustodyGroups := cfg.NumberOfCustodyGroups

	if columnIndex >= numberOfColumns {

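For orientation, the mapping between custody groups and column indices follows the Fulu spec layout: group g custodies columns g, g + NUMBER_OF_CUSTODY_GROUPS, and so on, so the reciprocal lookup is a simple modulo. A sketch with the same illustrative values the test below uses (128 columns, 64 groups); the helper name is made up:

```go
package main

import "fmt"

// columnsForGroup lists the columns custodied by a group, assuming the
// spec layout columns[j] = j*numberOfCustodyGroups + group.
func columnsForGroup(group, numberOfColumns, numberOfCustodyGroups uint64) []uint64 {
	columnsPerGroup := numberOfColumns / numberOfCustodyGroups
	columns := make([]uint64, 0, columnsPerGroup)
	for j := uint64(0); j < columnsPerGroup; j++ {
		columns = append(columns, j*numberOfCustodyGroups+group)
	}
	return columns
}

func main() {
	fmt.Println(columnsForGroup(6, 128, 64)) // [6 70]
	fmt.Println(uint64(70) % 64)             // 6: the reciprocal lookup
}
```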
@@ -30,7 +30,6 @@ func TestComputeColumnsForCustodyGroup(t *testing.T) {
|
||||
func TestComputeCustodyGroupForColumn(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
config := params.BeaconConfig()
|
||||
config.NumberOfColumns = 128
|
||||
config.NumberOfCustodyGroups = 64
|
||||
params.OverrideBeaconConfig(config)
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package peerdas

import (
	"encoding/binary"
	"maps"
	"sync"

	"github.com/ethereum/go-ethereum/p2p/enode"
@@ -107,3 +108,102 @@ func computeInfoCacheKey(nodeID enode.ID, custodyGroupCount uint64) [nodeInfoCac

	return key
}

// ColumnIndices represents a set of column indices. This could be the set of indices that a node is required to custody,
// the set that a peer custodies, missing indices for a given block, indices that are present on disk, etc.
type ColumnIndices map[uint64]struct{}

// Has returns true if the index is present in the ColumnIndices.
func (ci ColumnIndices) Has(index uint64) bool {
	_, ok := ci[index]
	return ok
}

// Count returns the number of indices present in the ColumnIndices.
func (ci ColumnIndices) Count() int {
	return len(ci)
}

// Set sets the index in the ColumnIndices.
func (ci ColumnIndices) Set(index uint64) {
	ci[index] = struct{}{}
}

// Unset removes the index from the ColumnIndices.
func (ci ColumnIndices) Unset(index uint64) {
	delete(ci, index)
}

// Copy creates a copy of the ColumnIndices.
func (ci ColumnIndices) Copy() ColumnIndices {
	newCi := make(ColumnIndices, len(ci))
	maps.Copy(newCi, ci)
	return newCi
}

// Intersection returns a new ColumnIndices that contains only the indices that are present in both ColumnIndices.
func (ci ColumnIndices) Intersection(other ColumnIndices) ColumnIndices {
	result := make(ColumnIndices)
	for index := range ci {
		if other.Has(index) {
			result.Set(index)
		}
	}
	return result
}

// Merge mutates the receiver so that any index that is set in either of
// the two ColumnIndices is set in the receiver after the function finishes.
// It does not mutate the other ColumnIndices given as a function argument.
func (ci ColumnIndices) Merge(other ColumnIndices) {
	for index := range other {
		ci.Set(index)
	}
}

// ToMap converts a ColumnIndices into a map[uint64]struct{}.
// In the future ColumnIndices may be changed to a bit map, so using
// ToMap will ensure forwards-compatibility.
func (ci ColumnIndices) ToMap() map[uint64]struct{} {
	return ci.Copy()
}

// ToSlice converts a ColumnIndices into a slice of uint64 indices.
func (ci ColumnIndices) ToSlice() []uint64 {
	indices := make([]uint64, 0, len(ci))
	for index := range ci {
		indices = append(indices, index)
	}
	return indices
}

// NewColumnIndicesFromSlice creates a ColumnIndices from a slice of uint64.
func NewColumnIndicesFromSlice(indices []uint64) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for _, index := range indices {
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndicesFromMap creates a ColumnIndices from a map[uint64]bool. This kind of map
// is used in several places in peerdas code. Converting from this map type to ColumnIndices
// will allow us to move ColumnIndices underlying type to a bitmap in the future and avoid
// lots of loops for things like intersections/unions or copies.
func NewColumnIndicesFromMap(indices map[uint64]bool) ColumnIndices {
	ci := make(ColumnIndices, len(indices))
	for index, set := range indices {
		if !set {
			continue
		}
		ci[index] = struct{}{}
	}
	return ci
}

// NewColumnIndices creates an empty ColumnIndices.
// In the future ColumnIndices may change from a reference type to a value type,
// so using this constructor will ensure forwards-compatibility.
func NewColumnIndices() ColumnIndices {
	return make(ColumnIndices)
}
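A short usage sketch of the ColumnIndices set introduced above, for example working out which custodied columns are still missing for a block and which overlap with what is on disk. It is written as a hypothetical example in the same package as the code above; the concrete index values are arbitrary:

```go
// exampleColumnIndices is a hypothetical helper showing the set operations.
func exampleColumnIndices() (missingCount, overlapCount int) {
	custodied := NewColumnIndicesFromSlice([]uint64{0, 6, 70, 127})
	onDisk := NewColumnIndicesFromSlice([]uint64{6, 127})

	// Columns we should have but do not yet: start from a copy so the
	// original set is left untouched, then drop whatever is on disk.
	missing := custodied.Copy()
	for _, idx := range onDisk.ToSlice() {
		missing.Unset(idx)
	}

	// Intersection returns a new set; Merge would mutate the receiver instead.
	overlap := custodied.Intersection(onDisk)
	return missing.Count(), overlap.Count() // 2, 2
}
```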
@@ -25,3 +25,10 @@ func TestInfo(t *testing.T) {
			require.DeepEqual(t, expectedDataColumnsSubnets, actual.DataColumnsSubnets)
		}
	}

func TestNewColumnIndicesFromMap(t *testing.T) {
	t.Run("nil map", func(t *testing.T) {
		ci := peerdas.NewColumnIndicesFromMap(nil)
		require.Equal(t, 0, ci.Count())
	})
}

@@ -5,10 +5,20 @@ import (
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "beacon_data_column_sidecar_computation_milliseconds",
		Help:    "Captures the time taken to compute data column sidecars from blobs.",
		Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
	},
var (
	dataColumnComputationTime = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "beacon_data_column_sidecar_computation_milliseconds",
			Help:    "Captures the time taken to compute data column sidecars from blobs.",
			Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
		},
	)

	cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "cells_and_proofs_from_structured_computation_milliseconds",
			Help:    "Captures the time taken to compute cells and proofs from structured computation.",
			Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
		},
	)
)

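The var block above adds a second histogram for the cells-and-proofs computation. A minimal, self-contained sketch of how such a promauto histogram is typically fed; the metric and function names here are illustrative, not Prysm's actual call site:

```go
package metricsexample

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// exampleTime mirrors the histograms declared above (illustrative name).
var exampleTime = promauto.NewHistogram(prometheus.HistogramOpts{
	Name:    "example_computation_milliseconds",
	Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
})

// timed records how long fn took, in milliseconds, into the histogram.
func timed(fn func()) {
	start := time.Now()
	fn()
	exampleTime.Observe(float64(time.Since(start).Milliseconds()))
}
```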
@@ -33,8 +33,7 @@ func (Cgc) ENRKey() string { return params.BeaconNetworkConfig().CustodyGroupCou
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/p2p-interface.md#verify_data_column_sidecar
func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
	// The sidecar index must be within the valid range.
	numberOfColumns := params.BeaconConfig().NumberOfColumns
	if sidecar.Index >= numberOfColumns {
	if sidecar.Index >= fieldparams.NumberOfColumns {
		return ErrIndexTooLarge
	}

@@ -100,7 +100,7 @@ func Test_VerifyKZGInclusionProofColumn(t *testing.T) {
|
||||
// Generate random KZG commitments `blobCount` blobs.
|
||||
kzgCommitments := make([][]byte, blobCount)
|
||||
|
||||
for i := 0; i < blobCount; i++ {
|
||||
for i := range blobCount {
|
||||
kzgCommitments[i] = make([]byte, 48)
|
||||
_, err := rand.Read(kzgCommitments[i])
|
||||
require.NoError(t, err)
|
||||
@@ -281,8 +281,11 @@ func BenchmarkVerifyDataColumnSidecarKZGProofs_SameCommitments_NoBatch(b *testin
|
||||
}
|
||||
|
||||
func BenchmarkVerifyDataColumnSidecarKZGProofs_DiffCommitments_Batch(b *testing.B) {
|
||||
const blobCount = 12
|
||||
numberOfColumns := int64(params.BeaconConfig().NumberOfColumns)
|
||||
const (
|
||||
blobCount = 12
|
||||
numberOfColumns = fieldparams.NumberOfColumns
|
||||
)
|
||||
|
||||
err := kzg.Start()
|
||||
require.NoError(b, err)
|
||||
|
||||
@@ -387,10 +390,10 @@ func generateRandomSidecars(t testing.TB, seed, blobCount int64) []blocks.ROData
|
||||
sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cellsAndProofs := util.GenerateCellsAndProofs(t, blobs)
|
||||
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
|
||||
rob, err := blocks.NewROBlock(sBlock)
|
||||
require.NoError(t, err)
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(rob))
|
||||
sidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(rob))
|
||||
require.NoError(t, err)
|
||||
|
||||
return sidecars
|
||||
|
||||
@@ -2,6 +2,8 @@ package peerdas
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -25,7 +27,114 @@ var (
|
||||
func MinimumColumnCountToReconstruct() uint64 {
|
||||
// If the number of columns is odd, then we need total / 2 + 1 columns to reconstruct.
|
||||
// If the number of columns is even, then we need total / 2 columns to reconstruct.
|
||||
return (params.BeaconConfig().NumberOfColumns + 1) / 2
|
||||
return (fieldparams.NumberOfColumns + 1) / 2
|
||||
}
|
||||
|
||||
// MinimumCustodyGroupCountToReconstruct returns the minimum number of custody groups needed to
|
||||
// custody enough data columns for reconstruction. This accounts for the relationship between
|
||||
// custody groups and columns, making it future-proof if these values change.
|
||||
// Returns an error if the configuration values are invalid (zero or would cause division by zero).
|
||||
func MinimumCustodyGroupCountToReconstruct() (uint64, error) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
cfg := params.BeaconConfig()
|
||||
|
||||
// Validate configuration values
|
||||
if numberOfColumns == 0 {
|
||||
return 0, errors.New("NumberOfColumns cannot be zero")
|
||||
}
|
||||
if cfg.NumberOfCustodyGroups == 0 {
|
||||
return 0, errors.New("NumberOfCustodyGroups cannot be zero")
|
||||
}
|
||||
|
||||
minimumColumnCount := MinimumColumnCountToReconstruct()
|
||||
|
||||
// Calculate how many columns each custody group represents
|
||||
columnsPerGroup := numberOfColumns / cfg.NumberOfCustodyGroups
|
||||
|
||||
// If there are more groups than columns (columnsPerGroup = 0), this is an invalid configuration
|
||||
// for reconstruction purposes as we cannot determine a meaningful custody group count
|
||||
if columnsPerGroup == 0 {
|
||||
return 0, errors.Errorf("invalid configuration: NumberOfCustodyGroups (%d) exceeds NumberOfColumns (%d)",
|
||||
cfg.NumberOfCustodyGroups, numberOfColumns)
|
||||
}
|
||||
|
||||
// Use ceiling division to ensure we have enough groups to cover the minimum columns
|
||||
// ceiling(a/b) = (a + b - 1) / b
|
||||
return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup, nil
|
||||
}
|
||||
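// Illustrative sketch (hedged; the helper name is hypothetical): the arithmetic above,
// worked for mainnet-style values of 128 columns and 128 custody groups. It assumes
// both inputs are non-zero and custodyGroups <= numberOfColumns.
//
//	columnsPerGroup    = 128 / 128     = 1
//	minimumColumnCount = (128 + 1) / 2 = 64
//	groups needed      = ceil(64 / 1)  = 64
func minimumCustodyGroupCountExample(numberOfColumns, custodyGroups uint64) uint64 {
	columnsPerGroup := numberOfColumns / custodyGroups
	minimumColumnCount := (numberOfColumns + 1) / 2
	return (minimumColumnCount + columnsPerGroup - 1) / columnsPerGroup
}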
|
||||
// recoverCellsForBlobs reconstructs cells for specified blobs from the given data column sidecars.
|
||||
// This is optimized to only recover cells without computing proofs.
|
||||
// Returns a map from blob index to recovered cells.
|
||||
func recoverCellsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blobIndices []int) (map[int][]kzg.Cell, error) {
|
||||
sidecarCount := len(verifiedRoSidecars)
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make(map[int][]kzg.Cell, len(blobIndices))
|
||||
var mu sync.Mutex
|
||||
|
||||
for _, blobIndex := range blobIndices {
|
||||
wg.Go(func() error {
|
||||
cellsIndices := make([]uint64, 0, sidecarCount)
|
||||
cells := make([]kzg.Cell, 0, sidecarCount)
|
||||
|
||||
for _, sidecar := range verifiedRoSidecars {
|
||||
cell := sidecar.Column[blobIndex]
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
cellsIndices = append(cellsIndices, sidecar.Index)
|
||||
}
|
||||
|
||||
recoveredCells, err := kzg.RecoverCells(cellsIndices, cells)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells for blob %d", blobIndex)
|
||||
}
|
||||
|
||||
mu.Lock()
|
||||
cellsPerBlob[blobIndex] = recoveredCells
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, errors.Wrap(err, "wait for RecoverCells")
|
||||
}
|
||||
return cellsPerBlob, nil
|
||||
}
|
||||
|
||||
// recoverCellsAndProofsForBlobs reconstructs both cells and proofs for specified blobs from the given data column sidecars.
|
||||
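// The i-th entries of the returned slices correspond to blobIndices[i], not to blob
// index i itself; callers that want index-aligned output pass blobIndices = 0..blobCount-1,
// as ReconstructDataColumnSidecars does below.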
func recoverCellsAndProofsForBlobs(verifiedRoSidecars []blocks.VerifiedRODataColumn, blobIndices []int) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
sidecarCount := len(verifiedRoSidecars)
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobIndices))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobIndices))
|
||||
|
||||
for i, blobIndex := range blobIndices {
|
||||
wg.Go(func() error {
|
||||
cellsIndices := make([]uint64, 0, sidecarCount)
|
||||
cells := make([]kzg.Cell, 0, sidecarCount)
|
||||
|
||||
for _, sidecar := range verifiedRoSidecars {
|
||||
cell := sidecar.Column[blobIndex]
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
cellsIndices = append(cellsIndices, sidecar.Index)
|
||||
}
|
||||
|
||||
recoveredCells, recoveredProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
|
||||
}
|
||||
cellsPerBlob[i] = recoveredCells
|
||||
proofsPerBlob[i] = recoveredProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
|
||||
}
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
}
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
|
||||
@@ -66,38 +175,16 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
|
||||
})
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
var wg errgroup.Group
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
for blobIndex := range uint64(blobCount) {
|
||||
wg.Go(func() error {
|
||||
cellsIndices := make([]uint64, 0, sidecarCount)
|
||||
cells := make([]kzg.Cell, 0, sidecarCount)
|
||||
|
||||
for _, sidecar := range verifiedRoSidecars {
|
||||
cell := sidecar.Column[blobIndex]
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
cellsIndices = append(cellsIndices, sidecar.Index)
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
cellsAndProofsForBlob, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", blobIndex)
|
||||
}
|
||||
|
||||
// It is safe for multiple goroutines to concurrently write to the same slice,
|
||||
// as long as they are writing to different indices, which is the case here.
|
||||
cellsAndProofs[blobIndex] = cellsAndProofsForBlob
|
||||
return nil
|
||||
})
|
||||
blobIndices := make([]int, blobCount)
|
||||
for i := range blobIndices {
|
||||
blobIndices[i] = i
|
||||
}
|
||||
cellsPerBlob, proofsPerBlob, err := recoverCellsAndProofsForBlobs(verifiedRoSidecars, blobIndices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "recover cells and proofs for blobs")
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, errors.Wrap(err, "wait for RecoverCellsAndKZGProofs")
|
||||
}
|
||||
|
||||
outSidecars, err := DataColumnSidecars(cellsAndProofs, PopulateFromSidecar(referenceSidecar))
|
||||
outSidecars, err := DataColumnSidecars(cellsPerBlob, proofsPerBlob, PopulateFromSidecar(referenceSidecar))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars from items")
|
||||
}
|
||||
@@ -113,18 +200,216 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
|
||||
return reconstructedVerifiedRoSidecars, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobs constructs verified read only blobs sidecars from verified read only blob sidecars.
|
||||
// reconstructIfNeeded validates the input data column sidecars and returns the prepared sidecars
|
||||
// (reconstructed if necessary). This function performs common validation and reconstruction logic used by
|
||||
// both ReconstructBlobs and ReconstructBlobSidecars.
|
||||
func reconstructIfNeeded(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
if len(verifiedDataColumnSidecars) == 0 {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Check if the sidecars are sorted by index and do not contain duplicates.
|
||||
previousColumnIndex := verifiedDataColumnSidecars[0].Index
|
||||
for _, dataColumnSidecar := range verifiedDataColumnSidecars[1:] {
|
||||
columnIndex := dataColumnSidecar.Index
|
||||
if columnIndex <= previousColumnIndex {
|
||||
return nil, ErrDataColumnSidecarsNotSortedByIndex
|
||||
}
|
||||
|
||||
previousColumnIndex = columnIndex
|
||||
}
|
||||
|
||||
// Check if we have enough columns.
|
||||
cellsPerBlob := fieldparams.CellsPerBlob
|
||||
if len(verifiedDataColumnSidecars) < cellsPerBlob {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
|
||||
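// Because the sidecars are sorted by index with no duplicates, the sidecar at
// position cellsPerBlob-1 can only carry column index cellsPerBlob-1 when the
// first cellsPerBlob sidecars are exactly columns 0..cellsPerBlob-1.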
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
|
||||
return verifiedDataColumnSidecars, nil
|
||||
}
|
||||
|
||||
// We need to reconstruct the data column sidecars.
|
||||
return ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
|
||||
}
|
||||
|
||||
// ReconstructBlobSidecars constructs verified read-only blob sidecars from verified read-only data column sidecars.
|
||||
// The following constraints must be satisfied:
|
||||
// - All `dataColumnSidecars` must be committed to the same block, and
|
||||
// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
|
||||
// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
|
||||
// or either enough sidecars to reconstruct the blobs.
|
||||
func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
|
||||
// - either enough sidecars to reconstruct the blobs.
|
||||
func ReconstructBlobSidecars(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int) ([]*blocks.VerifiedROBlob, error) {
|
||||
// Return early if no blobs are requested.
|
||||
if len(indices) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Validate and prepare data columns (reconstruct if necessary).
|
||||
// This also checks if input is empty.
|
||||
preparedDataColumnSidecars, err := reconstructIfNeeded(verifiedDataColumnSidecars)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if the blob index is too high.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
}
|
||||
|
||||
for _, blobIndex := range indices {
|
||||
if blobIndex >= len(commitments) {
|
||||
return nil, ErrBlobIndexTooHigh
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the data column sidecars are aligned with the block.
|
||||
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(preparedDataColumnSidecars))
|
||||
for _, verifiedDataColumnSidecar := range preparedDataColumnSidecars {
|
||||
dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
|
||||
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
|
||||
}
|
||||
|
||||
if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {
|
||||
return nil, errors.Wrap(err, "data columns align with block")
|
||||
}
|
||||
|
||||
// Convert verified data column sidecars to verified blob sidecars.
|
||||
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, preparedDataColumnSidecars, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
|
||||
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
proofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
|
||||
for i, blob := range blobs {
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
proofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = proofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
}
|
||||
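// Illustrative sketch (hypothetical helper): ComputeCellsAndProofsFromFlat expects the
// proof for blob i, column j at flat index i*NumberOfColumns + j. A caller producing
// that layout from per-blob proofs might look like this.
func flattenCellProofs(proofsPerBlob [][]kzg.Proof) [][]byte {
	flat := make([][]byte, 0, len(proofsPerBlob)*fieldparams.NumberOfColumns)
	for _, proofs := range proofsPerBlob {
		for _, proof := range proofs {
			flat = append(flat, proof[:])
		}
	}
	return flat
}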
|
||||
// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
|
||||
var wg errgroup.Group
|
||||
|
||||
cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
|
||||
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))
|
||||
|
||||
for i, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
wg.Go(func() error {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsPerBlob[i] = cells
|
||||
proofsPerBlob[i] = kzgProofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := wg.Wait(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return cellsPerBlob, proofsPerBlob, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobs reconstructs blobs from data column sidecars without computing KZG proofs or creating sidecars.
|
||||
// This is an optimized version for when only the blob data is needed (e.g., for the GetBlobs endpoint).
|
||||
// The following constraints must be satisfied:
|
||||
// - All `dataColumnSidecars` must be committed to the same block, and
|
||||
// - `dataColumnSidecars` must be sorted by index and should not contain duplicates.
|
||||
// - `dataColumnSidecars` must contain either all sidecars corresponding to (non-extended) blobs,
|
||||
// - or enough sidecars to reconstruct the blobs.
|
||||
func ReconstructBlobs(verifiedDataColumnSidecars []blocks.VerifiedRODataColumn, indices []int, blobCount int) ([][]byte, error) {
|
||||
// If no specific indices are requested, populate with all blob indices.
|
||||
if len(indices) == 0 {
|
||||
indices = make([]int, blobCount)
|
||||
for i := range indices {
|
||||
indices[i] = i
|
||||
}
|
||||
}
|
||||
|
||||
if len(verifiedDataColumnSidecars) == 0 {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
@@ -146,136 +431,70 @@ func ReconstructBlobs(block blocks.ROBlock, verifiedDataColumnSidecars []blocks.
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Check if the blob index is too high.
|
||||
commitments, err := block.Block().Body().BlobKzgCommitments()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob KZG commitments")
|
||||
// Verify that the actual blob count from the first sidecar matches the expected count
|
||||
referenceSidecar := verifiedDataColumnSidecars[0]
|
||||
actualBlobCount := len(referenceSidecar.Column)
|
||||
if actualBlobCount != blobCount {
|
||||
return nil, errors.Errorf("blob count mismatch: expected %d, got %d", blobCount, actualBlobCount)
|
||||
}
|
||||
|
||||
// Check if the blob index is too high.
|
||||
for _, blobIndex := range indices {
|
||||
if blobIndex >= len(commitments) {
|
||||
if blobIndex >= blobCount {
|
||||
return nil, ErrBlobIndexTooHigh
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the data column sidecars are aligned with the block.
|
||||
dataColumnSidecars := make([]blocks.RODataColumn, 0, len(verifiedDataColumnSidecars))
|
||||
for _, verifiedDataColumnSidecar := range verifiedDataColumnSidecars {
|
||||
dataColumnSidecar := verifiedDataColumnSidecar.RODataColumn
|
||||
dataColumnSidecars = append(dataColumnSidecars, dataColumnSidecar)
|
||||
// Check if all columns have the same length and are committed to the same block.
|
||||
blockRoot := referenceSidecar.BlockRoot()
|
||||
for _, sidecar := range verifiedDataColumnSidecars[1:] {
|
||||
if len(sidecar.Column) != blobCount {
|
||||
return nil, ErrColumnLengthsDiffer
|
||||
}
|
||||
|
||||
if sidecar.BlockRoot() != blockRoot {
|
||||
return nil, ErrBlockRootMismatch
|
||||
}
|
||||
}
|
||||
|
||||
if err := DataColumnsAlignWithBlock(block, dataColumnSidecars); err != nil {
|
||||
return nil, errors.Wrap(err, "data columns align with block")
|
||||
}
|
||||
// Check if we have all non-extended columns (0..63) - if so, no reconstruction needed.
|
||||
hasAllNonExtendedColumns := verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1)
|
||||
|
||||
// If all column sidecars corresponding to (non-extended) blobs are present, no need to reconstruct.
|
||||
if verifiedDataColumnSidecars[cellsPerBlob-1].Index == uint64(cellsPerBlob-1) {
|
||||
// Convert verified data column sidecars to verified blob sidecars.
|
||||
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, verifiedDataColumnSidecars, indices)
|
||||
var reconstructedCells map[int][]kzg.Cell
|
||||
if !hasAllNonExtendedColumns {
|
||||
// Need to reconstruct cells (but NOT proofs) for the requested blobs only.
|
||||
var err error
|
||||
reconstructedCells, err = recoverCellsForBlobs(verifiedDataColumnSidecars, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
|
||||
return nil, errors.Wrap(err, "recover cells")
|
||||
}
|
||||
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// We need to reconstruct the data column sidecars.
|
||||
reconstructedDataColumnSidecars, err := ReconstructDataColumnSidecars(verifiedDataColumnSidecars)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "reconstruct data column sidecars")
|
||||
}
|
||||
// Extract blob data without computing proofs.
|
||||
blobs := make([][]byte, 0, len(indices))
|
||||
for _, blobIndex := range indices {
|
||||
var blob kzg.Blob
|
||||
|
||||
// Convert verified data column sidecars to verified blob sidecars.
|
||||
blobSidecars, err := blobSidecarsFromDataColumnSidecars(block, reconstructedDataColumnSidecars, indices)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "blob sidecars from data column sidecars")
|
||||
}
|
||||
|
||||
return blobSidecars, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofsFromFlat computes the cells and proofs from blobs and cell flat proofs.
|
||||
func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([]kzg.CellsAndProofs, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
blobCount := uint64(len(blobs))
|
||||
cellProofsCount := uint64(len(cellProofs))
|
||||
|
||||
cellsCount := blobCount * numberOfColumns
|
||||
if cellsCount != cellProofsCount {
|
||||
return nil, ErrBlobsCellsProofsMismatch
|
||||
}
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, blobCount)
|
||||
for i, blob := range blobs {
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
var proofs []kzg.Proof
|
||||
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
// Compute the content of the blob.
|
||||
for columnIndex := range cellsPerBlob {
|
||||
var cell []byte
|
||||
if hasAllNonExtendedColumns {
|
||||
// Use existing cells from sidecars
|
||||
cell = verifiedDataColumnSidecars[columnIndex].Column[blobIndex]
|
||||
} else {
|
||||
// Use reconstructed cells
|
||||
cell = reconstructedCells[blobIndex][columnIndex][:]
|
||||
}
|
||||
|
||||
proofs = append(proofs, kzgProof)
|
||||
if copy(blob[kzg.BytesPerCell*columnIndex:], cell) != kzg.BytesPerCell {
|
||||
return nil, errors.New("wrong cell size - should never happen")
|
||||
}
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: proofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
blobs = append(blobs, blob[:])
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
}
|
||||
|
||||
// ComputeCellsAndProofs computes the cells and proofs from blobs and cell proofs.
|
||||
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([]kzg.CellsAndProofs, error) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, 0, len(blobsAndProofs))
|
||||
for _, blobAndProof := range blobsAndProofs {
|
||||
if blobAndProof == nil {
|
||||
return nil, ErrNilBlobAndProof
|
||||
}
|
||||
|
||||
var kzgBlob kzg.Blob
|
||||
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
|
||||
return nil, errors.New("wrong blob size - should never happen")
|
||||
}
|
||||
|
||||
// Compute the extended cells from the (non-extended) blob.
|
||||
cells, err := kzg.ComputeCells(&kzgBlob)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "compute cells")
|
||||
}
|
||||
|
||||
kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
|
||||
for _, kzgProofBytes := range blobAndProof.KzgProofs {
|
||||
if len(kzgProofBytes) != kzg.BytesPerProof {
|
||||
return nil, errors.New("wrong KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
var kzgProof kzg.Proof
|
||||
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
|
||||
return nil, errors.New("wrong copied KZG proof size - should never happen")
|
||||
}
|
||||
|
||||
kzgProofs = append(kzgProofs, kzgProof)
|
||||
}
|
||||
|
||||
cellsProofs := kzg.CellsAndProofs{Cells: cells, Proofs: kzgProofs}
|
||||
cellsAndProofs = append(cellsAndProofs, cellsProofs)
|
||||
}
|
||||
|
||||
return cellsAndProofs, nil
|
||||
return blobs, nil
|
||||
}
|
||||
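// Illustrative sketch (hypothetical helper): the reassembly step above treats a blob as
// the concatenation of its first CellsPerBlob non-extended cells, each BytesPerCell wide.
func assembleBlob(cells []kzg.Cell) ([]byte, error) {
	if len(cells) < fieldparams.CellsPerBlob {
		return nil, errors.New("not enough cells to assemble a blob")
	}
	var blob kzg.Blob
	for columnIndex := 0; columnIndex < fieldparams.CellsPerBlob; columnIndex++ {
		if copy(blob[kzg.BytesPerCell*columnIndex:], cells[columnIndex][:]) != kzg.BytesPerCell {
			return nil, errors.New("wrong cell size")
		}
	}
	return blob[:], nil
}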
|
||||
// blobSidecarsFromDataColumnSidecars converts verified data column sidecars to verified blob sidecars.
|
||||
|
||||
79
beacon-chain/core/peerdas/reconstruction_helpers_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package peerdas_test
|
||||
|
||||
// Test helpers for reconstruction tests
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
// testBlobSetup holds common test data for blob reconstruction tests.
|
||||
type testBlobSetup struct {
|
||||
blobCount int
|
||||
blobs []kzg.Blob
|
||||
roBlock blocks.ROBlock
|
||||
roDataColumnSidecars []blocks.RODataColumn
|
||||
verifiedRoDataColumnSidecars []blocks.VerifiedRODataColumn
|
||||
}
|
||||
|
||||
// setupTestBlobs creates a complete test setup with blobs, cells, proofs, and data column sidecars.
|
||||
func setupTestBlobs(t *testing.T, blobCount int) *testBlobSetup {
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 42, blobCount)
|
||||
|
||||
blobs := make([]kzg.Blob, blobCount)
|
||||
for i := range blobCount {
|
||||
copy(blobs[i][:], roBlobSidecars[i].Blob)
|
||||
}
|
||||
|
||||
cellsPerBlob, proofsPerBlob := util.GenerateCellsAndProofs(t, blobs)
|
||||
|
||||
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
|
||||
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, blobCount, util.WithSlot(fs))
|
||||
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoSidecars := toVerifiedSidecars(roDataColumnSidecars)
|
||||
|
||||
return &testBlobSetup{
|
||||
blobCount: blobCount,
|
||||
blobs: blobs,
|
||||
roBlock: roBlock,
|
||||
roDataColumnSidecars: roDataColumnSidecars,
|
||||
verifiedRoDataColumnSidecars: verifiedRoSidecars,
|
||||
}
|
||||
}
|
||||
|
||||
// toVerifiedSidecars converts a slice of RODataColumn to VerifiedRODataColumn.
|
||||
func toVerifiedSidecars(roDataColumnSidecars []blocks.RODataColumn) []blocks.VerifiedRODataColumn {
|
||||
verifiedRoSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumnSidecars))
|
||||
for _, roDataColumnSidecar := range roDataColumnSidecars {
|
||||
verifiedRoSidecar := blocks.NewVerifiedRODataColumn(roDataColumnSidecar)
|
||||
verifiedRoSidecars = append(verifiedRoSidecars, verifiedRoSidecar)
|
||||
}
|
||||
return verifiedRoSidecars
|
||||
}
|
||||
|
||||
// filterEvenIndexedSidecars returns only the even-indexed sidecars (0, 2, 4, ...).
|
||||
// This is useful for forcing reconstruction in tests.
|
||||
func filterEvenIndexedSidecars(sidecars []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
|
||||
filtered := make([]blocks.VerifiedRODataColumn, 0, len(sidecars)/2)
|
||||
for i := 0; i < len(sidecars); i += 2 {
|
||||
filtered = append(filtered, sidecars[i])
|
||||
}
|
||||
return filtered
|
||||
}
|
||||
|
||||
// setupFuluForkEpoch sets up the test configuration with Fulu fork after Electra.
|
||||
func setupFuluForkEpoch(t *testing.T) primitives.Slot {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
return util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
|
||||
}
|
||||
@@ -17,41 +17,9 @@ import (
|
||||
)
|
||||
|
||||
func TestMinimumColumnsCountToReconstruct(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
numberOfColumns uint64
|
||||
expected uint64
|
||||
}{
|
||||
{
|
||||
name: "numberOfColumns=128",
|
||||
numberOfColumns: 128,
|
||||
expected: 64,
|
||||
},
|
||||
{
|
||||
name: "numberOfColumns=129",
|
||||
numberOfColumns: 129,
|
||||
expected: 65,
|
||||
},
|
||||
{
|
||||
name: "numberOfColumns=130",
|
||||
numberOfColumns: 130,
|
||||
expected: 65,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Set the total number of columns.
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.NumberOfColumns = tc.numberOfColumns
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Compute the minimum number of columns needed to reconstruct.
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, tc.expected, actual)
|
||||
})
|
||||
}
|
||||
const expected = uint64(64)
|
||||
actual := peerdas.MinimumColumnCountToReconstruct()
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
@@ -124,7 +92,7 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestReconstructBlobs(t *testing.T) {
|
||||
func TestReconstructBlobSidecars(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
|
||||
@@ -133,13 +101,13 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)
|
||||
|
||||
t.Run("no index", func(t *testing.T) {
|
||||
actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
|
||||
actual, err := peerdas.ReconstructBlobSidecars(emptyBlock, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.IsNil(t, actual)
|
||||
})
|
||||
|
||||
t.Run("empty input", func(t *testing.T) {
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, nil, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, nil, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
@@ -149,7 +117,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
// Arbitrarily change the order of the sidecars.
|
||||
verifiedRoSidecars[3], verifiedRoSidecars[2] = verifiedRoSidecars[2], verifiedRoSidecars[3]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
@@ -159,7 +127,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
// [0, 1, 1, 3, 4, ...]
|
||||
verifiedRoSidecars[2] = verifiedRoSidecars[1]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
@@ -169,7 +137,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
// [0, 1, 2, 1, 4, ...]
|
||||
verifiedRoSidecars[3] = verifiedRoSidecars[1]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
@@ -177,7 +145,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3)
|
||||
|
||||
inputSidecars := verifiedRoSidecars[:fieldparams.CellsPerBlob-1]
|
||||
_, err := peerdas.ReconstructBlobs(emptyBlock, inputSidecars, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(emptyBlock, inputSidecars, []int{0})
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
@@ -186,7 +154,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
roBlock, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, blobCount)
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{1, blobCount})
|
||||
_, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, []int{1, blobCount})
|
||||
require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh)
|
||||
})
|
||||
|
||||
@@ -194,20 +162,20 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
|
||||
roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
|
||||
_, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, []int{0})
|
||||
require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 3
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
roBlock, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
// Compute cells and proofs from blob sidecars.
|
||||
var wg errgroup.Group
|
||||
blobs := make([][]byte, blobCount)
|
||||
inputCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
inputCellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
inputProofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
for i := range blobCount {
|
||||
blob := roBlobSidecars[i].Blob
|
||||
blobs[i] = blob
|
||||
@@ -217,14 +185,15 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
|
||||
// It is safe for multiple goroutines to concurrently write to the same slice,
|
||||
// as long as they are writing to different indices, which is the case here.
|
||||
inputCellsAndProofs[i] = cp
|
||||
inputCellsPerBlob[i] = cells
|
||||
inputProofsPerBlob[i] = proofs
|
||||
|
||||
return nil
|
||||
})
|
||||
@@ -234,19 +203,19 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Flatten proofs.
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range inputCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
cellProofs := make([][]byte, 0, blobCount*fieldparams.NumberOfColumns)
|
||||
for _, proofs := range inputProofsPerBlob {
|
||||
for _, proof := range proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Compute cells and proofs from the blobs and cell proofs.
|
||||
cellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
cellsPerBlob, proofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Construct data column sidecars from the signed block and cells and proofs.
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsAndProofs, peerdas.PopulateFromBlock(roBlock))
|
||||
roDataColumnSidecars, err := peerdas.DataColumnSidecars(cellsPerBlob, proofsPerBlob, peerdas.PopulateFromBlock(roBlock))
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert to verified data column sidecars.
|
||||
@@ -260,7 +229,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
t.Run("no reconstruction needed", func(t *testing.T) {
|
||||
// Reconstruct blobs.
|
||||
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, indices)
|
||||
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobSidecars(roBlock, verifiedRoSidecars, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compare blobs.
|
||||
@@ -280,7 +249,7 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
}
|
||||
|
||||
// Reconstruct blobs.
|
||||
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobs(roBlock, filteredSidecars, indices)
|
||||
reconstructedVerifiedRoBlobSidecars, err := peerdas.ReconstructBlobSidecars(roBlock, filteredSidecars, indices)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Compare blobs.
|
||||
@@ -296,34 +265,162 @@ func TestReconstructBlobs(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestReconstructBlobs(t *testing.T) {
|
||||
setupFuluForkEpoch(t)
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
t.Run("empty indices with blobCount > 0", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Call with empty indices - should return all blobs
|
||||
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{}, setup.blobCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, setup.blobCount, len(reconstructedBlobs))
|
||||
|
||||
// Verify each blob matches
|
||||
for i := 0; i < setup.blobCount; i++ {
|
||||
require.DeepEqual(t, setup.blobs[i][:], reconstructedBlobs[i])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("specific indices", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Request only blobs at indices 0 and 2
|
||||
indices := []int{0, 2}
|
||||
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, indices, setup.blobCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(indices), len(reconstructedBlobs))
|
||||
|
||||
// Verify requested blobs match
|
||||
for i, blobIndex := range indices {
|
||||
require.DeepEqual(t, setup.blobs[blobIndex][:], reconstructedBlobs[i])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("blob count mismatch", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Pass wrong blob count
|
||||
wrongBlobCount := 5
|
||||
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{0}, wrongBlobCount)
|
||||
require.ErrorContains(t, "blob count mismatch", err)
|
||||
})
|
||||
|
||||
t.Run("empty data columns", func(t *testing.T) {
|
||||
_, err := peerdas.ReconstructBlobs([]blocks.VerifiedRODataColumn{}, []int{0}, 1)
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
t.Run("index too high", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Request blob index that's too high
|
||||
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{setup.blobCount}, setup.blobCount)
|
||||
require.ErrorIs(t, err, peerdas.ErrBlobIndexTooHigh)
|
||||
})
|
||||
|
||||
t.Run("not enough columns", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Only provide 63 columns (need at least 64)
|
||||
inputSidecars := setup.verifiedRoDataColumnSidecars[:fieldparams.CellsPerBlob-1]
|
||||
_, err := peerdas.ReconstructBlobs(inputSidecars, []int{0}, setup.blobCount)
|
||||
require.ErrorIs(t, err, peerdas.ErrNotEnoughDataColumnSidecars)
|
||||
})
|
||||
|
||||
t.Run("not sorted", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Swap two sidecars to make them unsorted
|
||||
setup.verifiedRoDataColumnSidecars[3], setup.verifiedRoDataColumnSidecars[2] = setup.verifiedRoDataColumnSidecars[2], setup.verifiedRoDataColumnSidecars[3]
|
||||
|
||||
_, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{0}, setup.blobCount)
|
||||
require.ErrorIs(t, err, peerdas.ErrDataColumnSidecarsNotSortedByIndex)
|
||||
})
|
||||
|
||||
t.Run("with reconstruction needed", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Keep only even-indexed columns (will need reconstruction)
|
||||
filteredSidecars := filterEvenIndexedSidecars(setup.verifiedRoDataColumnSidecars)
|
||||
|
||||
// Reconstruct all blobs
|
||||
reconstructedBlobs, err := peerdas.ReconstructBlobs(filteredSidecars, []int{}, setup.blobCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, setup.blobCount, len(reconstructedBlobs))
|
||||
|
||||
// Verify all blobs match
|
||||
for i := range setup.blobCount {
|
||||
require.DeepEqual(t, setup.blobs[i][:], reconstructedBlobs[i])
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("no reconstruction needed - all non-extended columns present", func(t *testing.T) {
|
||||
setup := setupTestBlobs(t, 3)
|
||||
|
||||
// Use all columns (no reconstruction needed since we have all non-extended columns 0-63)
|
||||
reconstructedBlobs, err := peerdas.ReconstructBlobs(setup.verifiedRoDataColumnSidecars, []int{1}, setup.blobCount)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(reconstructedBlobs))
|
||||
|
||||
// Verify blob matches
|
||||
require.DeepEqual(t, setup.blobs[1][:], reconstructedBlobs[0])
|
||||
})
|
||||
|
||||
t.Run("reconstruct only requested blob indices", func(t *testing.T) {
|
||||
// This test verifies the optimization: when reconstruction is needed and specific
|
||||
// blob indices are requested, we only reconstruct those blobs, not all of them.
|
||||
setup := setupTestBlobs(t, 6)
|
||||
|
||||
// Keep only even-indexed columns (will need reconstruction)
|
||||
// This ensures we don't have all non-extended columns (0-63)
|
||||
filteredSidecars := filterEvenIndexedSidecars(setup.verifiedRoDataColumnSidecars)
|
||||
|
||||
// Request only specific blob indices (not all of them)
|
||||
requestedIndices := []int{1, 3, 5}
|
||||
reconstructedBlobs, err := peerdas.ReconstructBlobs(filteredSidecars, requestedIndices, setup.blobCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should only get the requested blobs back (not all 6)
|
||||
require.Equal(t, len(requestedIndices), len(reconstructedBlobs),
|
||||
"should only reconstruct requested blobs, not all blobs")
|
||||
|
||||
// Verify each requested blob matches the original
|
||||
for i, blobIndex := range requestedIndices {
|
||||
require.DeepEqual(t, setup.blobs[blobIndex][:], reconstructedBlobs[i],
|
||||
"blob at index %d should match", blobIndex)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
const numberOfColumns = fieldparams.NumberOfColumns
|
||||
// Start the trusted setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("mismatched blob and proof counts", func(t *testing.T) {
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Create one blob but proofs for two blobs
|
||||
blobs := [][]byte{{}}
|
||||
|
||||
// Create proofs for 2 blobs worth of columns
|
||||
cellProofs := make([][]byte, 2*numberOfColumns)
|
||||
|
||||
_, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
_, _, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.ErrorIs(t, err, peerdas.ErrBlobsCellsProofsMismatch)
|
||||
})
|
||||
|
||||
t.Run("nominal", func(t *testing.T) {
|
||||
const blobCount = 2
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
// Generate test blobs
|
||||
_, roBlobSidecars := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 42, blobCount)
|
||||
|
||||
// Extract blobs and compute expected cells and proofs
|
||||
blobs := make([][]byte, blobCount)
|
||||
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
expectedCellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
expectedProofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
var wg errgroup.Group
|
||||
|
||||
for i := range blobCount {
|
||||
@@ -335,12 +432,13 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cp, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
|
||||
expectedCellsAndProofs[i] = cp
|
||||
expectedCellsPerBlob[i] = cells
|
||||
expectedProofsPerBlob[i] = proofs
|
||||
return nil
|
||||
})
|
||||
}
|
||||
@@ -350,30 +448,30 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
|
||||
// Flatten proofs
|
||||
cellProofs := make([][]byte, 0, blobCount*numberOfColumns)
|
||||
for _, cp := range expectedCellsAndProofs {
|
||||
for _, proof := range cp.Proofs {
|
||||
for _, proofs := range expectedProofsPerBlob {
|
||||
for _, proof := range proofs {
|
||||
cellProofs = append(cellProofs, proof[:])
|
||||
}
|
||||
}
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromFlat(blobs, cellProofs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsAndProofs))
|
||||
require.Equal(t, blobCount, len(actualCellsPerBlob))
|
||||
|
||||
// Verify the results match expected
|
||||
for i := range blobCount {
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
|
||||
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
|
||||
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
|
||||
|
||||
// Compare cells
|
||||
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
|
||||
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
|
||||
for j, expectedCell := range expectedCellsPerBlob[i] {
|
||||
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
|
||||
}
|
||||
|
||||
// Compare proofs
|
||||
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
|
||||
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
|
||||
for j, expectedProof := range expectedProofsPerBlob[i] {
|
||||
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -381,7 +479,7 @@ func TestComputeCellsAndProofsFromFlat(t *testing.T) {
|
||||
|
||||
func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
t.Run("nil blob and proof", func(t *testing.T) {
|
||||
_, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
|
||||
_, _, err := peerdas.ComputeCellsAndProofsFromStructured([]*pb.BlobAndProofV2{nil})
|
||||
require.ErrorIs(t, err, peerdas.ErrNilBlobAndProof)
|
||||
})
|
||||
|
||||
@@ -397,7 +495,8 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
|
||||
// Extract blobs and compute expected cells and proofs
|
||||
blobsAndProofs := make([]*pb.BlobAndProofV2, blobCount)
|
||||
expectedCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
expectedCellsPerBlob := make([][]kzg.Cell, blobCount)
|
||||
expectedProofsPerBlob := make([][]kzg.Proof, blobCount)
|
||||
|
||||
var wg errgroup.Group
|
||||
for i := range blobCount {
|
||||
@@ -408,14 +507,15 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
count := copy(kzgBlob[:], blob)
|
||||
require.Equal(t, len(kzgBlob), count)
|
||||
|
||||
cellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
cells, proofs, err := kzg.ComputeCellsAndKZGProofs(&kzgBlob)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "compute cells and kzg proofs for blob %d", i)
|
||||
}
|
||||
expectedCellsAndProofs[i] = cellsAndProofs
|
||||
expectedCellsPerBlob[i] = cells
|
||||
expectedProofsPerBlob[i] = proofs
|
||||
|
||||
kzgProofs := make([][]byte, 0, len(cellsAndProofs.Proofs))
|
||||
for _, proof := range cellsAndProofs.Proofs {
|
||||
kzgProofs := make([][]byte, 0, len(proofs))
|
||||
for _, proof := range proofs {
|
||||
kzgProofs = append(kzgProofs, proof[:])
|
||||
}
|
||||
|
||||
@@ -433,24 +533,24 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test ComputeCellsAndProofs
|
||||
actualCellsAndProofs, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
|
||||
actualCellsPerBlob, actualProofsPerBlob, err := peerdas.ComputeCellsAndProofsFromStructured(blobsAndProofs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, blobCount, len(actualCellsAndProofs))
|
||||
require.Equal(t, blobCount, len(actualCellsPerBlob))
|
||||
|
||||
// Verify the results match expected
|
||||
for i := range blobCount {
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
|
||||
require.Equal(t, len(expectedCellsAndProofs[i].Proofs), cap(actualCellsAndProofs[i].Proofs))
|
||||
require.Equal(t, len(expectedCellsPerBlob[i]), len(actualCellsPerBlob[i]))
|
||||
require.Equal(t, len(expectedProofsPerBlob[i]), len(actualProofsPerBlob[i]))
|
||||
require.Equal(t, len(expectedProofsPerBlob[i]), cap(actualProofsPerBlob[i]))
|
||||
|
||||
// Compare cells
|
||||
for j, expectedCell := range expectedCellsAndProofs[i].Cells {
|
||||
require.Equal(t, expectedCell, actualCellsAndProofs[i].Cells[j])
|
||||
for j, expectedCell := range expectedCellsPerBlob[i] {
|
||||
require.Equal(t, expectedCell, actualCellsPerBlob[i][j])
|
||||
}
|
||||
|
||||
// Compare proofs
|
||||
for j, expectedProof := range expectedCellsAndProofs[i].Proofs {
|
||||
require.Equal(t, expectedProof, actualCellsAndProofs[i].Proofs[j])
|
||||
for j, expectedProof := range expectedProofsPerBlob[i] {
|
||||
require.Equal(t, expectedProof, actualProofsPerBlob[i][j])
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.