Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: peerDAS2...peerdas-af (179 commits)
Commit SHA1s:

aa9e3158b4, 425a7e327e, 5a7aa3715e, 453ea01deb, 6537f8011e, 5f17317c1c, 79d05a87bb, 1707cf3ec7,
bdbb850250, b28b1ed6ce, 3432ffa4a3, 9dac67635b, 9be69fbd07, e21261e893, da53a8fc48, a14634e656,
43761a8066, 01dbc337c0, 92f9b55fcb, f65f12f58b, f2b61a3dcf, 77a6d29a2e, 31d16da3a0, 19221b77bd,
83df293647, c20c09ce36, 2191faaa3f, 2de1e6f3e4, db44df3964, f92eb44c89, a26980b64d, f58cf7e626,
68da7dabe2, d1e43a2c02, 3652bec2f8, 81b7a1725f, 0c917079c4, a732fe7021, d75a7aae6a, e788a46e82,
199543125a, ca63efa770, 345e6edd9c, 6403064126, 0517d76631, 000d480f77, b40a8ed37e, d21c2bd63e,
7a256e93f7, 07fe76c2da, 54affa897f, ac4c5fae3c, 2845d87077, dc2c90b8ed, b469157e1f, 2697794e58,
48cf24edb4, 78f90db90b, d0a3b9bc1d, bfdb6dab86, 7dd2fd52af, b6bad9331b, 6e2122085d, 7a847292aa,
81f4db0afa, a7dc2e6c8b, 0a010b5088, 1e335e2cf2, 42f4c0f14e, d3c12abe25, b0ba05b4f4, e206506489,
013cb28663, 496914cb39, c032e78888, 5e4deff6fd, 6daa91c465, 32ce6423eb, b0ea450df5, 8bd10df423,
dcbb543be2, be0580e1a9, 1355178115, b78c3485b9, f503efc6ed, 1bfbd3980e, 3e722ea1bc, d844026433,
9ffc19d5ef, 3e23f6e879, c688c84393, 74bb0821a8, 8025a483e2, 0475631543, f27092fa91, 67cef41cbf,
258908d50e, 415a42a4aa, 25eae3acda, 956d9d108c, c285715f9f, 9382ae736d, f16ff45a6b, 8d6577be84,
9de75b5376, a7ba11df37, 00aeea3656, 9dbf979e77, be60504512, 1857496159, ccf61e1700, 4edbd2f9ef,
5179af1438, c0f9689e30, ff8240a04f, 847498c648, 2633684339, ab3f1963e2, b87d02eeb3, bcb4155523,
77f10b9e0e, 928b707ef1, 91c15247e5, 5ef5b65ffe, 9ae97786c5, 66d1bb54f6, 4d98049054, d5ff25b59d,
a265cf08fa, f2ade3caff, e6ffc0701e, 61c296e075, f264680739, 8fe024f6a1, 6b7dd833a3, 060527032b,
a29ecb6bbe, 54656e172f, 97c8adb003, 2fe8614115, 09accc7132, 53f1f11c6d, 48fe9d9c4d, 4386c244e1,
7ac522d8ff, 52cf3a155d, 83ed320826, 616cdc1e8b, 361712e886, 9ec8c6c4b5, 073cf19b69, 6ac8090599,
4aa54107e4, ffc443b5f2, d6c5692dc0, 1086bdf2b3, 2afa63b442, 5a5193c59d, c8d3ed02cb, 30fcf5366a,
f776b968ad, de094b0078, 0a4ed8279b, f307a369a5, dc91c963b9, 7238848d81, 80cafaa6df, 8a0545c3d7,
9c61117b71, 6c22edeecc, 57cc4950c0, 2c981d5564, 492c8af83f, e40d2cbd2c, 3fa6d3bd9d, 56f0eb1437,
7fc5c714a1, cfbfccb203, 884b663455
.bazelrc (1 change)
@@ -22,6 +22,7 @@ coverage --define=coverage_enabled=1
build --workspace_status_command=./hack/workspace_status.sh

build --define blst_disabled=false
+build --compilation_mode=opt
run --define blst_disabled=false

build:blst_disabled --define blst_disabled=true
.github/workflows/go.yml (vendored, 2 changes)
@@ -54,7 +54,7 @@ jobs:
      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v5
        with:
-          version: v1.55.2
+          version: v1.56.1
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

  build:

@@ -73,6 +73,7 @@ linters:
  - promlinter
  - protogetter
  - revive
+ - spancheck
  - staticcheck
  - stylecheck
  - tagalign
CHANGELOG.md (151 changes)
@@ -4,7 +4,134 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

-## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...HEAD)
+## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...HEAD)

### Added
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430).
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter feature.
- Added GetBlockAttestationsV2 endpoint.
- Light client support: Consensus types for Electra.
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to roll back the node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, and Electra.
- Add helper to cast bytes to string without allocating memory.
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected, including the reason when we initiate the disconnection.
### Changed

- Electra EIP6110: Queue deposit requests changes from consensus spec pr #3818.
- Reversed the boolean return of `BatchVerifyDepositsSignatures`: it now reports that all keys were successfully verified rather than that verification is needed.
- Fix `engine_exchangeCapabilities` implementation.
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
- Switch to compounding when consolidating with source==target.
- Revert block db save when saving state fails.
- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read-only validator for core processing to avoid unnecessary copying.
- Use ROBlock across the block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicitly set `Slashed` to false and move `EffectiveBalance` to match the struct definition.
- Updated pgo profile for beacon chain with holesky data. This improves the profile guided optimizations in the go compiler.
- Use read-only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`, `engine_getPayloadV4` changed due to new execution request serialization decisions. [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in the block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only build the Protobuf state once during serialization.
- Capella blocks are execution.
- Fixed panic when an http request to subscribe to the event stream fails.
- Return early for the blob reconstructor during the capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".
- Non-blocking payload attribute event handling in beacon api. [pr](https://github.com/prysmaticlabs/prysm/pull/14644)
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)

### Deprecated

- `/eth/v1alpha1/validator/activation/stream` grpc wait for activation stream is deprecated. [pr](https://github.com/prysmaticlabs/prysm/pull/14514)

### Removed

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.

### Fixed

- Fixed mesh size by appending `gParams.Dhi = gossipSubDhi`.
- Fix skipping partial withdrawals count.
- Wait for the async StreamEvent writer to exit before leaving the http handler, avoiding race condition panics. [pr](https://github.com/prysmaticlabs/prysm/pull/14557)
- Certain deb files were returning a 404, which made building new docker images without an existing cache impossible. This has been fixed with updates to rules_oci and bazel-lib.
- Fixed an issue where the length check between block body KZG commitments and the existing cache from the database was incompatible.
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot. [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API to return the correct error format for malformed tokens.
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvements, removing some redundant or duplicate logs.
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- Unskip Electra merkle spec test.
- Fix panic on the attestation interface caused by calling data before validation.
- Fix panic in validator REST mode when checking status after removing all keys.
- Corrects nil checks on some interface attestation types.
- Temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.
- P2P: Avoid infinite loop when looking for peers in small networks.

### Security
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16

This is a hotfix release with one change.

Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This new implementation contains a bug that can cause a panic in certain conditions. The issue is difficult to reproduce reliably and we are still trying to determine the root cause, but in the meantime we are issuing a patch that recovers from the panic to prevent the node from crashing.

This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the prysm REST mode validator (a feature which requires the validator to be configured to use the beacon api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api, like https://github.com/ethpandaops/ethereum-metrics-exporter

### Fixed

- Recover from panics when writing the event stream. [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)
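For illustration, here is a minimal sketch of the recover-and-log pattern that #14545 describes, written as a standalone `net/http` handler. The handler name and event payload are hypothetical, not Prysm's actual implementation:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// handleEvents streams server-sent events. The deferred recover turns a
// panic inside the write loop into a logged error instead of a node crash.
func handleEvents(w http.ResponseWriter, _ *http.Request) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("recovered from panic while writing event stream: %v", r)
		}
	}()

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/event-stream")
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "data: event %d\n\n", i) // one SSE frame per event
		flusher.Flush()
	}
}

func main() {
	http.HandleFunc("/eth/v1/events", handleEvents)
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}
```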
## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15

This release has a number of features and improvements. Most notably, the feature flag `--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`. The experimental state management design has shown significant improvements in memory usage at runtime. Updates to libp2p's gossipsub bring some bandwidth stability improvements with support for IDONTWANT control messages.

The gRPC gateway has been deprecated from Prysm in this release. If you need JSON data, consider the standardized beacon-APIs.

Updating to this release is recommended at your convenience.

### Added
@@ -14,12 +141,18 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Light client support: Add light client database changes.
- Light client support: Implement capella and deneb changes.
- Light client support: Implement `BlockToLightClientHeader` function.
- Light client support: Consensus types.
- GetBeaconStateV2: add Electra case.
-- Implement [consensus-specs/3875](https://github.com/ethereum/consensus-specs/pull/3875)
-- Tests to ensure sepolia config matches the official upstream yaml
-- HTTP endpoint for PublishBlobs
+- Implement [consensus-specs/3875](https://github.com/ethereum/consensus-specs/pull/3875).
+- Tests to ensure sepolia config matches the official upstream yaml.
+- `engine_newPayloadV4`,`engine_getPayloadV4` used for electra payload communication with execution client. [pr](https://github.com/prysmaticlabs/prysm/pull/14492)
+- HTTP endpoint for PublishBlobs.
- GetBlockV2, GetBlindedBlock, ProduceBlockV2, ProduceBlockV3: add Electra case.
- Add Electra support and tests for light client functions.
- fastssz version bump (better error messages).
- SSE implementation that sheds stuck clients. [pr](https://github.com/prysmaticlabs/prysm/pull/14413)
- Added GetPoolAttesterSlashingsV2 endpoint.
- Use engine API get-blobs for block subscriber to reduce block import latency and potentially reduce bandwidth.

### Changed
@@ -48,6 +181,8 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated Libp2p dependencies to allow prysm to use gossipsub v1.2.
- Updated Sepolia bootnodes.
- Make committee aware packing the default by deprecating `--enable-committee-aware-packing`.
- Moved `ConvertKzgCommitmentToVersionedHash` to the `primitives` package.
- Updated correlation penalty for EIP-7251.

### Deprecated

- `--disable-grpc-gateway` flag is deprecated due to grpc gateway removal.
@@ -55,9 +190,10 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve

### Removed

-- removed gRPC Gateway
-- Removed unused blobs bundle cache
+- Removed gRPC Gateway.
+- Removed unused blobs bundle cache.
- Removed consolidation signing domain from params. The Electra design changed such that the EL handles consolidation signature verification.
- Remove engine_getPayloadBodiesBy{Hash|Range}V2

### Fixed
@@ -76,9 +212,12 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
- Fix rolling back of a block due to a context deadline.

### Security

No notable security updates.

## [v5.1.0](https://github.com/prysmaticlabs/prysm/compare/v5.0.4...v5.1.0) - 2024-08-20

This release contains 171 new changes and many of these are related to Electra! Alongside the Electra changes, there
WORKSPACE (18 changes)
@@ -101,9 +101,9 @@ http_archive(

http_archive(
    name = "aspect_bazel_lib",
-    sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
-    strip_prefix = "bazel-lib-2.5.0",
-    url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
+    sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
+    strip_prefix = "bazel-lib-2.9.3",
+    url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
)

load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")

@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
    name = "linux_debian11_multiarch_base",  # Debian bullseye
    digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b",  # 2023-12-12
-    image = "gcr.io/distroless/cc-debian11",
+    image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
    platforms = [
        "linux/amd64",
        "linux/arm64/v8",

@@ -227,7 +227,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

-consensus_spec_version = "v1.5.0-alpha.6"
+consensus_spec_version = "v1.5.0-alpha.8"

bls_test_version = "v0.1.1"

@@ -243,7 +243,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
-    integrity = "sha256-M7u/Ot/Vzorww+dFbHp0cxLyM2mezJjijCzq+LY3uvs=",
+    integrity = "sha256-BsGIbEyJuYrzhShGl0tHhR4lP5Qwno8R3k8a6YBR/DA=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -259,7 +259,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
-    integrity = "sha256-deOSeLRsmHXvkRp8n2bs3HXdkGUJWWqu8KFM/QABbZg=",
+    integrity = "sha256-DkdvhPP2KiqUOpwFXQIFDCWCwsUDIC/xhTBD+TZevm0=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -275,7 +275,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
-    integrity = "sha256-Zz7YCf6XVf57nzSEGq9ToflJFHM0lAGwhd18l9Rf3hA=",
+    integrity = "sha256-vkZqV0HB8A2Uc56C1Us/p5G57iaHL+zw2No93Xt6M/4=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -290,7 +290,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
-    integrity = "sha256-BoXckDxXnDcEmAjg/dQgf/tLiJsb6CT0aZvmWHFijrY=",
+    integrity = "sha256-D/HPAW61lKqjoWwl7N0XvhdX+67dCEFAy8JxVzqBGtU=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -12,6 +12,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//api:go_default_library",
+       "//api/client:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//consensus-types:go_default_library",
@@ -14,6 +14,7 @@ import (

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/api"
+	"github.com/prysmaticlabs/prysm/v5/api/client"
	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"

@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
		err = non200Err(r)
		return
	}
-	res, err = io.ReadAll(r.Body)
+	res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
	if err != nil {
		err = errors.Wrap(err, "error reading http response body from builder server")
		return

@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
}

func non200Err(response *http.Response) error {
-	bodyBytes, err := io.ReadAll(response.Body)
+	bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
	var errMessage ErrorMessage
	var body string
	if err != nil {
@@ -10,11 +10,17 @@ import (
	"github.com/pkg/errors"
)

+const (
+	MaxBodySize    int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
+	MaxErrBodySize int64 = 1 << 17 // 128KB
+)

// Client is a wrapper object around the HTTP client.
type Client struct {
-	hc      *http.Client
-	baseURL *url.URL
-	token   string
+	hc          *http.Client
+	baseURL     *url.URL
+	token       string
+	maxBodySize int64
}

// NewClient constructs a new client with the provided options (ex WithTimeout).
@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
		return nil, err
	}
	c := &Client{
-		hc:      &http.Client{},
-		baseURL: u,
+		hc:          &http.Client{},
+		baseURL:     u,
+		maxBodySize: MaxBodySize,
	}
	for _, o := range opts {
		o(c)
@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
	u := c.baseURL.ResolveReference(&url.URL{Path: path})
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
+	req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
	if err != nil {
		return nil, err
	}
@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
	if r.StatusCode != http.StatusOK {
		return nil, Non200Err(r)
	}
-	b, err := io.ReadAll(r.Body)
+	b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
	if err != nil {
		return nil, errors.Wrap(err, "error reading http response body")
	}
@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")

// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
-func Non200Err(response *http.Response) error {
-	bodyBytes, err := io.ReadAll(response.Body)
+func Non200Err(r *http.Response) error {
+	b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
	var body string
	if err != nil {
		body = "(Unable to read response body.)"
	} else {
-		body = "response body:\n" + string(bodyBytes)
+		body = "response body:\n" + string(b)
	}
-	msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
-	switch response.StatusCode {
+	msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
+	switch r.StatusCode {
	case http.StatusNotFound:
		return errors.Wrap(ErrNotFound, msg)
	default:
@@ -93,6 +93,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
			EventType: EventConnectionError,
			Data:      []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
		}
+		return
	}

	defer func() {
@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {

func TestEventStream(t *testing.T) {
	mux := http.NewServeMux()
-	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
+	mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
		flusher, ok := w.(http.Flusher)
		require.Equal(t, true, ok)
		for i := 1; i <= 3; i++ {

@@ -79,3 +79,23 @@ func TestEventStream(t *testing.T) {
		}
	}
}

+func TestEventStreamRequestError(t *testing.T) {
+	topics := []string{"head"}
+	eventsChannel := make(chan *Event, 1)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// use valid url that will result in failed request with nil body
+	stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
+	require.NoError(t, err)
+
+	// error will happen when request is made, should be received over events channel
+	go stream.Subscribe(eventsChannel)
+
+	event := <-eventsChannel
+	if event.EventType != EventConnectionError {
+		t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
+	}
+}
@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
		c.token = token
	}
}

+// WithMaxBodySize overrides the default max body size of 8MB.
+func WithMaxBodySize(size int64) ClientOpt {
+	return func(c *Client) {
+		c.maxBodySize = size
+	}
+}
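A quick usage sketch for the new option, assuming the import path shown in the diff above (`github.com/prysmaticlabs/prysm/v5/api/client`); the host and request path are hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Cap response bodies at 1 MiB instead of the 8 MiB default.
	cl, err := client.NewClient("http://localhost:3500", client.WithMaxBodySize(1<<20))
	if err != nil {
		log.Fatal(err)
	}
	// Get now reads at most maxBodySize bytes via io.LimitReader, so a
	// misbehaving server cannot force the client to buffer an unbounded response.
	b, err := cl.Get(context.Background(), "/eth/v1/node/version")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```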
@@ -1476,12 +1476,15 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
	}
}

-func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
-	deposits := make([]*PendingBalanceDeposit, len(ds))
+func PendingDepositsFromConsensus(ds []*eth.PendingDeposit) []*PendingDeposit {
+	deposits := make([]*PendingDeposit, len(ds))
	for i, d := range ds {
-		deposits[i] = &PendingBalanceDeposit{
-			Index:  fmt.Sprintf("%d", d.Index),
-			Amount: fmt.Sprintf("%d", d.Amount),
+		deposits[i] = &PendingDeposit{
+			Pubkey:                hexutil.Encode(d.PublicKey),
+			WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
+			Amount:                fmt.Sprintf("%d", d.Amount),
+			Signature:             hexutil.Encode(d.Signature),
+			Slot:                  fmt.Sprintf("%d", d.Slot),
		}
	}
	return deposits
@@ -722,7 +722,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
	if err != nil {
		return nil, err
	}
-	pbd, err := st.PendingBalanceDeposits()
+	pbd, err := st.PendingDeposits()
	if err != nil {
		return nil, err
	}
@@ -770,7 +770,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
		EarliestExitEpoch:             fmt.Sprintf("%d", eee),
		ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
		EarliestConsolidationEpoch:    fmt.Sprintf("%d", ece),
-		PendingBalanceDeposits:        PendingBalanceDepositsFromConsensus(pbd),
+		PendingDeposits:               PendingDepositsFromConsensus(pbd),
		PendingPartialWithdrawals:     PendingPartialWithdrawalsFromConsensus(ppw),
		PendingConsolidations:         PendingConsolidationsFromConsensus(pc),
	}, nil
@@ -21,11 +21,12 @@ type GetCommitteesResponse struct {
}

type ListAttestationsResponse struct {
-	Data []*Attestation `json:"data"`
+	Version string          `json:"version,omitempty"`
+	Data    json.RawMessage `json:"data"`
}

type SubmitAttestationsRequest struct {
-	Data []*Attestation `json:"data"`
+	Data json.RawMessage `json:"data"`
}

type ListVoluntaryExitsResponse struct {

@@ -133,6 +134,13 @@ type GetBlockAttestationsResponse struct {
	Data []*Attestation `json:"data"`
}

+type GetBlockAttestationsV2Response struct {
+	Version             string          `json:"version"`
+	ExecutionOptimistic bool            `json:"execution_optimistic"`
+	Finalized           bool            `json:"finalized"`
+	Data                json.RawMessage `json:"data"` // Accepts both `Attestation` and `AttestationElectra` types
+}

type GetStateRootResponse struct {
	ExecutionOptimistic bool `json:"execution_optimistic"`
	Finalized           bool `json:"finalized"`

@@ -169,7 +177,8 @@ type BLSToExecutionChangesPoolResponse struct {
}

type GetAttesterSlashingsResponse struct {
-	Data []*AttesterSlashing `json:"data"`
+	Version string          `json:"version,omitempty"`
+	Data    json.RawMessage `json:"data"` // Accepts both `[]*AttesterSlashing` and `[]*AttesterSlashingElectra` types
}

type GetProposerSlashingsResponse struct {
@@ -7,7 +7,8 @@ import (
)

type AggregateAttestationResponse struct {
-	Data *Attestation `json:"data"`
+	Version string          `json:"version,omitempty"`
+	Data    json.RawMessage `json:"data"`
}

type SubmitContributionAndProofsRequest struct {
@@ -15,7 +16,7 @@ type SubmitContributionAndProofsRequest struct {
}

type SubmitAggregateAndProofsRequest struct {
-	Data []*SignedAggregateAttestationAndProof `json:"data"`
+	Data []json.RawMessage `json:"data"`
}

type SubmitSyncCommitteeSubscriptionsRequest struct {
@@ -257,9 +257,12 @@ type ConsolidationRequest struct {
	TargetPubkey string `json:"target_pubkey"`
}

-type PendingBalanceDeposit struct {
-	Index  string `json:"index"`
-	Amount string `json:"amount"`
+type PendingDeposit struct {
+	Pubkey                string `json:"pubkey"`
+	WithdrawalCredentials string `json:"withdrawal_credentials"`
+	Amount                string `json:"amount"`
+	Signature             string `json:"signature"`
+	Slot                  string `json:"slot"`
}

type PendingPartialWithdrawal struct {

@@ -176,7 +176,7 @@ type BeaconStateElectra struct {
	EarliestExitEpoch             string                      `json:"earliest_exit_epoch"`
	ConsolidationBalanceToConsume string                      `json:"consolidation_balance_to_consume"`
	EarliestConsolidationEpoch    string                      `json:"earliest_consolidation_epoch"`
-	PendingBalanceDeposits        []*PendingBalanceDeposit    `json:"pending_balance_deposits"`
+	PendingDeposits               []*PendingDeposit           `json:"pending_deposits"`
	PendingPartialWithdrawals     []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
	PendingConsolidations         []*PendingConsolidation     `json:"pending_consolidations"`
}
@@ -25,6 +25,7 @@ go_library(
        "receive_attestation.go",
        "receive_blob.go",
        "receive_block.go",
+       "receive_data_column.go",
        "service.go",
        "tracked_proposer.go",
        "weak_subjectivity_checks.go",
@@ -48,6 +49,7 @@ go_library(
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
+       "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
@@ -157,6 +159,7 @@ go_test(
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
+       "//beacon-chain/p2p/testing:go_default_library",
        "//beacon-chain/startup:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
@@ -12,6 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
+	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -44,7 +45,7 @@ type ForkchoiceFetcher interface {
	UpdateHead(context.Context, primitives.Slot)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
-	InsertNode(context.Context, state.BeaconState, [32]byte) error
+	InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
	ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
	NewSlot(context.Context, primitives.Slot) error
	ProposerBoost() [32]byte
@@ -242,7 +243,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
	if !s.hasHeadState() {
		return []primitives.ValidatorIndex{}, nil
	}
-	return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
+	return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
@@ -4,6 +4,7 @@ import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
+	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -44,10 +45,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}

// InsertNode is a wrapper for node insertion which is self locked
-func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
+func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
-	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
+	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}

// ForkChoiceDump returns the corresponding value from forkchoice
@@ -13,6 +13,7 @@ import (
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
+	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -38,7 +39,7 @@ func prepareForkchoiceState(
	payloadHash [32]byte,
	justified *ethpb.Checkpoint,
	finalized *ethpb.Checkpoint,
-) (state.BeaconState, [32]byte, error) {
+) (state.BeaconState, consensus_blocks.ROBlock, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}
@@ -59,7 +60,26 @@ func prepareForkchoiceState(

	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
	st, err := state_native.InitializeFromProtoBellatrix(base)
-	return st, blockRoot, err
+	if err != nil {
+		return nil, consensus_blocks.ROBlock{}, err
+	}
+	blk := &ethpb.SignedBeaconBlockBellatrix{
+		Block: &ethpb.BeaconBlockBellatrix{
+			Slot:       slot,
+			ParentRoot: parentRoot[:],
+			Body: &ethpb.BeaconBlockBodyBellatrix{
+				ExecutionPayload: &enginev1.ExecutionPayload{
+					BlockHash: payloadHash[:],
+				},
+			},
+		},
+	}
+	signed, err := blocks.NewSignedBeaconBlock(blk)
+	if err != nil {
+		return nil, consensus_blocks.ROBlock{}, err
+	}
+	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
+	return st, roblock, err
}

func TestHeadRoot_Nil(t *testing.T) {
@@ -122,9 +142,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
	service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
	ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
-	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
	service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
	require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))

@@ -316,24 +336,24 @@ func TestService_ChainHeads(t *testing.T) {
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

	roots, slots := c.ChainHeads()
	require.Equal(t, 3, len(roots))

@@ -413,12 +433,12 @@ func TestService_IsOptimistic(t *testing.T) {
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
-	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

	opt, err := c.IsOptimistic(ctx)
	require.NoError(t, err)

@@ -449,12 +469,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
	c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
	ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
	ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-	st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
+	st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
	require.NoError(t, err)
-	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

	opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
	require.NoError(t, err)
@@ -33,6 +33,7 @@ var (
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
+var errMaxDataColumnsExceeded = errors.New("Expected data columns for node exceeds NUMBER_OF_COLUMNS")

// An invalid block is the block that fails state transition based on the core protocol rules.
// The beacon node shall not be accepting nor building blocks that branch off from an invalid block.
@@ -2,13 +2,15 @@ package blockchain

import (
	"context"
-	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/async/event"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
+	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -28,8 +30,6 @@ import (
	"github.com/sirupsen/logrus"
)

-const blobCommitmentVersionKZG uint8 = 0x01

var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -72,6 +72,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	if arg.attributes == nil {
		arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
	}
+	go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
	payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
	if err != nil {
		switch {
@@ -170,6 +171,38 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
	return payloadID, nil
}

+func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
+	pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
+	if err != nil {
+		log.WithError(err).
+			WithField("head_root", cfg.headRoot[:]).
+			Error("Could not get proposer index for PayloadAttributes event")
+		return
+	}
+	evd := payloadattribute.EventData{
+		ProposerIndex:   pidx,
+		ProposalSlot:    cfg.headState.Slot(),
+		ParentBlockRoot: cfg.headRoot[:],
+		Attributer:      cfg.attributes,
+		HeadRoot:        cfg.headRoot,
+		HeadState:       cfg.headState,
+		HeadBlock:       cfg.headBlock,
+	}
+	if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
+		headPayload, err := cfg.headBlock.Block().Body().Execution()
+		if err != nil {
+			log.WithError(err).Error("Could not get execution payload for head block")
+			return
+		}
+		evd.ParentBlockHash = headPayload.BlockHash()
+		evd.ParentBlockNumber = headPayload.BlockNumber()
+	}
+	f.Send(&feed.Event{
+		Type: statefeed.PayloadAttributes,
+		Data: evd,
+	})
+}

// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {
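Because `firePayloadAttributesEvent` is launched with `go`, event delivery never blocks the fork choice update itself, matching the "non-blocking payload attribute event handling" changelog entry. A hypothetical consumer fragment, assuming Prysm's `async/event` feed follows the go-ethereum-style `Subscribe(channel)` API and that the enclosing file already has the `feed`, `statefeed`, and `payloadattribute` imports shown in the diff above (`stateNotifier` is illustrative):

```go
// ch receives feed events; PayloadAttributes events carry payloadattribute.EventData.
ch := make(chan *feed.Event, 1)
sub := stateNotifier.StateFeed().Subscribe(ch) // assumed Subscribe signature
defer sub.Unsubscribe()

for ev := range ch {
	if ev.Type != statefeed.PayloadAttributes {
		continue
	}
	data, ok := ev.Data.(payloadattribute.EventData)
	if !ok {
		continue
	}
	// Fields shown in the diff above: proposer index and proposal slot.
	log.Printf("payload attributes: proposer=%d slot=%d", data.ProposerIndex, data.ProposalSlot)
}
```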
@@ -219,17 +252,25 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
	}

	var lastValidHash []byte
+	var parentRoot *common.Hash
+	var versionedHashes []common.Hash
+	var requests *enginev1.ExecutionRequests
	if blk.Version() >= version.Deneb {
-		var versionedHashes []common.Hash
		versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
		if err != nil {
			return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
		}
-		pr := common.Hash(blk.Block().ParentRoot())
-		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
-	} else {
-		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
+		prh := common.Hash(blk.Block().ParentRoot())
+		parentRoot = &prh
	}
+	if blk.Version() >= version.Electra {
+		requests, err = blk.Block().Body().ExecutionRequests()
+		if err != nil {
+			return false, errors.Wrap(err, "could not get execution requests")
+		}
+	}
+	lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

	switch {
	case err == nil:
		newPayloadValidNodeCount.Inc()

@@ -402,13 +443,7 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([
	versionedHashes := make([]common.Hash, len(commitments))
	for i, commitment := range commitments {
-		versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
+		versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
	}
	return versionedHashes, nil
}

-func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
-	versionedHash := sha256.Sum256(commitment)
-	versionedHash[0] = blobCommitmentVersionKZG
-	return versionedHash
-}
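The helper removed above now lives in the `primitives` package, but the computation it performs is small enough to restate standalone: an EIP-4844 versioned hash is sha256(commitment) with the first byte overwritten by the version tag 0x01. A self-contained sketch (the zero commitment is for illustration only):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// kzgToVersionedHash mirrors the removed function: hash the 48-byte KZG
// commitment and stamp the first byte with the KZG version tag (0x01).
func kzgToVersionedHash(commitment []byte) [32]byte {
	h := sha256.Sum256(commitment)
	h[0] = 0x01 // blobCommitmentVersionKZG
	return h
}

func main() {
	commitment := make([]byte, 48)
	fmt.Printf("versioned hash: %x\n", kzgToVersionedHash(commitment))
}
```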
@@ -1135,9 +1135,14 @@ func TestComputePayloadAttribute(t *testing.T) {
	// Cache hit, advance state, no fee recipient
	slot := primitives.Slot(1)
	service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
+	blk := util.NewBeaconBlockBellatrix()
+	signed, err := consensusblocks.NewSignedBeaconBlock(blk)
+	require.NoError(t, err)
+	roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
+	require.NoError(t, err)
	cfg := &postBlockProcessConfig{
-		ctx:       ctx,
-		blockRoot: [32]byte{'a'},
+		ctx:     ctx,
+		roblock: roblock,
	}
	fcu := &fcuConfig{
		headState: st,
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
+       "kzg.go",
        "trusted_setup.go",
        "validation.go",
    ],
@@ -12,6 +13,9 @@ go_library(
    deps = [
        "//consensus-types/blocks:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
+       "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
+       "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
+       "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
    ],
)
beacon-chain/blockchain/kzg/kzg.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package kzg

import (
	"errors"

	ckzg4844 "github.com/ethereum/c-kzg-4844/v2/bindings/go"
	"github.com/ethereum/go-ethereum/crypto/kzg4844"
)

// BytesPerBlob is the number of bytes in a single blob.
const BytesPerBlob = ckzg4844.BytesPerBlob

// Blob represents a serialized chunk of data.
type Blob [BytesPerBlob]byte

// BytesPerCell is the number of bytes in a single cell.
const BytesPerCell = ckzg4844.BytesPerCell

// Cell represents a chunk of an encoded Blob.
type Cell [BytesPerCell]byte

// Commitment represents a KZG commitment to a Blob.
type Commitment [48]byte

// Proof represents a KZG proof that attests to the validity of a Blob or parts of it.
type Proof [48]byte

// Bytes48 is a 48-byte array.
type Bytes48 = ckzg4844.Bytes48

// Bytes32 is a 32-byte array.
type Bytes32 = ckzg4844.Bytes32

// CellsAndProofs represents the Cells and Proofs corresponding to a single blob.
type CellsAndProofs struct {
	Cells  []Cell
	Proofs []Proof
}

func BlobToKZGCommitment(blob *Blob) (Commitment, error) {
	comm, err := kzg4844.BlobToCommitment(kzg4844.Blob(*blob))
	if err != nil {
		return Commitment{}, err
	}
	return Commitment(comm), nil
}

func ComputeBlobKZGProof(blob *Blob, commitment Commitment) (Proof, error) {
	proof, err := kzg4844.ComputeBlobProof(kzg4844.Blob(*blob), kzg4844.Commitment(commitment))
	if err != nil {
		return [48]byte{}, err
	}
	return Proof(proof), nil
}

func ComputeCellsAndKZGProofs(blob *Blob) (CellsAndProofs, error) {
	ckzgBlob := (*ckzg4844.Blob)(blob)
	ckzgCells, ckzgProofs, err := ckzg4844.ComputeCellsAndKZGProofs(ckzgBlob)
	if err != nil {
		return CellsAndProofs{}, err
	}

	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, cells []Cell, proofsBytes []Bytes48) (bool, error) {
	// Convert `Cell` type to `ckzg4844.Cell`
	ckzgCells := make([]ckzg4844.Cell, len(cells))
	for i := range cells {
		ckzgCells[i] = ckzg4844.Cell(cells[i])
	}

	return ckzg4844.VerifyCellKZGProofBatch(commitmentsBytes, cellIndices, ckzgCells, proofsBytes)
}

func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
	// Convert `Cell` type to `ckzg4844.Cell`
	ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
	for i := range partialCells {
		ckzgPartialCells[i] = ckzg4844.Cell(partialCells[i])
	}

	ckzgCells, ckzgProofs, err := ckzg4844.RecoverCellsAndKZGProofs(cellIndices, ckzgPartialCells)
	if err != nil {
		return CellsAndProofs{}, err
	}

	return makeCellsAndProofs(ckzgCells[:], ckzgProofs[:])
}

// Convert cells/proofs to the CellsAndProofs type defined in this package.
func makeCellsAndProofs(ckzgCells []ckzg4844.Cell, ckzgProofs []ckzg4844.KZGProof) (CellsAndProofs, error) {
	if len(ckzgCells) != len(ckzgProofs) {
		return CellsAndProofs{}, errors.New("different number of cells/proofs")
	}

	var cells []Cell
	var proofs []Proof
	for i := range ckzgCells {
		cells = append(cells, Cell(ckzgCells[i]))
		proofs = append(proofs, Proof(ckzgProofs[i]))
	}

	return CellsAndProofs{
		Cells:  cells,
		Proofs: proofs,
	}, nil
}
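A sketch of how these helpers fit together for PeerDAS, assuming the import path implied by the file location and that the trusted setup is loaded via `Start` (see trusted_setup.go below); the zero blob is illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
)

func main() {
	if err := kzg.Start(); err != nil { // load the embedded trusted setup first
		log.Fatal(err)
	}
	var blob kzg.Blob // real callers fill this with blob data

	commitment, err := kzg.BlobToKZGCommitment(&blob)
	if err != nil {
		log.Fatal(err)
	}
	// Extend the blob into cells, with one proof per cell.
	cp, err := kzg.ComputeCellsAndKZGProofs(&blob)
	if err != nil {
		log.Fatal(err)
	}
	// Verify the first cell against the blob's commitment.
	ok, err := kzg.VerifyCellKZGProofBatch(
		[]kzg.Bytes48{kzg.Bytes48(commitment)},
		[]uint64{0},
		[]kzg.Cell{cp.Cells[0]},
		[]kzg.Bytes48{kzg.Bytes48(cp.Proofs[0])},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("cell proof valid:", ok)
}
```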
@@ -5,6 +5,8 @@ import (
 	"encoding/json"
 
 	GoKZG "github.com/crate-crypto/go-kzg-4844"
+	CKZG "github.com/ethereum/c-kzg-4844/v2/bindings/go"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
 )
 
@@ -12,17 +14,53 @@ var (
 	//go:embed trusted_setup.json
 	embeddedTrustedSetup []byte // 1.2Mb
 	kzgContext           *GoKZG.Context
+	kzgLoaded            bool
 )
 
+type TrustedSetup struct {
+	G1Monomial [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_monomial"`
+	G1Lagrange [GoKZG.ScalarsPerBlob]GoKZG.G1CompressedHexStr `json:"g1_lagrange"`
+	G2Monomial [65]GoKZG.G2CompressedHexStr                   `json:"g2_monomial"`
+}
+
 func Start() error {
-	parsedSetup := GoKZG.JSONTrustedSetup{}
-	err := json.Unmarshal(embeddedTrustedSetup, &parsedSetup)
+	trustedSetup := &TrustedSetup{}
+	err := json.Unmarshal(embeddedTrustedSetup, trustedSetup)
 	if err != nil {
 		return errors.Wrap(err, "could not parse trusted setup JSON")
 	}
-	kzgContext, err = GoKZG.NewContext4096(&parsedSetup)
+	kzgContext, err = GoKZG.NewContext4096(&GoKZG.JSONTrustedSetup{
+		SetupG2:         trustedSetup.G2Monomial[:],
+		SetupG1Lagrange: trustedSetup.G1Lagrange})
 	if err != nil {
 		return errors.Wrap(err, "could not initialize go-kzg context")
 	}
+
+	// Length of a G1 point, converted from hex to binary.
+	g1MonomialBytes := make([]byte, len(trustedSetup.G1Monomial)*(len(trustedSetup.G1Monomial[0])-2)/2)
+	for i, g1 := range &trustedSetup.G1Monomial {
+		copy(g1MonomialBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
+	}
+	// Length of a G1 point, converted from hex to binary.
+	g1LagrangeBytes := make([]byte, len(trustedSetup.G1Lagrange)*(len(trustedSetup.G1Lagrange[0])-2)/2)
+	for i, g1 := range &trustedSetup.G1Lagrange {
+		copy(g1LagrangeBytes[i*(len(g1)-2)/2:], hexutil.MustDecode(g1))
+	}
+	// Length of a G2 point, converted from hex to binary.
+	g2MonomialBytes := make([]byte, len(trustedSetup.G2Monomial)*(len(trustedSetup.G2Monomial[0])-2)/2)
+	for i, g2 := range &trustedSetup.G2Monomial {
+		copy(g2MonomialBytes[i*(len(g2)-2)/2:], hexutil.MustDecode(g2))
+	}
+	if !kzgLoaded {
+		// TODO: Provide a configuration option for this.
+		var precompute uint = 8
+
+		// Free the current trusted setup before running this method. CKZG
+		// panics if the same setup is run multiple times.
+		if err = CKZG.LoadTrustedSetup(g1MonomialBytes, g1LagrangeBytes, g2MonomialBytes, precompute); err != nil {
+			panic(err)
+		}
+	}
+	kzgLoaded = true
+	return nil
+}
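The size arithmetic used for g1MonomialBytes and friends above is terse: every setup point arrives as a 0x-prefixed hex string, so its binary length is (len-2)/2 bytes. A standalone sketch with an illustrative 48-byte compressed G1 point (the values are assumptions for demonstration, not taken from the embedded setup):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A compressed G1 point is 48 bytes, serialized as "0x" + 96 hex chars.
	hexPoint := "0x" + strings.Repeat("ab", 48) // hypothetical 98-char string
	// Binary size recovered from the hex string: strip "0x", two chars per byte.
	fmt.Println((len(hexPoint) - 2) / 2) // prints 48
}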
File diff suppressed because it is too large
@@ -118,9 +118,9 @@ func WithBLSToExecPool(p blstoexec.PoolManager) Option {
 }
 
 // WithP2PBroadcaster to broadcast messages after appropriate processing.
-func WithP2PBroadcaster(p p2p.Broadcaster) Option {
+func WithP2PBroadcaster(p p2p.Acceser) Option {
 	return func(s *Service) error {
-		s.cfg.P2p = p
+		s.cfg.P2P = p
 		return nil
 	}
 }
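WithP2PBroadcaster follows Go's functional-options pattern: each option is a closure that mutates the service's config. As a hedged sketch of how such options are typically consumed (this constructor is illustrative, not taken from the diff):

// Sketch: applying functional options at construction time.
func NewService(opts ...Option) (*Service, error) {
	s := &Service{}
	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

// Usage (hypothetical): svc, err := NewService(WithP2PBroadcaster(p2pService))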
@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
 	util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
 
 	cp := &ethpb.Checkpoint{}
-	st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
+	st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
 	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
 
 	blkWithStateBadAtt := util.NewBeaconBlock()
 	blkWithStateBadAtt.Block.Slot = 1
 	r, err := blkWithStateBadAtt.Block.HashTreeRoot()
 	require.NoError(t, err)
 	cp = &ethpb.Checkpoint{Root: r[:]}
-	st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
+	st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
 	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
 	util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
 	BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
 	require.NoError(t, err)

@@ -92,12 +92,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
 		{
 			name:      "process nil attestation",
 			a:         nil,
-			wantedErr: "attestation can't be nil",
+			wantedErr: "attestation is nil",
 		},
 		{
 			name:      "process nil field (a.Data) in attestation",
 			a:         &ethpb.Attestation{},
-			wantedErr: "attestation's data can't be nil",
+			wantedErr: "attestation is nil",
 		},
 		{
 			name: "process nil field (a.Target) in attestation",
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
 	ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
 	ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
-	state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
+	state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
 	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
 	require.NoError(t, service.OnAttestation(ctx, att[0], 0))
 }
@@ -318,10 +318,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
 	require.NoError(t, err)
 	checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
-	st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
+	st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
 	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
 	returned, err := service.getAttPreState(ctx, checkpoint)
 	require.NoError(t, err)
 	assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")

@@ -337,10 +336,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
 	require.NoError(t, err)
 	newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
 	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
-	st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
+	st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
 	require.NoError(t, err)
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
-	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
+	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
 	returned, err = service.getAttPreState(ctx, newCheckpoint)
 	require.NoError(t, err)
 	s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -3,13 +3,13 @@ package blockchain
 import (
 	"context"
 	"fmt"
+	"slices"
 	"time"
 
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
 	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
+	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
 	coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
@@ -46,8 +46,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
 // process the beacon block after validating the state transition function
 type postBlockProcessConfig struct {
 	ctx            context.Context
-	signed         interfaces.ReadOnlySignedBeaconBlock
-	blockRoot      [32]byte
+	roblock        consensusblocks.ROBlock
 	headRoot       [32]byte
 	postState      state.BeaconState
 	isValidPayload bool
@@ -61,7 +60,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
 	defer span.End()
 	cfg.ctx = ctx
-	if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
+	if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
 		return invalidBlock{error: err}
 	}
 	startTime := time.Now()

@@ -73,19 +72,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 	defer s.sendLightClientFeeds(cfg)
 	defer s.sendStateFeedOnBlock(cfg)
 	defer reportProcessingTime(startTime)
-	defer reportAttestationInclusion(cfg.signed.Block())
+	defer reportAttestationInclusion(cfg.roblock.Block())
 
-	err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
+	err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
 	if err != nil {
-		return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
+		// Do not use parent context in the event it deadlined
+		ctx = trace.NewContext(context.Background(), span)
+		s.rollbackBlock(ctx, cfg.roblock.Root())
+		return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
 	}
-	if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
+	if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
 		return errors.Wrap(err, "could not handle block's attestations")
 	}
 
-	s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
+	s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
 	if cfg.isValidPayload {
-		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
+		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
 			return errors.Wrap(err, "could not set optimistic block to valid")
 		}
 	}

@@ -95,8 +97,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
 		log.WithError(err).Warn("Could not update head")
 	}
 	newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
-	if cfg.headRoot != cfg.blockRoot {
-		s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
+	if cfg.headRoot != cfg.roblock.Root() {
+		s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
 		return nil
 	}
 	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {

@@ -154,7 +156,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock,
 	}
 
 	// Fill in missing blocks
-	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
+	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
 		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
 	}

@@ -231,10 +233,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock,
 			return err
 		}
 	}
-	if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
+
+	nodeID := s.cfg.P2P.NodeID()
+	if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), b); err != nil {
 		return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
 	}
-	args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
+	args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
 		JustifiedCheckpoint: jCheckpoints[i],
 		FinalizedCheckpoint: fCheckpoints[i]}
 	pendingNodes[len(blks)-i-1] = args

@@ -279,7 +283,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlock,
 		return errors.Wrap(err, "could not insert batch to forkchoice")
 	}
 	// Insert the last block to forkchoice
-	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
+	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
 		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
 	}
 	// Set their optimistic status

@@ -404,6 +408,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interfaces.ReadOnlySignedBeaconBlock, st state.BeaconState) error {
 		return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
 	}
 	if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
+		log.Warnf("Rolling back insertion of block with root %#x", r)
+		if err := s.cfg.BeaconDB.DeleteBlock(ctx, r); err != nil {
+			log.WithError(err).Errorf("Could not delete block with block root %#x", r)
+		}
 		return errors.Wrap(err, "could not save state")
 	}
 	return nil

@@ -499,7 +507,7 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
 	}
 	indices, err := bs.Indices(root)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "indices")
 	}
 	missing := make(map[uint64]struct{}, len(expected))
 	for i := range expected {

@@ -513,12 +521,40 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
 	return missing, nil
 }
 
+func missingDataColumns(bs *filesystem.BlobStorage, root [32]byte, expected map[uint64]bool) (map[uint64]bool, error) {
+	if len(expected) == 0 {
+		return nil, nil
+	}
+
+	if len(expected) > int(params.BeaconConfig().NumberOfColumns) {
+		return nil, errMaxDataColumnsExceeded
+	}
+
+	indices, err := bs.ColumnIndices(root)
+	if err != nil {
+		return nil, err
+	}
+
+	missing := make(map[uint64]bool, len(expected))
+	for col := range expected {
+		if !indices[col] {
+			missing[col] = true
+		}
+	}
+
+	return missing, nil
+}
+
 // isDataAvailable blocks until all BlobSidecars committed to in the block are available,
 // or an error or context cancellation occurs. A nil result means that the data availability check is successful.
 // The function will first check the database to see if all sidecars have been persisted. If any
 // sidecars are missing, it will then read from the blobNotifier channel for the given root until the channel is
 // closed, the context hits cancellation/timeout, or notifications have been received for all the missing sidecars.
 func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
+	if coreTime.PeerDASIsActive(signed.Block().Slot()) {
+		return s.areDataColumnsAvailable(ctx, root, signed)
+	}
+
 	if signed.Version() < version.Deneb {
 		return nil
 	}
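A hedged sketch of how the new missingDataColumns helper composes with a custody map; the storage handle and expected map here are hypothetical inputs, not values from the diff:

// Hypothetical check: are any custodied columns still missing for a root?
func allColumnsPersistedExample(bs *filesystem.BlobStorage, root [32]byte, custody map[uint64]bool) (bool, error) {
	missing, err := missingDataColumns(bs, root, custody)
	if err != nil {
		return false, err
	}
	// An empty map means every expected column sidecar is already persisted.
	return len(missing) == 0, nil
}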
@@ -548,7 +584,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
 	// get a map of BlobSidecar indices that are not currently available.
 	missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
 	if err != nil {
-		return err
+		return errors.Wrap(err, "missing indices")
 	}
 	// If there are no missing indices, all BlobSidecars are available.
 	if len(missing) == 0 {

@@ -567,8 +603,13 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
 		if len(missing) == 0 {
 			return
 		}
-		log.WithFields(daCheckLogFields(root, signed.Block().Slot(), expected, len(missing))).
-			Error("Still waiting for DA check at slot end.")
+
+		log.WithFields(logrus.Fields{
+			"slot":          signed.Block().Slot(),
+			"root":          fmt.Sprintf("%#x", root),
+			"blobsExpected": expected,
+			"blobsWaiting":  len(missing),
+		}).Error("Still waiting for blobs DA check at slot end.")
 	})
 	defer nst.Stop()
 }
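The slot-end warning above uses time.AfterFunc with a deferred Stop, so the log only fires if the DA check is still pending when the slot boundary passes. A small self-contained sketch of the same pattern, with illustrative durations:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Schedule a one-shot warning for when the slot boundary passes,
	// and cancel it if the check finishes first.
	nextSlot := time.Now().Add(50 * time.Millisecond) // hypothetical boundary
	nst := time.AfterFunc(time.Until(nextSlot), func() {
		fmt.Println("still waiting for DA check at slot end")
	})
	defer nst.Stop() // a check that completes in time stops the pending log

	time.Sleep(100 * time.Millisecond) // simulate a slow DA check
}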
@@ -590,12 +631,166 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
 	}
 }
 
-func daCheckLogFields(root [32]byte, slot primitives.Slot, expected, missing int) logrus.Fields {
-	return logrus.Fields{
-		"slot":          slot,
-		"root":          fmt.Sprintf("%#x", root),
-		"blobsExpected": expected,
-		"blobsWaiting":  missing,
-	}
-}
+// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
+func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
+	output := make([]uint64, 0, len(input))
+	for idx := range input {
+		output = append(output, idx)
+	}
+	slices.Sort[[]uint64](output)
+	return output
+}
+
+func (s *Service) areDataColumnsAvailable(ctx context.Context, root [32]byte, signed interfaces.ReadOnlySignedBeaconBlock) error {
+	if signed.Version() < version.Deneb {
+		return nil
+	}
+
+	block := signed.Block()
+	if block == nil {
+		return errors.New("invalid nil beacon block")
+	}
+	// We are only required to check within MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
+	if !params.WithinDAPeriod(slots.ToEpoch(block.Slot()), slots.ToEpoch(s.CurrentSlot())) {
+		return nil
+	}
+
+	body := block.Body()
+	if body == nil {
+		return errors.New("invalid nil beacon block body")
+	}
+
+	kzgCommitments, err := body.BlobKzgCommitments()
+	if err != nil {
+		return errors.Wrap(err, "blob KZG commitments")
+	}
+
+	// If the block has no commitments, there is nothing to wait for.
+	if len(kzgCommitments) == 0 {
+		return nil
+	}
+
+	// All columns to sample need to be available for the block to be considered available.
+	// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
+	nodeID := s.cfg.P2P.NodeID()
+	subnetSamplingSize := peerdas.SubnetSamplingSize()
+
+	colMap, err := peerdas.CustodyColumns(nodeID, subnetSamplingSize)
+	if err != nil {
+		return errors.Wrap(err, "custody columns")
+	}
+
+	// colMap represents the data columns a node is expected to custody.
+	if len(colMap) == 0 {
+		return nil
+	}
+
+	// Subscribe to newly stored data columns in the database.
+	rootIndexChan := make(chan filesystem.RootIndexPair)
+	subscription := s.blobStorage.DataColumnFeed.Subscribe(rootIndexChan)
+	defer subscription.Unsubscribe()
+
+	// Get the count of data columns we already have in the store.
+	retrievedDataColumns, err := s.blobStorage.ColumnIndices(root)
+	if err != nil {
+		return errors.Wrap(err, "column indices")
+	}
+
+	retrievedDataColumnsCount := uint64(len(retrievedDataColumns))
+
+	// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
+	// We don't need to wait for the rest of the data columns to declare the block as available.
+	if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
+		return nil
+	}
+
+	// Get a map of data column indices that are not currently available.
+	missingMap, err := missingDataColumns(s.blobStorage, root, colMap)
+	if err != nil {
+		return err
+	}
+
+	// If there are no missing indices, all data column sidecars are available.
+	// This is the happy path.
+	if len(missingMap) == 0 {
+		return nil
+	}
+
+	// Log for DA checks that cross over into the next slot; helpful for debugging.
+	nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
+	// Avoid logging if the DA check is called after the next slot has started.
+	if nextSlot.After(time.Now()) {
+		nst := time.AfterFunc(time.Until(nextSlot), func() {
+			missingMapCount := uint64(len(missingMap))
+
+			if missingMapCount == 0 {
+				return
+			}
+
+			var (
+				expected interface{} = "all"
+				missing  interface{} = "all"
+			)
+
+			numberOfColumns := params.BeaconConfig().NumberOfColumns
+			colMapCount := uint64(len(colMap))
+
+			if colMapCount < numberOfColumns {
+				expected = uint64MapToSortedSlice(colMap)
+			}
+
+			if missingMapCount < numberOfColumns {
+				missing = uint64MapToSortedSlice(missingMap)
+			}
+
+			log.WithFields(logrus.Fields{
+				"slot":            signed.Block().Slot(),
+				"root":            fmt.Sprintf("%#x", root),
+				"columnsExpected": expected,
+				"columnsWaiting":  missing,
+			}).Error("Some data columns are still unavailable at slot end")
+		})
+
+		defer nst.Stop()
+	}
+
+	for {
+		select {
+		case rootIndex := <-rootIndexChan:
+			if rootIndex.Root != root {
+				// This is not the root we are looking for.
+				continue
+			}
+
+			// This is a data column we are expecting.
+			if _, ok := missingMap[rootIndex.Index]; ok {
+				retrievedDataColumnsCount++
+			}
+
+			// As soon as we have more than half of the data columns, we can reconstruct the missing ones.
+			// We don't need to wait for the rest of the data columns to declare the block as available.
+			if peerdas.CanSelfReconstruct(retrievedDataColumnsCount) {
+				return nil
+			}
+
+			// Remove the index from the missing map.
+			delete(missingMap, rootIndex.Index)
+
+			// Exit if there are no more missing data columns.
+			if len(missingMap) == 0 {
+				return nil
+			}
+		case <-ctx.Done():
+			var missingIndices interface{} = "all"
+			numberOfColumns := params.BeaconConfig().NumberOfColumns
+			missingIndicesCount := uint64(len(missingMap))
+
+			if missingIndicesCount < numberOfColumns {
+				missingIndices = uint64MapToSortedSlice(missingMap)
+			}
+
+			return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing %v", block.Slot(), root, missingIndices)
+		}
+	}
+}
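The early exits in areDataColumnsAvailable rest on the erasure-coding property that half of the extended columns determine the rest, so a node holding enough columns can rebuild whatever is missing. A hedged sketch of that threshold check; the exact boundary is defined by peerdas.CanSelfReconstruct in the real code, and the numbers below are assumptions:

// Sketch: with numberOfColumns extended columns (e.g. 128) produced by
// extending half as many original columns, holding any half is enough to
// reconstruct all of them. The authoritative check lives in the peerdas
// package; this only mirrors the rule described in the comments above.
func canSelfReconstructSketch(retrieved, numberOfColumns uint64) bool {
	return retrieved >= numberOfColumns/2
}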
@@ -614,9 +809,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	if !s.inRegularSync() {
 		return
 	}
-	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
-		Type: statefeed.MissedSlot,
-	})
 	s.headLock.RLock()
 	headRoot := s.headRoot()
 	headState := s.headState(ctx)

@@ -644,6 +836,13 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
 	attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
 	// return early if we are not proposing next slot
 	if attribute.IsEmpty() {
+		fcuArgs := &fcuConfig{
+			headState:  headState,
+			headRoot:   headRoot,
+			headBlock:  nil,
+			attributes: attribute,
+		}
+		go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
 		return
 	}

@@ -678,9 +877,21 @@ func (s *Service) waitForSync() error {
 	}
 }
 
-func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot [32]byte, parentRoot [32]byte) error {
+func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, blockRoot, parentRoot [32]byte) error {
 	if IsInvalidBlock(err) && InvalidBlockLVH(err) != [32]byte{} {
 		return s.pruneInvalidBlock(ctx, blockRoot, parentRoot, InvalidBlockLVH(err))
 	}
 	return err
 }
+
+// In the event of an issue processing a block, we roll back changes done to the db and our caches
+// to always ensure that the node's internal state is consistent.
+func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
+	log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
+	if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
+		log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
+	}
+	if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
+		log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
+	}
+}
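rollbackBlock is invoked from postBlockProcess when forkchoice insertion fails, so a half-imported block never lingers in the database or caches. A hedged sketch of the call pattern; insertToForkchoice here is a hypothetical stand-in for the failing step:

// Sketch: undo prior DB/cache writes when a later processing step fails.
func (s *Service) processWithRollbackSketch(ctx context.Context, blockRoot [32]byte) error {
	if err := insertToForkchoice(ctx, blockRoot); err != nil { // hypothetical step
		// Remove the previously saved block and cached state for this root.
		s.rollbackBlock(ctx, blockRoot)
		return err
	}
	return nil
}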
@@ -18,6 +18,7 @@ import (
 	"github.com/prysmaticlabs/prysm/v5/config/features"
 	field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/config/params"
+	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"

@@ -42,7 +43,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
 	if !s.inRegularSync() {
 		return nil
 	}
-	slot := cfg.signed.Block().Slot()
+	slot := cfg.roblock.Block().Slot()
 	if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
 		return nil
 	}

@@ -50,9 +51,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
 }
 
 func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
-	if cfg.blockRoot == cfg.headRoot {
+	if cfg.roblock.Root() == cfg.headRoot {
 		fcuArgs.headState = cfg.postState
-		fcuArgs.headBlock = cfg.signed
+		fcuArgs.headBlock = cfg.roblock
 		fcuArgs.headRoot = cfg.headRoot
 		fcuArgs.proposingSlot = s.CurrentSlot() + 1
 		return nil
@@ -96,7 +97,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
 
 // sendStateFeedOnBlock sends an event that a new block has been synced
 func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
-	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
+	optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
 	if err != nil {
 		log.WithError(err).Debug("Could not check if block is optimistic")
 		optimistic = true

@@ -105,9 +106,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 	s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
 		Type: statefeed.BlockProcessed,
 		Data: &statefeed.BlockProcessedData{
-			Slot:        cfg.signed.Block().Slot(),
-			BlockRoot:   cfg.blockRoot,
-			SignedBlock: cfg.signed,
+			Slot:        cfg.roblock.Block().Slot(),
+			BlockRoot:   cfg.roblock.Root(),
+			SignedBlock: cfg.roblock,
 			Verified:    true,
 			Optimistic:  optimistic,
 		},

@@ -117,7 +118,7 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
 // sendLightClientFeeds sends the light client feeds when feature flag is enabled.
 func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
 	if features.Get().EnableLightClient {
-		if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
+		if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
 			log.WithError(err).Error("Failed to send light client optimistic update")
 		}

@@ -125,7 +126,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
 		finalized := s.ForkChoicer().FinalizedCheckpoint()
 
 		// LightClientFinalityUpdate needs super majority
-		s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
+		s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
 	}
 }

@@ -252,20 +253,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 // before sending FCU to the engine.
 func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
 	slot := cfg.postState.Slot()
-	if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
+	root := cfg.roblock.Root()
+	if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
 		return errors.Wrap(err, "could not update next slot state cache")
 	}
 	if !slots.IsEpochEnd(slot) {
 		return nil
 	}
-	return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
+	return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
 }
 
 // handleSecondFCUCall handles a second call to FCU when syncing a new block.
 // This is useful when proposing in the next block and we want to defer the
 // computation of the next slot shuffling.
 func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
-	if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
+	if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
 		go s.sendFCUWithAttributes(cfg, fcuArgs)
 	}
 }
@@ -281,7 +283,7 @@ func reportProcessingTime(startTime time.Time) {
 // called on blocks that arrive after the attestation voting window, or in a
 // background routine after syncing early blocks.
 func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
-	if cfg.blockRoot == cfg.headRoot {
+	if cfg.roblock.Root() == cfg.headRoot {
 		if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
 			return err
 		}

@@ -438,7 +440,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.Slot) ([32]byte, error) {
 
 // This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
 // This is useful for block tree visualizer and additional vote accounting.
-func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
+func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
 	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

@@ -448,10 +450,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 	if err != nil {
 		return err
 	}
-	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
+	// The first block can have a bogus root since the block is not inserted in forkchoice
+	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
+	if err != nil {
+		return err
+	}
+	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
 		JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
 	// As long as parent node is not in fork choice store, and parent node is in DB.
-	root := blk.ParentRoot()
+	root := roblock.Block().ParentRoot()
 	for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
 		b, err := s.getBlock(ctx, root)
 		if err != nil {

@@ -460,8 +467,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
 		if b.Block().Slot() <= fSlot {
 			break
 		}
+		roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
+		if err != nil {
+			return err
+		}
 		root = b.Block().ParentRoot()
-		args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
+		args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
 			JustifiedCheckpoint: jCheckpoint,
 			FinalizedCheckpoint: fCheckpoint}
 		pendingNodes = append(pendingNodes, args)
@@ -145,9 +145,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
 	require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
 	fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
 	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
-
 	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
 	require.NoError(t, err)
 
 	// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8

@@ -190,7 +189,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
 	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
 
 	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
 	require.NoError(t, err)
 
 	// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8

@@ -246,7 +245,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
 	// Set finalized epoch to 2.
 	require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
 	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
 	require.NoError(t, err)
 
 	// There should be 1 node: block 65

@@ -279,7 +278,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
 	require.NoError(t, err)
 
 	err = service.fillInForkChoiceMissingBlocks(
-		context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
+		context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
 	require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
 }

@@ -566,7 +565,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 	require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
 	_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
 	require.NoError(t, err)

@@ -614,7 +615,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 	require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
 	_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
 	require.NoError(t, err)

@@ -640,7 +643,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
 
 func TestOnBlock_NilBlock(t *testing.T) {
 	service, tr := minimalTestService(t)
-	err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
+	signed := &consensusblocks.SignedBeaconBlock{}
+	roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
+	err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
 	require.Equal(t, true, IsInvalidBlock(err))
 }

@@ -688,7 +693,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 		testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
 		require.NoError(t, err)
 	}

@@ -1114,7 +1121,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb1)
 		require.NoError(t, err)
 		lock.Lock()
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 		lock.Unlock()
 		wg.Done()
 	}()

@@ -1124,7 +1133,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb2)
 		require.NoError(t, err)
 		lock.Lock()
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 		lock.Unlock()
 		wg.Done()
 	}()

@@ -1134,7 +1145,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb3)
 		require.NoError(t, err)
 		lock.Lock()
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 		lock.Unlock()
 		wg.Done()
 	}()

@@ -1144,7 +1157,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb4)
 		require.NoError(t, err)
 		lock.Lock()
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 		lock.Unlock()
 		wg.Done()
 	}()

@@ -1219,7 +1234,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 	}
 
 	for i := 6; i < 12; i++ {

@@ -1237,7 +1254,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}

@@ -1256,7 +1275,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}
 	// Check that we haven't justified the second epoch yet

@@ -1278,7 +1299,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 	require.NoError(t, err)
 	jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
 	require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1306,7 +1329,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 	postState, err = service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 	// Check that forkchoice's head is the last invalid block imported. The
 	// store's headroot is the previous head (since the invalid block did

@@ -1335,7 +1360,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
 	postState, err = service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
+	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
 	require.NoError(t, err)
 	// Check the newly imported block is head, it justified the right
 	// checkpoint and the node is no longer optimistic

@@ -1397,7 +1424,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 	}
 
 	for i := 6; i < 12; i++ {

@@ -1415,7 +1444,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}

@@ -1435,7 +1466,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}
 	// Check that we haven't justified the second epoch yet

@@ -1457,7 +1490,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 	require.NoError(t, err)
 	jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
 	require.Equal(t, primitives.Epoch(2), jc.Epoch)

@@ -1485,7 +1520,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
+	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
 	// not finish importing and it was never imported to forkchoice). Check

@@ -1513,7 +1550,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
 	postState, err = service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
+	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
 	require.NoError(t, err)
 	// Check the newly imported block is head, it justified the right
 	// checkpoint and the node is no longer optimistic

@@ -1578,7 +1617,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 	}
 
 	for i := 6; i < 12; i++ {

@@ -1597,7 +1638,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}

@@ -1616,7 +1659,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 	require.NoError(t, err)
 	// save the post state and the payload Hash of this block since it will
 	// be the LVH

@@ -1643,7 +1688,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}
 	// Check that we have justified the second epoch

@@ -1669,7 +1716,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
+	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 
 	// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did

@@ -1708,7 +1757,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	postState, err = service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
+	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
 	// Check that the head is still INVALID and the node is still optimistic
 	require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
 	optimistic, err = service.IsOptimistic(ctx)

@@ -1731,7 +1782,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
 	require.NoError(t, err)
 	st, err = service.cfg.StateGen.StateByRoot(ctx, root)
 	require.NoError(t, err)

@@ -1757,7 +1810,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
 	postState, err = service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
+	roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
 	require.NoError(t, err)
 	require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
 	sjc = service.CurrentJustifiedCheckpt()

@@ -1813,7 +1868,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 	}
 
 	for i := 6; i < 12; i++ {

@@ -1831,7 +1888,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
 		postState, err := service.validateStateTransition(ctx, preState, wsb)
 		require.NoError(t, err)
 		require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-		err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
+		roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+		require.NoError(t, err)
+		err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 		require.NoError(t, err)
 	}

@@ -1850,7 +1909,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
-	err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
+	require.NoError(t, err)
+	err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
 	require.NoError(t, err)
 	// save the post state and the payload Hash of this block since it will
 	// be the LVH

@@ -1879,7 +1940,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 	require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
 	_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
 	require.NoError(t, err)

@@ -1905,7 +1968,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
 	require.NoError(t, err)
 	preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
 	require.NoError(t, err)
-	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
+	rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
 	require.ErrorContains(t, "received an INVALID payload from execution engine", err)
 
 	// Check that the headroot/state are not in DB and restart the node

@@ -1995,7 +2060,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 
 	st, err = service.HeadState(ctx)
 	require.NoError(t, err)

@@ -2059,7 +2126,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
 	postState, err := service.validateStateTransition(ctx, preState, wsb)
 	require.NoError(t, err)
 	require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
-	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
+	require.NoError(t, err)
+	require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
 
 	st, err = service.HeadState(ctx)
 	require.NoError(t, err)

@@ -2209,11 +2278,11 @@ func Test_getFCUArgs(t *testing.T) {
 	require.NoError(t, err)
 	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
 	require.NoError(t, err)
-
+	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
+	require.NoError(t, err)
 	cfg := &postBlockProcessConfig{
 		ctx:            ctx,
-		signed:         wsb,
-		blockRoot:      [32]byte{'a'},
+		roblock:        roblock,
 		postState:      st,
 		isValidPayload: true,
 	}

@@ -2223,11 +2292,143 @@ func Test_getFCUArgs(t *testing.T) {
 	require.ErrorContains(t, "block does not exist", err)
 
 	// canonical branch
-	cfg.headRoot = cfg.blockRoot
+	cfg.headRoot = cfg.roblock.Root()
 	fcuArgs = &fcuConfig{}
 	err = s.getFCUArgs(cfg, fcuArgs)
 	require.NoError(t, err)
-	require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
+	require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
 }
+
+func TestRollbackBlock(t *testing.T) {
+	service, tr := minimalTestService(t)
+	ctx := tr.ctx
+
+	st, keys := util.DeterministicGenesisState(t, 64)
+	stateRoot, err := st.HashTreeRoot(ctx)
+	require.NoError(t, err, "Could not hash genesis state")
+
+	require.NoError(t, service.saveGenesisData(ctx, st))
+
+	genesis := blocks.NewGenesisBlock(stateRoot[:])
+	wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
+	require.NoError(t, err)
+	require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
+	parentRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err, "Could not get signing root")
+	require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasState)
|
||||
|
||||
// Set invalid parent root to trigger forkchoice error.
|
||||
wsb.SetParentRoot([]byte("bad"))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err = service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
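
TestRollbackBlock pins down the contract that a failing postBlockProcess must undo the writes performed by savePostStateInfo, so HasBlock and HasState report false afterwards. A minimal sketch of that save-then-undo shape, with hypothetical save/process/rollback closures standing in for Prysm's internals:

// Minimal sketch of the rollback contract exercised above. The save,
// process, and rollback closures are hypothetical stand-ins for
// savePostStateInfo, postBlockProcess, and the cleanup they wrap.
func saveThenProcess(save, process func() error, rollback func()) error {
	if err := save(); err != nil {
		return err
	}
	if err := process(); err != nil {
		// Undo the block and state writes so HasBlock/HasState report false.
		rollback()
		return err
	}
	return nil
}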

func TestRollbackBlock_ContextDeadline(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx

st, keys := util.DeterministicGenesisState(t, 64)
stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err, "Could not hash genesis state")

require.NoError(t, service.saveGenesisData(ctx, st))

genesis := blocks.NewGenesisBlock(stateRoot[:])
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
parentRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))

st, err = service.HeadState(ctx)
require.NoError(t, err)
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err := b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err := service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))

b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
require.NoError(t, err)
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
root, err = b.Block.HashTreeRoot()
require.NoError(t, err)
preState, err = service.getBlockPreState(ctx, wsb.Block())
require.NoError(t, err)
postState, err = service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))

require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err := service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, true, hasState)

// Use an already-canceled context when processing the block
cancCtx, canc := context.WithCancel(context.Background())
canc()
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
require.NoError(t, err)

parentRoot = roblock.Block().ParentRoot()

cj := &ethpb.Checkpoint{}
cj.Epoch = 1
cj.Root = parentRoot[:]
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
require.NoError(t, postState.SetFinalizedCheckpoint(cj))

// Rollback block insertion into db and caches.
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))

// The block should no longer exist.
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
hasState, err = service.cfg.StateGen.HasState(ctx, root)
require.NoError(t, err)
require.Equal(t, false, hasState)
}

func fakeCommitments(n int) [][]byte {

@@ -10,6 +10,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -46,7 +47,7 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r32))

state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r33))

@@ -54,10 +55,12 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
a := util.NewAttestation()
a.Data.Target.Epoch = 1
a.Data.Target.Root = []byte{'c'}
a.Data.BeaconBlockRoot = r33[:]
r33Root := r33.Root()
a.Data.BeaconBlockRoot = r33Root[:]
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))

a.Data.Target.Root = r32[:]
r32Root := r32.Root()
a.Data.Target.Root = r32Root[:]
err = service.VerifyLmdFfgConsistency(context.Background(), a)
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
}
@@ -116,7 +119,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
require.NoError(t, err)
require.Equal(t, 2, fcs.NodeCount())
@@ -176,7 +181,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
require.Equal(t, 2, fcs.NodeCount())
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
require.Equal(t, tRoot, service.head.root)

@@ -17,6 +17,8 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -51,6 +53,12 @@ type BlobReceiver interface {
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
}

// DataColumnReceiver interface defines the methods of chain service for receiving new
// data columns
type DataColumnReceiver interface {
ReceiveDataColumn(blocks.VerifiedRODataColumn) error
}
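
DataColumnReceiver mirrors BlobReceiver for PeerDAS data columns. A minimal sketch of a consumer that depends only on this capability (hypothetical caller, not part of this diff):

// Hypothetical sync-side helper: accepts anything implementing
// DataColumnReceiver and forwards each verified column to the chain service.
func storeVerifiedColumns(recv DataColumnReceiver, cols []blocks.VerifiedRODataColumn) error {
	for _, col := range cols {
		if err := recv.ReceiveDataColumn(col); err != nil {
			return err
		}
	}
	return nil
}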

// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -69,6 +77,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
return nil
}

receivedTime := time.Now()
s.blockBeingSynced.set(blockRoot)
defer s.blockBeingSynced.unset(blockRoot)
@@ -77,20 +86,28 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
if err != nil {
return err
}

preState, err := s.getBlockPreState(ctx, blockCopy.Block())
if err != nil {
return errors.Wrap(err, "could not get block's prestate")
}

currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}

postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return err
}

daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return err
}

// Defragment the state before continuing block processing.
s.defragmentState(postState)

@@ -102,8 +119,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}
args := &postBlockProcessConfig{
ctx: ctx,
signed: blockCopy,
blockRoot: blockRoot,
roblock: roblock,
postState: postState,
isValidPayload: isValidPayload,
}
@@ -184,8 +200,7 @@ func (s *Service) updateCheckpoints(
func (s *Service) validateExecutionAndConsensus(
ctx context.Context,
preState state.BeaconState,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
block consensusblocks.ROBlock,
) (state.BeaconState, bool, error) {
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
@@ -204,7 +219,7 @@ func (s *Service) validateExecutionAndConsensus(
var isValidPayload bool
eg.Go(func() error {
var err error
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
@@ -228,12 +243,14 @@ func (s *Service) handleDA(
if err != nil {
return 0, err
}
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), rob); err != nil {

nodeID := s.cfg.P2P.NodeID()
if err := avs.IsDataAvailable(ctx, nodeID, s.CurrentSlot(), rob); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability (AvailabilityStore.IsDataAvailable)")
}
} else {
if err := s.isDataAvailable(ctx, blockRoot, block); err != nil {
return 0, errors.Wrap(err, "could not validate blob data availability")
return 0, errors.Wrap(err, "is data available")
}
}
daWaitedTime := time.Since(daStartTime)
@@ -273,7 +290,6 @@ func (s *Service) reportPostBlockProcessing(
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
go func() {
finalizedState.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
s.sendNewFinalizedEvent(ctx, finalizedState)
}()
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
@@ -350,6 +366,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock

// HasBlock returns true if the block of the input root exists in initial sync blocks cache or DB.
func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
if s.BlockBeingSynced(root) {
return false
}
return s.hasBlockInInitSyncOrDB(ctx, root)
}

@@ -553,16 +572,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
}

// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
if block.Block().Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
return isValidPayload, err
}
}

@@ -278,6 +278,8 @@ func TestService_HasBlock(t *testing.T) {
r, err = b.Block.HashTreeRoot()
require.NoError(t, err)
require.Equal(t, true, s.HasBlock(context.Background(), r))
s.blockBeingSynced.set(r)
require.Equal(t, false, s.HasBlock(context.Background(), r))
}

func TestCheckSaveHotStateDB_Enabling(t *testing.T) {

beacon-chain/blockchain/receive_data_column.go (new file, +14)
@@ -0,0 +1,14 @@
package blockchain

import (
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
)

func (s *Service) ReceiveDataColumn(ds blocks.VerifiedRODataColumn) error {
if err := s.blobStorage.SaveDataColumn(ds); err != nil {
return errors.Wrap(err, "save data column")
}

return nil
}
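
The new file keeps ReceiveDataColumn deliberately thin: persistence is delegated to blob storage, and the method satisfies the DataColumnReceiver interface introduced above. A hedged usage sketch, assuming svc is the blockchain *Service and col is a verified column produced by the verification pipeline:

// Hypothetical call site; error handling style follows this package.
if err := svc.ReceiveDataColumn(col); err != nil {
	log.WithError(err).Error("Could not receive data column")
}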
@@ -36,6 +36,7 @@ import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -81,7 +82,7 @@ type config struct {
ExitPool voluntaryexits.PoolManager
SlashingPool slashings.PoolManager
BLSToExecPool blstoexec.PoolManager
P2p p2p.Broadcaster
P2P p2p.Acceser
MaxRoutines int
StateNotifier statefeed.Notifier
ForkChoiceStore f.ForkChoicer
@@ -106,15 +107,17 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
sync.RWMutex
notifiers map[[32]byte]chan uint64
seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
seenIndex map[[32]byte][fieldparams.NumberOfColumns]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
if idx >= fieldparams.MaxBlobsPerBlock {
return
}
// TODO: Separate Data Columns from blobs
/*
if idx >= fieldparams.MaxBlobsPerBlock {
return
}*/

bn.Lock()
seen := bn.seenIndex[root]
@@ -128,7 +131,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
// Retrieve or create the notifier channel for the given root.
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}

@@ -142,7 +145,7 @@ func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
defer bn.Unlock()
c, ok := bn.notifiers[root]
if !ok {
c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
c = make(chan uint64, fieldparams.NumberOfColumns)
bn.notifiers[root] = c
}
return c
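
notifyIndex and forRoot form a small fan-in: producers push each stored index onto the per-root channel, and a waiter drains it until enough distinct indices arrive. A sketch of the waiting side, assuming expected is the number of sidecars required for the block; real callers would also select on a context or slot deadline rather than blocking indefinitely:

// Hypothetical waiter built on forRoot (illustration only).
func waitForIndices(bn *blobNotifierMap, root [32]byte, expected int) {
	seen := make(map[uint64]bool)
	c := bn.forRoot(root)
	for len(seen) < expected {
		seen[<-c] = true // one notification per stored blob or column index
	}
}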
@@ -168,7 +171,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
ctx, cancel := context.WithCancel(ctx)
bn := &blobNotifierMap{
notifiers: make(map[[32]byte]chan uint64),
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
}
srv := &Service{
ctx: ctx,
@@ -303,7 +306,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint state")
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
if err != nil {
return errors.Wrap(err, "could not get finalized checkpoint block")
}
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
return errors.Wrap(err, "could not insert finalized block to forkchoice")
}
if !features.Get().EnableStartOptimistic {
@@ -329,8 +340,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "failed to initialize blockchain service")
}

saved.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110

return nil
}

@@ -517,7 +526,11 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon

s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
if err != nil {
return err
}
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
log.WithError(err).Fatal("Could not process genesis block for fork choice")
}
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)

@@ -97,13 +97,14 @@ func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
WithAttestationPool(attestations.NewPool()),
WithSlashingPool(slashings.NewPool()),
WithExitPool(voluntaryexits.NewPool()),
WithP2PBroadcaster(&mockBroadcaster{}),
WithP2PBroadcaster(&mockAccesser{}),
WithStateNotifier(&mockBeaconNode{}),
WithForkChoiceStore(fc),
WithAttestationService(attService),
WithStateGen(stateGen),
WithPayloadIDCache(cache.NewPayloadIDCache()),
WithClockSynchronizer(startup.NewClockSynchronizer()),
WithP2PBroadcaster(&mockAccesser{}),
}

chainService, err := NewService(ctx, opts...)
@@ -376,11 +377,15 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
}
b := util.NewBeaconBlock()
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
beaconState, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))

assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
@@ -453,7 +458,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
require.NoError(b, err)
beaconState, err := util.NewBeaconState()
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(b, err)
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(b, err)
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))

b.ResetTimer()
for i := 0; i < b.N; i++ {
@@ -579,7 +588,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
// Initialize a blobNotifierMap
bn := &blobNotifierMap{
seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
seenIndex: make(map[[32]byte][fieldparams.NumberOfColumns]bool),
notifiers: make(map[[32]byte]chan uint64),
}

@@ -19,8 +19,10 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p"
p2pTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/testing/require"
"google.golang.org/protobuf/proto"
@@ -45,6 +47,11 @@ type mockBroadcaster struct {
broadcastCalled bool
}

type mockAccesser struct {
mockBroadcaster
p2pTesting.MockPeerManager
}

func (mb *mockBroadcaster) Broadcast(_ context.Context, _ proto.Message) error {
mb.broadcastCalled = true
return nil
@@ -65,6 +72,11 @@ func (mb *mockBroadcaster) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.B
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumn(_ context.Context, _ [fieldparams.RootLength]byte, _ uint64, _ *ethpb.DataColumnSidecar) error {
mb.broadcastCalled = true
return nil
}

func (mb *mockBroadcaster) BroadcastBLSChanges(_ context.Context, _ []*ethpb.SignedBLSToExecutionChange) {
}

@@ -122,6 +134,7 @@ func minimalTestService(t *testing.T, opts ...Option) (*Service, *testServiceReq
WithBlobStorage(filesystem.NewEphemeralBlobStorage(t)),
WithSyncChecker(mock.MockChecker{}),
WithExecutionEngineCaller(&mockExecution.EngineClient{}),
WithP2PBroadcaster(&mockAccesser{}),
}
// append the variadic opts so they override the defaults by being processed afterwards
opts = append(defOpts, opts...)

@@ -567,7 +567,7 @@ func prepareForkchoiceState(
payloadHash [32]byte,
justified *ethpb.Checkpoint,
finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, blocks.ROBlock, error) {
blockHeader := &ethpb.BeaconBlockHeader{
ParentRoot: parentRoot[:],
}
@@ -588,7 +588,26 @@ func prepareForkchoiceState(

base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
st, err := state_native.InitializeFromProtoBellatrix(base)
return st, blockRoot, err
if err != nil {
return nil, blocks.ROBlock{}, err
}
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: slot,
ParentRoot: parentRoot[:],
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &enginev1.ExecutionPayload{
BlockHash: payloadHash[:],
},
},
},
}
signed, err := blocks.NewSignedBeaconBlock(blk)
if err != nil {
return nil, blocks.ROBlock{}, err
}
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
return st, roblock, err
}
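
prepareForkchoiceState now returns a blocks.ROBlock instead of a bare root, matching the new InsertNode signature; call sites recover a [32]byte with Root() where one is still needed, as the TestVerifyLMDFFGConsistent hunk earlier shows. A condensed usage sketch inside a test, reusing its fixtures (t, ctx, fc, f):

st, roblock, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, roblock))
root := roblock.Root() // the ROBlock carries its cached root
_ = root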

// CachedHeadRoot mocks the same method in the chain service
@@ -631,9 +650,9 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
}

// InsertNode mocks the same method in the chain service
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
if s.ForkChoiceStore != nil {
return s.ForkChoiceStore.InsertNode(ctx, st, root)
return s.ForkChoiceStore.InsertNode(ctx, st, block)
}
return nil
}
@@ -683,6 +702,11 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
return nil
}

// ReceiveDataColumn implements the same method in chain service
func (*ChainService) ReceiveDataColumn(_ blocks.VerifiedRODataColumn) error {
return nil
}

// TargetRootForEpoch mocks the same method in the chain service
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

beacon-chain/cache/BUILD.bazel (vendored, +1)
@@ -8,6 +8,7 @@ go_library(
"attestation_data.go",
"balance_cache_key.go",
"checkpoint_state.go",
"column_subnet_ids.go",
"committee.go",
"committee_disabled.go", # keep
"committees.go",

beacon-chain/cache/column_subnet_ids.go (new file, vendored, +70)
@@ -0,0 +1,70 @@
package cache

import (
"sync"
"time"

"github.com/patrickmn/go-cache"
"github.com/prysmaticlabs/prysm/v5/config/params"
)

type columnSubnetIDs struct {
colSubCache *cache.Cache
colSubLock sync.RWMutex
}

// ColumnSubnetIDs for column subnet participants
var ColumnSubnetIDs = newColumnSubnetIDs()

const columnKey = "columns"

func newColumnSubnetIDs() *columnSubnetIDs {
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
epochDuration := time.Duration(slotsPerEpoch.Mul(secondsPerSlot))

// Set the default duration of a column subnet subscription as the column expiry period.
minEpochsForDataColumnSidecarsRequest := time.Duration(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
subLength := epochDuration * minEpochsForDataColumnSidecarsRequest

persistentCache := cache.New(subLength*time.Second, epochDuration*time.Second)
return &columnSubnetIDs{colSubCache: persistentCache}
}

// GetColumnSubnets retrieves the data column subnets.
func (s *columnSubnetIDs) GetColumnSubnets() ([]uint64, bool, time.Time) {
s.colSubLock.RLock()
defer s.colSubLock.RUnlock()

id, duration, ok := s.colSubCache.GetWithExpiration(columnKey)
if !ok {
return nil, false, time.Time{}
}
// Retrieve indices from the cache.
idxs, ok := id.([]uint64)
if !ok {
return nil, false, time.Time{}
}

return idxs, ok, duration
}

// AddColumnSubnets adds the relevant data column subnets.
func (s *columnSubnetIDs) AddColumnSubnets(colIdx []uint64) {
s.colSubLock.Lock()
defer s.colSubLock.Unlock()

s.colSubCache.Set(columnKey, colIdx, 0)
}

// EmptyAllCaches empties out all the related caches and flushes any stored
// entries from them. This should only ever be used for testing; in normal
// production, handling of the relevant subnets for each role is done
// separately.
func (s *columnSubnetIDs) EmptyAllCaches() {
// Clear the cache.
s.colSubLock.Lock()
defer s.colSubLock.Unlock()

s.colSubCache.Flush()
}
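
The cache is a single-entry TTL store: AddColumnSubnets records the node's column subnets under one key, and the entry expires after the column retention period. A short usage sketch (the subnet indices are illustrative, and fmt is assumed imported by the caller):

// Record the column subnets this node serves, then read them back together
// with the cached entry's expiration time.
cache.ColumnSubnetIDs.AddColumnSubnets([]uint64{3, 17, 42})
subnets, ok, expiration := cache.ColumnSubnetIDs.GetColumnSubnets()
if ok {
	fmt.Printf("subscribed to columns %v until %s\n", subnets, expiration)
}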
@@ -37,7 +37,7 @@ func ProcessDeposits(
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, err
}
@@ -46,7 +46,7 @@ func ProcessDeposits(
if deposit == nil || deposit.Data == nil {
return nil, errors.New("got a nil deposit in block")
}
beaconState, err = ProcessDeposit(beaconState, deposit, batchVerified)
beaconState, err = ProcessDeposit(beaconState, deposit, allSignaturesVerified)
if err != nil {
return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
}
@@ -81,7 +81,7 @@ func ProcessDeposits(
// amount=deposit.data.amount,
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
@@ -92,7 +92,7 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
return nil, err
}

return ApplyDeposit(beaconState, deposit.Data, verifySignature)
return ApplyDeposit(beaconState, deposit.Data, allSignaturesVerified)
}

// ApplyDeposit
@@ -115,13 +115,13 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSignaturesVerified bool) (state.BeaconState, error) {
pubKey := data.PublicKey
amount := data.Amount
withdrawalCredentials := data.WithdrawalCredentials
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if verifySignature {
if !allSignaturesVerified {
valid, err := blocks.IsValidDepositSignature(data)
if err != nil {
return nil, err
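
The rename from verifySignature to allSignaturesVerified flips the flag's meaning and makes the control flow explicit: when the batch check over all deposit signatures succeeds, per-deposit verification is skipped entirely; when it fails, ApplyDeposit falls back to checking each new pubkey individually. Condensed from the hunk above, with the skip-on-invalid behavior that TestProcessDeposit_SkipsInvalidDeposit below relies on:

// Condensed fallback path inside ApplyDeposit: reached only for pubkeys not
// yet in the registry, and only when the batch check did not cover everyone.
if !allSignaturesVerified {
	valid, err := blocks.IsValidDepositSignature(data)
	if err != nil {
		return nil, err
	}
	if !valid {
		return beaconState, nil // an invalid deposit is skipped, not an error
	}
}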
@@ -187,11 +187,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
// return Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// effective_balance=effective_balance,
// slashed=False,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=effective_balance,
// )
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
@@ -202,10 +203,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
return &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
EffectiveBalance: effectiveBalance,
Slashed: false,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: effectiveBalance,
}
}

@@ -199,7 +199,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
},
})
require.NoError(t, err)
newState, err := altair.ProcessDeposit(beaconState, dep[0], true)
newState, err := altair.ProcessDeposit(beaconState, dep[0], false)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")

if newState.Eth1DepositIndex() != 1 {

@@ -448,6 +448,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
Target: &ethpb.Checkpoint{
Epoch: primitives.Epoch(i),
},
Source: &ethpb.Checkpoint{},
}
}

@@ -489,6 +490,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
Target: &ethpb.Checkpoint{
Root: []byte{},
},
Source: &ethpb.Checkpoint{},
},
Signature: sig.Marshal(),
AggregationBits: list,

@@ -55,12 +55,26 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi
return false, err
}

verified := false
if err := verifyDepositDataWithDomain(ctx, deposits, domain); err != nil {
log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
verified = true
return false, nil
}
return verified, nil
return true, nil
}

// BatchVerifyPendingDepositsSignatures batch verifies pending deposit signatures.
func BatchVerifyPendingDepositsSignatures(ctx context.Context, deposits []*ethpb.PendingDeposit) (bool, error) {
var err error
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
if err != nil {
return false, err
}

if err := verifyPendingDepositDataWithDomain(ctx, deposits, domain); err != nil {
log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
return false, nil
}
return true, nil
}

// IsValidDepositSignature returns whether deposit_data is valid
@@ -159,3 +173,44 @@ func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, dom
}
return nil
}

func verifyPendingDepositDataWithDomain(ctx context.Context, deps []*ethpb.PendingDeposit, domain []byte) error {
if len(deps) == 0 {
return nil
}
pks := make([]bls.PublicKey, len(deps))
sigs := make([][]byte, len(deps))
msgs := make([][32]byte, len(deps))
for i, dep := range deps {
if ctx.Err() != nil {
return ctx.Err()
}
if dep == nil {
return errors.New("nil deposit")
}
dpk, err := bls.PublicKeyFromBytes(dep.PublicKey)
if err != nil {
return err
}
pks[i] = dpk
sigs[i] = dep.Signature
depositMessage := &ethpb.DepositMessage{
PublicKey: dep.PublicKey,
WithdrawalCredentials: dep.WithdrawalCredentials,
Amount: dep.Amount,
}
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
if err != nil {
return err
}
msgs[i] = sr
}
verify, err := bls.VerifyMultipleSignatures(sigs, msgs, pks)
if err != nil {
return errors.Errorf("could not verify multiple signatures: %v", err)
}
if !verify {
return errors.New("one or more deposit signatures did not verify")
}
return nil
}
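
Callers of the batch helpers only see the boolean: false with a nil error means "fall back to individual verification", as the Debug log notes. A minimal call-site sketch, assuming ctx and pendingDeposits are in scope:

allVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
if err != nil {
	return err // domain computation or BLS failure, not a bad signature
}
// allVerified == false is non-fatal: each deposit is re-checked
// individually during ApplyDeposit.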

@@ -17,6 +17,41 @@ import (
)

func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
Amount: 3000,
},
}
sr, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
PublicKey: deposit.Data.PublicKey,
WithdrawalCredentials: deposit.Data.WithdrawalCredentials,
Amount: 3000,
}, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
deposit.Data.Signature = sig.Marshal()
leaf, err := deposit.Data.HashTreeRoot()
require.NoError(t, err)
// We then create a merkle branch for the test.
depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not generate trie")
proof, err := depositTrie.MerkleProof(0)
require.NoError(t, err, "Could not generate proof")

deposit.Proof = proof
require.NoError(t, err)
verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, true, verified)
}

func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
deposit := &ethpb.Deposit{
Data: &ethpb.Deposit_Data{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
@@ -34,9 +69,9 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {

deposit.Proof = proof
require.NoError(t, err)
ok, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, true, ok)
require.Equal(t, false, verified)
}

func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
@@ -93,3 +128,54 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
require.NoError(t, err)
require.Equal(t, true, valid)
}

func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
sk, err := bls.RandKey()
require.NoError(t, err)
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
pendingDeposit := &ethpb.PendingDeposit{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
Amount: 3000,
}
sr, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
PublicKey: pendingDeposit.PublicKey,
WithdrawalCredentials: pendingDeposit.WithdrawalCredentials,
Amount: 3000,
}, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
pendingDeposit.Signature = sig.Marshal()

sk2, err := bls.RandKey()
require.NoError(t, err)
pendingDeposit2 := &ethpb.PendingDeposit{
PublicKey: sk2.PublicKey().Marshal(),
WithdrawalCredentials: make([]byte, 32),
Amount: 4000,
}
sr2, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
PublicKey: pendingDeposit2.PublicKey,
WithdrawalCredentials: pendingDeposit2.WithdrawalCredentials,
Amount: 4000,
}, domain)
require.NoError(t, err)
sig2 := sk2.Sign(sr2[:])
pendingDeposit2.Signature = sig2.Marshal()

verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
require.NoError(t, err)
require.Equal(t, true, verified)
}

func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
pendingDeposit := &ethpb.PendingDeposit{
PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
}
verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit})
require.NoError(t, err)
require.Equal(t, false, verified)
}

@@ -61,6 +61,9 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
if body == nil {
return false, errors.New("nil block body")
}
if body.Version() >= version.Capella {
return true, nil
}
payload, err := body.Execution()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
@@ -202,24 +205,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
payload, err := body.Execution()
if err != nil {
return nil, err
return err
}
if err := verifyBlobCommitmentCount(body); err != nil {
return nil, err
return err
}
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
return nil, err
return err
}
if err := ValidatePayload(st, payload); err != nil {
return nil, err
return err
}
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
return nil, err
return err
}
return st, nil
return nil
}
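
Since ProcessPayload mutates the state in place, returning it was redundant; the function now reads as a pure error check. Call-site migration in brief (the updated tests below follow the same pattern):

// Before: st, err := blocks.ProcessPayload(st, body)
// After: the state argument is mutated in place.
if err := blocks.ProcessPayload(st, body); err != nil {
	return err
}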

func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {

@@ -253,7 +253,8 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
require.NoError(t, err)
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
require.NoError(t, err)
require.Equal(t, false, got)
// #14614
require.Equal(t, true, got)
}

func Test_IsExecutionEnabled(t *testing.T) {
@@ -587,8 +588,7 @@ func Test_ProcessPayload(t *testing.T) {
ExecutionPayload: tt.payload,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
if err := blocks.ProcessPayload(st, body); err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)
@@ -619,8 +619,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
ExecutionPayload: payload,
})
require.NoError(t, err)
_, err = blocks.ProcessPayload(st, body)
require.NoError(t, err)
require.NoError(t, blocks.ProcessPayload(st, body))
}

func Test_ProcessPayload_Blinded(t *testing.T) {
@@ -677,8 +676,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
ExecutionPayloadHeader: p,
})
require.NoError(t, err)
st, err := blocks.ProcessPayload(st, body)
if err != nil {
if err := blocks.ProcessPayload(st, body); err != nil {
require.Equal(t, tt.err.Error(), err.Error())
} else {
require.Equal(t, tt.err, err)

@@ -96,6 +96,24 @@ func VerifyBlockHeaderSignature(beaconState state.BeaconState, header *ethpb.Sig
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

func VerifyBlockHeaderSignatureUsingCurrentFork(beaconState state.BeaconState, header *ethpb.SignedBeaconBlockHeader) error {
currentEpoch := slots.ToEpoch(header.Header.Slot)
fork, err := forks.Fork(currentEpoch)
if err != nil {
return err
}
domain, err := signing.Domain(fork, currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorsRoot())
if err != nil {
return err
}
proposer, err := beaconState.ValidatorAtIndex(header.Header.ProposerIndex)
if err != nil {
return err
}
proposerPubKey := proposer.PublicKey
return signing.VerifyBlockHeaderSigningRoot(header.Header, proposerPubKey, header.Signature, domain)
}

// VerifyBlockSignatureUsingCurrentFork verifies the proposer signature of a beacon block. This differs
// from the above method by not using fork data from the state and instead retrieving it
// via the respective epoch.

@@ -120,35 +120,36 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
if err != nil {
return nil, errors.Wrap(err, "could not get expected withdrawals")
}
@@ -192,7 +193,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
}

if st.Version() >= version.Electra {
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
}
}
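
DequeuePendingPartialWithdrawals mirrors the spec line state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]. A sketch of that slice semantics on a plain Go slice (illustrative only; the element type is a placeholder, not Prysm's internal representation):

// Illustrative only: drop the first `processed` queued entries, keeping the
// remainder pending for later blocks.
func dequeue[T any](pending []T, processed uint64) []T {
	if processed > uint64(len(pending)) {
		processed = uint64(len(pending))
	}
	return pending[processed:]
}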

@@ -32,11 +32,13 @@ go_library(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -52,24 +54,27 @@ go_test(
        "deposit_fuzz_test.go",
        "deposits_test.go",
        "effective_balance_updates_test.go",
        "export_test.go",
        "registry_updates_test.go",
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
        "withdrawals_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/testing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",

@@ -15,6 +15,7 @@ import (
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)

// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
@@ -22,25 +23,28 @@ import (
//
// Spec definition:
//
// def process_pending_consolidations(state: BeaconState) -> None:
//     next_pending_consolidation = 0
//     for pending_consolidation in state.pending_consolidations:
//         source_validator = state.validators[pending_consolidation.source_index]
//         if source_validator.slashed:
//             next_pending_consolidation += 1
//             continue
//         if source_validator.withdrawable_epoch > get_current_epoch(state):
//             break
// def process_pending_consolidations(state: BeaconState) -> None:
//
//         # Churn any target excess active balance of target and raise its max
//         switch_to_compounding_validator(state, pending_consolidation.target_index)
//         # Move active balance to target. Excess balance is withdrawable.
//         active_balance = get_active_balance(state, pending_consolidation.source_index)
//         decrease_balance(state, pending_consolidation.source_index, active_balance)
//         increase_balance(state, pending_consolidation.target_index, active_balance)
//     next_epoch = Epoch(get_current_epoch(state) + 1)
//     next_pending_consolidation = 0
//     for pending_consolidation in state.pending_consolidations:
//         source_validator = state.validators[pending_consolidation.source_index]
//         if source_validator.slashed:
//             next_pending_consolidation += 1
//             continue
//         if source_validator.withdrawable_epoch > next_epoch:
//             break
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
//         # Calculate the consolidated balance
//         max_effective_balance = get_max_effective_balance(source_validator)
//         source_effective_balance = min(state.balances[pending_consolidation.source_index], max_effective_balance)
//
//         # Move active balance to target. Excess balance is withdrawable.
//         decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
//         increase_balance(state, pending_consolidation.target_index, source_effective_balance)
//         next_pending_consolidation += 1
//
//     state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
	_, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
	defer span.End()
@@ -51,37 +55,34 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err

	nextEpoch := slots.ToEpoch(st.Slot()) + 1

	var nextPendingConsolidation uint64
	pendingConsolidations, err := st.PendingConsolidations()
	if err != nil {
		return err
	}

	var nextPendingConsolidation uint64
	for _, pc := range pendingConsolidations {
		sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
		sourceValidator, err := st.ValidatorAtIndexReadOnly(pc.SourceIndex)
		if err != nil {
			return err
		}
		if sourceValidator.Slashed {
		if sourceValidator.Slashed() {
			nextPendingConsolidation++
			continue
		}
		if sourceValidator.WithdrawableEpoch > nextEpoch {
		if sourceValidator.WithdrawableEpoch() > nextEpoch {
			break
		}

		if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
			return err
		}

		activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
		validatorBalance, err := st.BalanceAtIndex(pc.SourceIndex)
		if err != nil {
			return err
		}
		if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
		b := min(validatorBalance, helpers.ValidatorMaxEffectiveBalance(sourceValidator))

		if err := helpers.DecreaseBalance(st, pc.SourceIndex, b); err != nil {
			return err
		}
		if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
		if err := helpers.IncreaseBalance(st, pc.TargetIndex, b); err != nil {
			return err
		}
		nextPendingConsolidation++
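
// A minimal illustrative sketch (hypothetical helper, not part of this diff): the
// new hunk above moves min(balance, max_effective_balance) rather than the full
// active balance, so any excess over the source's cap stays behind as withdrawable.
func consolidatedBalance(sourceBalance, maxEffectiveBalance uint64) uint64 {
	if sourceBalance < maxEffectiveBalance {
		return sourceBalance // the whole balance fits under the cap and moves to the target
	}
	return maxEffectiveBalance // only the capped amount moves; the excess stays with the source
}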
@@ -101,6 +102,16 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
//     state: BeaconState,
//     consolidation_request: ConsolidationRequest
// ) -> None:
//     if is_valid_switch_to_compounding_request(state, consolidation_request):
//         validator_pubkeys = [v.pubkey for v in state.validators]
//         request_source_pubkey = consolidation_request.source_pubkey
//         source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//         switch_to_compounding_validator(state, source_index)
//         return
//
//     # Verify that source != target, so a consolidation cannot be used as an exit.
//     if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
//         return
//     # If the pending consolidations queue is full, consolidation requests are ignored
//     if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
//         return
@@ -121,10 +132,6 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
//     source_validator = state.validators[source_index]
//     target_validator = state.validators[target_index]
//
//     # Verify that source != target, so a consolidation cannot be used as an exit.
//     if source_index == target_index:
//         return
//
//     # Verify source withdrawal credentials
//     has_correct_credential = has_execution_withdrawal_credential(source_validator)
//     is_correct_source_address = (
@@ -160,19 +167,14 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
//         source_index=source_index,
//         target_index=target_index
//     ))
//
//     # Churn any target excess active balance of target and raise its max
//     if has_eth1_withdrawal_credential(target_validator):
//         switch_to_compounding_validator(state, target_index)
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
	if len(reqs) == 0 || st == nil {
		return nil
	}

	activeBal, err := helpers.TotalActiveBalance(st)
	if err != nil {
		return err
	}
	churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
	if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
		return nil
	}
	curEpoch := slots.ToEpoch(st.Slot())
	ffe := params.BeaconConfig().FarFutureEpoch
	minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
@@ -182,22 +184,44 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
		if ctx.Err() != nil {
			return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
		}
		if IsValidSwitchToCompoundingRequest(st, cr) {
			srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
			if !ok {
				log.Error("failed to find source validator index")
				continue
			}
			if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
				log.WithError(err).Error("failed to switch to compounding validator")
			}
			continue
		}
		sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
		targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
		if sourcePubkey == targetPubkey {
			continue
		}

		if npc, err := st.NumPendingConsolidations(); err != nil {
			return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
		} else if npc >= pcLimit {
			return nil
		}

		srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
		if !ok {
			continue
		activeBal, err := helpers.TotalActiveBalance(st)
		if err != nil {
			return err
		}
		tgtIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.TargetPubkey))
		if !ok {
			continue
		churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
		if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
			return nil
		}

		if srcIdx == tgtIdx {
		srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
		if !ok {
			continue
		}
		tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
		if !ok {
			continue
		}

@@ -237,7 +261,8 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
		// Initiate the exit of the source validator.
		exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
		if err != nil {
			return fmt.Errorf("failed to compute consolidaiton epoch: %w", err)
			log.WithError(err).Error("failed to compute consolidation epoch")
			continue
		}
		srcV.ExitEpoch = exitEpoch
		srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
@@ -248,7 +273,95 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
		if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
			return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
		}

		if helpers.HasETH1WithdrawalCredential(tgtV) {
			if err := SwitchToCompoundingValidator(st, tgtIdx); err != nil {
				log.WithError(err).Error("failed to switch to compounding validator")
				continue
			}
		}
	}

	return nil
}
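
// Illustrative usage sketch (hypothetical values, not part of this diff): feeding a
// single request through ProcessConsolidationRequests; the zeroed pubkeys and source
// address are placeholders, and ctx/st are assumed to be in scope at the call site.
//
//	reqs := []*enginev1.ConsolidationRequest{{
//		SourceAddress: make([]byte, 20),
//		SourcePubkey:  make([]byte, 48),
//		TargetPubkey:  make([]byte, 48),
//	}}
//	if err := ProcessConsolidationRequests(ctx, st, reqs); err != nil {
//		log.WithError(err).Error("consolidation request processing failed")
//	}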

// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:
//
// def is_valid_switch_to_compounding_request(
//     state: BeaconState,
//     consolidation_request: ConsolidationRequest
// ) -> bool:
//     # Switch to compounding requires source and target be equal
//     if consolidation_request.source_pubkey != consolidation_request.target_pubkey:
//         return False
//
//     # Verify pubkey exists
//     source_pubkey = consolidation_request.source_pubkey
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     if source_pubkey not in validator_pubkeys:
//         return False
//
//     source_validator = state.validators[ValidatorIndex(validator_pubkeys.index(source_pubkey))]
//
//     # Verify request has been authorized
//     if source_validator.withdrawal_credentials[12:] != consolidation_request.source_address:
//         return False
//
//     # Verify source withdrawal credentials
//     if not has_eth1_withdrawal_credential(source_validator):
//         return False
//
//     # Verify the source is active
//     current_epoch = get_current_epoch(state)
//     if not is_active_validator(source_validator, current_epoch):
//         return False
//
//     # Verify exit for source has not been initiated
//     if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
//         return False
//
//     return True
func IsValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
	if req.SourcePubkey == nil || req.TargetPubkey == nil {
		return false
	}

	if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
		return false
	}

	srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
	if !ok {
		return false
	}
	// As per the consensus specification, this error is not considered an assertion.
	// Therefore, if the source_pubkey is not found in validator_pubkeys, we simply return false.
	srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
	if err != nil {
		return false
	}
	sourceAddress := req.SourceAddress
	withdrawalCreds := srcV.GetWithdrawalCredentials()
	if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
		return false
	}

	if !helpers.HasETH1WithdrawalCredential(srcV) {
		return false
	}

	curEpoch := slots.ToEpoch(st.Slot())
	if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
		return false
	}

	if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
		return false
	}
	return true
}

@@ -13,6 +13,7 @@ import (
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestProcessPendingConsolidations(t *testing.T) {
@@ -80,10 +81,10 @@ func TestProcessPendingConsolidations(t *testing.T) {
			require.NoError(t, err)
			require.Equal(t, uint64(0), num)

			// v1 is switched to compounding validator.
			// v1 withdrawal credentials should not be updated.
			v1, err := st.ValidatorAtIndex(1)
			require.NoError(t, err)
			require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
			require.Equal(t, params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
		},
		wantErr: false,
	},
@@ -201,38 +202,6 @@ func TestProcessPendingConsolidations(t *testing.T) {
	}
}

func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
	gwei := balETH * 1_000_000_000
	balPerVal := params.BeaconConfig().MinActivationBalance
	numVals := gwei / balPerVal

	vals := make([]*eth.Validator, numVals)
	bals := make([]uint64, numVals)
	for i := uint64(0); i < numVals; i++ {
		wc := make([]byte, 32)
		wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
		wc[31] = byte(i)
		vals[i] = &eth.Validator{
			ActivationEpoch:       0,
			ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance:      balPerVal,
			WithdrawalCredentials: wc,
		}
		bals[i] = balPerVal
	}
	st, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
		Slot:       10 * params.BeaconConfig().SlotsPerEpoch,
		Validators: vals,
		Balances:   bals,
		Fork: &eth.Fork{
			CurrentVersion: params.BeaconConfig().ElectraForkVersion,
		},
	})
	require.NoError(t, err)

	return st
}
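
// Worked example of the arithmetic above (assuming the mainnet MinActivationBalance
// of 32_000_000_000 gwei): stateWithActiveBalanceETH(t, 1_000) computes
// gwei = 1_000 * 1_000_000_000 = 1e12, so numVals = 1e12 / 32e9 = 31 validators,
// the integer division discarding the remainder.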

func TestProcessConsolidationRequests(t *testing.T) {
	tests := []struct {
		name string
@@ -428,3 +397,87 @@ func TestProcessConsolidationRequests(t *testing.T) {
		})
	}
}

func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, 1)
	t.Run("nil source pubkey", func(t *testing.T) {
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourcePubkey: nil,
			TargetPubkey: []byte{'a'},
		})
		require.Equal(t, false, ok)
	})
	t.Run("nil target pubkey", func(t *testing.T) {
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			TargetPubkey: nil,
			SourcePubkey: []byte{'a'},
		})
		require.Equal(t, false, ok)
	})
	t.Run("different source and target pubkey", func(t *testing.T) {
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			TargetPubkey: []byte{'a'},
			SourcePubkey: []byte{'b'},
		})
		require.Equal(t, false, ok)
	})
	t.Run("source validator not found in state", func(t *testing.T) {
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourceAddress: make([]byte, 20),
			TargetPubkey:  []byte{'a'},
			SourcePubkey:  []byte{'a'},
		})
		require.Equal(t, false, ok)
	})
	t.Run("incorrect source address", func(t *testing.T) {
		v, err := st.ValidatorAtIndex(0)
		require.NoError(t, err)
		pubkey := v.PublicKey
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourceAddress: make([]byte, 20),
			TargetPubkey:  pubkey,
			SourcePubkey:  pubkey,
		})
		require.Equal(t, false, ok)
	})
	t.Run("incorrect eth1 withdrawal credential", func(t *testing.T) {
		v, err := st.ValidatorAtIndex(0)
		require.NoError(t, err)
		pubkey := v.PublicKey
		wc := v.WithdrawalCredentials
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourceAddress: wc[12:],
			TargetPubkey:  pubkey,
			SourcePubkey:  pubkey,
		})
		require.Equal(t, false, ok)
	})
	t.Run("is valid compounding request", func(t *testing.T) {
		v, err := st.ValidatorAtIndex(0)
		require.NoError(t, err)
		pubkey := v.PublicKey
		wc := v.WithdrawalCredentials
		v.WithdrawalCredentials[0] = 1
		require.NoError(t, st.UpdateValidatorAtIndex(0, v))
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourceAddress: wc[12:],
			TargetPubkey:  pubkey,
			SourcePubkey:  pubkey,
		})
		require.Equal(t, true, ok)
	})
	t.Run("already has an exit epoch", func(t *testing.T) {
		v, err := st.ValidatorAtIndex(0)
		require.NoError(t, err)
		pubkey := v.PublicKey
		wc := v.WithdrawalCredentials
		v.ExitEpoch = 100
		require.NoError(t, st.UpdateValidatorAtIndex(0, v))
		ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
			SourceAddress: wc[12:],
			TargetPubkey:  pubkey,
			SourcePubkey:  pubkey,
		})
		require.Equal(t, false, ok)
	})
}

@@ -2,13 +2,13 @@ package electra

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -17,6 +17,7 @@ import (
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)
@@ -38,7 +39,7 @@ func ProcessDeposits(
	defer span.End()
	// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
	// individual deposits with signature verification enabled.
	batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
	}
@@ -47,7 +48,7 @@ func ProcessDeposits(
		if d == nil || d.Data == nil {
			return nil, errors.New("got a nil deposit in block")
		}
		beaconState, err = ProcessDeposit(beaconState, d, batchVerified)
		beaconState, err = ProcessDeposit(beaconState, d, allSignaturesVerified)
		if err != nil {
			return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(d.Data.PublicKey))
		}
@@ -82,7 +83,7 @@ func ProcessDeposits(
//     amount=deposit.data.amount,
//     signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
	if deposit == nil || deposit.Data == nil {
		return nil, err
@@ -92,37 +93,49 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
	if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
		return nil, err
	}
	return ApplyDeposit(beaconState, deposit.Data, verifySignature)
	return ApplyDeposit(beaconState, deposit.Data, allSignaturesVerified)
}

// ApplyDeposit
// def apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> None:
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     if pubkey not in validator_pubkeys:
// ApplyDeposit adds the incoming deposit as a pending deposit on the state
//
//         # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//         if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
//             add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
// Spec pseudocode definition:
// def apply_deposit(state: BeaconState,
//
//     else:
//
//         # Increase balance by deposit amount
//         index = ValidatorIndex(validator_pubkeys.index(pubkey))
//         state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [Modified in Electra:EIP-7251]
//         # Check if valid deposit switch to compounding credentials
//
//         if ( is_compounding_withdrawal_credential(withdrawal_credentials) and has_eth1_withdrawal_credential(state.validators[index])
//
//             and is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature)
//         ):
//             switch_to_compounding_validator(state, index)
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
//     pubkey: BLSPubkey,
//     withdrawal_credentials: Bytes32,
//     amount: uint64,
//     signature: BLSSignature) -> None:
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     if pubkey not in validator_pubkeys:
//         # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//         if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
//             add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0)) # [Modified in Electra:EIP7251]
//             # [New in Electra:EIP7251]
//             state.pending_deposits.append(PendingDeposit(
//                 pubkey=pubkey,
//                 withdrawal_credentials=withdrawal_credentials,
//                 amount=amount,
//                 signature=signature,
//                 slot=GENESIS_SLOT, # Use GENESIS_SLOT to distinguish from a pending deposit request
//             ))
//     else:
//         # Increase balance by deposit amount
//         # [Modified in Electra:EIP7251]
//         state.pending_deposits.append(PendingDeposit(
//             pubkey=pubkey,
//             withdrawal_credentials=withdrawal_credentials,
//             amount=amount,
//             signature=signature,
//             slot=GENESIS_SLOT # Use GENESIS_SLOT to distinguish from a pending deposit request
//         ))
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSignaturesVerified bool) (state.BeaconState, error) {
	pubKey := data.PublicKey
	amount := data.Amount
	withdrawalCredentials := data.WithdrawalCredentials
	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
	signature := data.Signature
	_, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
	if !ok {
		if verifySignature {
		if !allSignaturesVerified {
			valid, err := IsValidDepositSignature(data)
			if err != nil {
				return nil, errors.Wrap(err, "could not verify deposit signature")
@@ -131,32 +144,20 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verif
				return beaconState, nil
			}
		}
		if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, amount); err != nil {

		if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, 0); err != nil { // # [Modified in Electra:EIP7251]
			return nil, errors.Wrap(err, "could not add validator to registry")
		}
	} else {
		// no validation on top-ups (phase0 feature). no validation before state change
		if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
			return nil, err
		}
		val, err := beaconState.ValidatorAtIndex(index)
		if err != nil {
			return nil, err
		}
		if helpers.IsCompoundingWithdrawalCredential(withdrawalCredentials) && helpers.HasETH1WithdrawalCredential(val) {
			if verifySignature {
				valid, err := IsValidDepositSignature(data)
				if err != nil {
					return nil, errors.Wrap(err, "could not verify deposit signature")
				}
				if !valid {
					return beaconState, nil
				}
			}
			if err := SwitchToCompoundingValidator(beaconState, index); err != nil {
				return nil, errors.Wrap(err, "could not switch to compound validator")
			}
		}
	}
	// no validation on top-ups (phase0 feature). no validation before state change
	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             pubKey,
		WithdrawalCredentials: withdrawalCredentials,
		Amount:                amount,
		Signature:             signature,
		Slot:                  params.BeaconConfig().GenesisSlot,
	}); err != nil {
		return nil, err
	}
	return beaconState, nil
}
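
// A minimal illustrative sketch (hypothetical helper, not part of this diff):
// pending deposits appended here carry Slot = GENESIS_SLOT, which is what later
// lets pending-deposit processing tell an Eth1-bridge deposit apart from a deposit
// request, since requests record the including block's slot (see processDepositRequest).
func isFromDepositRequest(d *ethpb.PendingDeposit) bool {
	return d.Slot > params.BeaconConfig().GenesisSlot
}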
@@ -185,152 +186,386 @@ func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error
	return deposit.VerifyDepositSignature(obj, domain)
}

// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
// ProcessPendingDeposits implements the spec definition below. This method mutates the state.
// Iterating over the `pending_deposits` queue, this function runs the following checks before applying a pending deposit:
// 1. All Eth1 bridge deposits are processed before the first deposit request gets processed.
// 2. Deposit position in the queue is finalized.
// 3. Deposit does not exceed the `MAX_PENDING_DEPOSITS_PER_EPOCH` limit.
// 4. Deposit does not exceed the activation churn limit.
//
// Spec definition:
//
// def process_pending_balance_deposits(state: BeaconState) -> None:
//     available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
//     processed_amount = 0
//     next_deposit_index = 0
//     deposits_to_postpone = []
// def process_pending_deposits(state: BeaconState) -> None:
//
//     for deposit in state.pending_balance_deposits:
//         validator = state.validators[deposit.index]
//     next_epoch = Epoch(get_current_epoch(state) + 1)
//     available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
//     processed_amount = 0
//     next_deposit_index = 0
//     deposits_to_postpone = []
//     is_churn_limit_reached = False
//     finalized_slot = compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
//
//     for deposit in state.pending_deposits:
//         # Do not process deposit requests if Eth1 bridge deposits are not yet applied.
//         if (
//             # Is deposit request
//             deposit.slot > GENESIS_SLOT and
//             # There are pending Eth1 bridge deposits
//             state.eth1_deposit_index < state.deposit_requests_start_index
//         ):
//             break
//
//         # Check if deposit has been finalized, otherwise, stop processing.
//         if deposit.slot > finalized_slot:
//             break
//
//         # Check if number of processed deposits has not reached the limit, otherwise, stop processing.
//         if next_deposit_index >= MAX_PENDING_DEPOSITS_PER_EPOCH:
//             break
//
//         # Read validator state
//         is_validator_exited = False
//         is_validator_withdrawn = False
//         validator_pubkeys = [v.pubkey for v in state.validators]
//         if deposit.pubkey in validator_pubkeys:
//             validator = state.validators[ValidatorIndex(validator_pubkeys.index(deposit.pubkey))]
//             is_validator_exited = validator.exit_epoch < FAR_FUTURE_EPOCH
//             is_validator_withdrawn = validator.withdrawable_epoch < next_epoch
//
//         if is_validator_withdrawn:
//             # Deposited balance will never become active. Increase balance but do not consume churn
//             apply_pending_deposit(state, deposit)
//         elif is_validator_exited:
//             # Validator is exiting, postpone the deposit until after withdrawable epoch
//         if validator.exit_epoch < FAR_FUTURE_EPOCH:
//             if get_current_epoch(state) <= validator.withdrawable_epoch:
//                 deposits_to_postpone.append(deposit)
//             # Deposited balance will never become active. Increase balance but do not consume churn
//             else:
//                 increase_balance(state, deposit.index, deposit.amount)
//         # Validator is not exiting, attempt to process deposit
//         else:
//             # Deposit does not fit in the churn, no more deposit processing in this epoch.
//             if processed_amount + deposit.amount > available_for_processing:
//                 break
//             # Deposit fits in the churn, process it. Increase balance and consume churn.
//             else:
//                 increase_balance(state, deposit.index, deposit.amount)
//                 processed_amount += deposit.amount
//         # Regardless of how the deposit was handled, we move on in the queue.
//         next_deposit_index += 1
//
//     state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
//
//     if len(state.pending_balance_deposits) == 0:
//         state.deposit_balance_to_consume = Gwei(0)
//             deposits_to_postpone.append(deposit)
//     else:
//         state.deposit_balance_to_consume = available_for_processing - processed_amount
//             # Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch.
//             is_churn_limit_reached = processed_amount + deposit.amount > available_for_processing
//             if is_churn_limit_reached:
//                 break
//
//     state.pending_balance_deposits += deposits_to_postpone
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
	_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
//             # Consume churn and apply deposit.
//             processed_amount += deposit.amount
//             apply_pending_deposit(state, deposit)
//
//         # Regardless of how the deposit was handled, we move on in the queue.
//         next_deposit_index += 1
//
//     state.pending_deposits = state.pending_deposits[next_deposit_index:] + deposits_to_postpone
//
//     # Accumulate churn only if the churn limit has been hit.
//     if is_churn_limit_reached:
//         state.deposit_balance_to_consume = available_for_processing - processed_amount
//     else:
//         state.deposit_balance_to_consume = Gwei(0)
func ProcessPendingDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
	_, span := trace.StartSpan(ctx, "electra.ProcessPendingDeposits")
	defer span.End()

	if st == nil || st.IsNil() {
		return errors.New("nil state")
	}

	// constants & initializations
	nextEpoch := slots.ToEpoch(st.Slot()) + 1
	processedAmount := uint64(0)
	nextDepositIndex := uint64(0)
	isChurnLimitReached := false

	var pendingDepositsToBatchVerify []*ethpb.PendingDeposit
	var pendingDepositsToPostpone []*eth.PendingDeposit

	depBalToConsume, err := st.DepositBalanceToConsume()
	if err != nil {
		return err
		return errors.Wrap(err, "could not get deposit balance to consume")
	}
	availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)
	processedAmount := uint64(0)
	nextDepositIndex := 0
	var depositsToPostpone []*eth.PendingBalanceDeposit

	deposits, err := st.PendingBalanceDeposits()
	finalizedSlot, err := slots.EpochStart(st.FinalizedCheckpoint().Epoch)
	if err != nil {
		return errors.Wrap(err, "could not get finalized slot")
	}

	startIndex, err := st.DepositRequestsStartIndex()
	if err != nil {
		return errors.Wrap(err, "could not get starting pendingDeposit index")
	}

	pendingDeposits, err := st.PendingDeposits()
	if err != nil {
		return err
	}

	// constants
	ffe := params.BeaconConfig().FarFutureEpoch
	nextEpoch := slots.ToEpoch(st.Slot()) + 1

	for _, balanceDeposit := range deposits {
		v, err := st.ValidatorAtIndexReadOnly(balanceDeposit.Index)
		if err != nil {
			return fmt.Errorf("failed to fetch validator at index: %w", err)
	for _, pendingDeposit := range pendingDeposits {
		// Do not process pendingDeposit requests if Eth1 bridge deposits are not yet applied.
		if pendingDeposit.Slot > params.BeaconConfig().GenesisSlot && st.Eth1DepositIndex() < startIndex {
			break
		}

		// If the validator is currently exiting, postpone the deposit until after the withdrawable
		// epoch.
		if v.ExitEpoch() < ffe {
			if nextEpoch <= v.WithdrawableEpoch() {
				depositsToPostpone = append(depositsToPostpone, balanceDeposit)
			} else {
				// The deposited balance will never become active. Therefore, we increase the balance but do
				// not consume the churn.
				if err := helpers.IncreaseBalance(st, balanceDeposit.Index, balanceDeposit.Amount); err != nil {
					return err
				}
		// Check if pendingDeposit has been finalized, otherwise, stop processing.
		if pendingDeposit.Slot > finalizedSlot {
			break
		}

		// Check if number of processed deposits has not reached the limit, otherwise, stop processing.
		if nextDepositIndex >= params.BeaconConfig().MaxPendingDepositsPerEpoch {
			break
		}

		var isValidatorExited bool
		var isValidatorWithdrawn bool
		index, found := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
		if found {
			val, err := st.ValidatorAtIndexReadOnly(index)
			if err != nil {
				return errors.Wrap(err, "could not get validator")
			}
			isValidatorExited = val.ExitEpoch() < params.BeaconConfig().FarFutureEpoch
			isValidatorWithdrawn = val.WithdrawableEpoch() < nextEpoch
		}

		if isValidatorWithdrawn {
			// note: the validator will never be active, just increase the balance
			if err := helpers.IncreaseBalance(st, index, pendingDeposit.Amount); err != nil {
				return errors.Wrap(err, "could not increase balance")
			}
		} else if isValidatorExited {
			pendingDepositsToPostpone = append(pendingDepositsToPostpone, pendingDeposit)
		} else {
			// Validator is not exiting, attempt to process deposit.
			if primitives.Gwei(processedAmount+balanceDeposit.Amount) > availableForProcessing {
			isChurnLimitReached = primitives.Gwei(processedAmount+pendingDeposit.Amount) > availableForProcessing
			if isChurnLimitReached {
				break
			}
			// Deposit fits in churn, process it. Increase balance and consume churn.
			if err := helpers.IncreaseBalance(st, balanceDeposit.Index, balanceDeposit.Amount); err != nil {
				return err
			processedAmount += pendingDeposit.Amount

			// note: the following code deviates from the spec in order to perform batch signature verification
			if found {
				if err := helpers.IncreaseBalance(st, index, pendingDeposit.Amount); err != nil {
					return errors.Wrap(err, "could not increase balance")
				}
			} else {
				// Collect deposit for batch signature verification
				pendingDepositsToBatchVerify = append(pendingDepositsToBatchVerify, pendingDeposit)
			}
			processedAmount += balanceDeposit.Amount
		}

		// Regardless of how the deposit was handled, we move on in the queue.
		// Regardless of how the pendingDeposit was handled, we move on in the queue.
		nextDepositIndex++
	}

	// Perform batch signature verification on pending deposits that require validator registration
	if err = batchProcessNewPendingDeposits(ctx, st, pendingDepositsToBatchVerify); err != nil {
		return errors.Wrap(err, "could not process pending deposits with new public keys")
	}

	// Combined operation:
	// - state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
	// - state.pending_balance_deposits += deposits_to_postpone
	// However, the number of remaining deposits must be maintained to properly update the deposit
	// - state.pending_deposits = state.pending_deposits[next_deposit_index:]
	// - state.pending_deposits += deposits_to_postpone
	// However, the number of remaining deposits must be maintained to properly update the pendingDeposit
	// balance to consume.
	numRemainingDeposits := len(deposits[nextDepositIndex:])
	deposits = append(deposits[nextDepositIndex:], depositsToPostpone...)
	if err := st.SetPendingBalanceDeposits(deposits); err != nil {
	pendingDeposits = append(pendingDeposits[nextDepositIndex:], pendingDepositsToPostpone...)
	if err := st.SetPendingDeposits(pendingDeposits); err != nil {
		return errors.Wrap(err, "could not set pending deposits")
	}
	// Accumulate churn only if the churn limit has been hit.
	if isChurnLimitReached {
		return st.SetDepositBalanceToConsume(availableForProcessing - primitives.Gwei(processedAmount))
	}
	return st.SetDepositBalanceToConsume(0)
}
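
// A minimal illustrative sketch (hypothetical helper, not part of this diff) of the
// churn accounting above: leftover allowance rolls over only when the loop actually
// broke on the churn limit; a drained queue resets the balance to consume.
func depositBalanceToConsume(available, processed primitives.Gwei, churnLimitReached bool) primitives.Gwei {
	if churnLimitReached {
		return available - processed // carry the unused allowance into the next epoch
	}
	return 0 // the queue emptied below the limit, so nothing is carried over
}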

// batchProcessNewPendingDeposits should only be used to process new deposits that require validator registration
func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState, pendingDeposits []*ethpb.PendingDeposit) error {
	// Return early if there are no deposits to process
	if len(pendingDeposits) == 0 {
		return nil
	}

	// Try batch verification of all deposit signatures
	allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
	if err != nil {
		return errors.Wrap(err, "batch signature verification failed")
	}

	pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))

	// Process each deposit individually
	for _, pendingDeposit := range pendingDeposits {
		_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
		if !found {
			pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
		}
		validSignature := allSignaturesVerified

		// If batch verification failed, check the individual deposit signature
		if !allSignaturesVerified {
			validSignature, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
				PublicKey:             bytesutil.SafeCopyBytes(pendingDeposit.PublicKey),
				WithdrawalCredentials: bytesutil.SafeCopyBytes(pendingDeposit.WithdrawalCredentials),
				Amount:                pendingDeposit.Amount,
				Signature:             bytesutil.SafeCopyBytes(pendingDeposit.Signature),
			})
			if err != nil {
				return errors.Wrap(err, "individual deposit signature verification failed")
			}
		}

		// Add validator to the registry if the signature is valid
		if validSignature {
			if found {
				index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
				if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
					return errors.Wrap(err, "could not increase balance")
				}
			} else {
				err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
				if err != nil {
					return errors.Wrap(err, "failed to add validator to registry")
				}
			}
		}
	}

	return nil
}
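
// Note: within a single batch, pubKeyMap deduplicates same-key deposits; a second
// deposit for a key first seen in this batch tops up the balance instead of
// appending a duplicate validator. This is the behavior exercised by
// TestProcessPendingDepositsMultiplesSameDeposits below.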

// ApplyPendingDeposit implements the spec definition below.
// Note: This function is NOT used by ProcessPendingDeposits due to simplified logic for more readable batch processing.
//
// Spec Definition:
//
// def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None:
//     """
//     Applies ``deposit`` to the ``state``.
//     """
//     validator_pubkeys = [v.pubkey for v in state.validators]
//     if deposit.pubkey not in validator_pubkeys:
//         # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//         if is_valid_deposit_signature(
//             deposit.pubkey,
//             deposit.withdrawal_credentials,
//             deposit.amount,
//             deposit.signature
//         ):
//             add_validator_to_registry(state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount)
//     else:
//         validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey))
//         # Increase balance
//         increase_balance(state, validator_index, deposit.amount)
func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *ethpb.PendingDeposit) error {
	_, span := trace.StartSpan(ctx, "electra.ApplyPendingDeposit")
	defer span.End()
	index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
	if !ok {
		verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
			PublicKey:             bytesutil.SafeCopyBytes(deposit.PublicKey),
			WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
			Amount:                deposit.Amount,
			Signature:             bytesutil.SafeCopyBytes(deposit.Signature),
		})
		if err != nil {
			return errors.Wrap(err, "could not verify deposit signature")
		}

		if verified {
			if err := AddValidatorToRegistry(st, deposit.PublicKey, deposit.WithdrawalCredentials, deposit.Amount); err != nil {
				return errors.Wrap(err, "could not add validator to registry")
			}
		}
		return nil
	}
	return helpers.IncreaseBalance(st, index, deposit.Amount)
}

// AddValidatorToRegistry updates the beacon state with validator information
// def add_validator_to_registry(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> None:
//     index = get_index_for_new_validator(state)
//     validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) # [Modified in Electra:EIP7251]
//     set_or_append_list(state.validators, index, validator)
//     set_or_append_list(state.balances, index, amount)
//     set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
//     set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
//     set_or_append_list(state.inactivity_scores, index, uint64(0))
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
	val, err := GetValidatorFromDeposit(pubKey, withdrawalCredentials, amount)
	if err != nil {
		return errors.Wrap(err, "could not get validator from deposit")
	}
	if err := beaconState.AppendValidator(val); err != nil {
		return err
	}
	if err := beaconState.AppendBalance(amount); err != nil {
		return err
	}

	if numRemainingDeposits == 0 {
		return st.SetDepositBalanceToConsume(0)
	} else {
		return st.SetDepositBalanceToConsume(availableForProcessing - primitives.Gwei(processedAmount))
	// only active in altair and only when it's a new validator (after append balance)
	if beaconState.Version() >= version.Altair {
		if err := beaconState.AppendInactivityScore(0); err != nil {
			return err
		}
		if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
			return err
		}
		if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
			return err
		}
	}
	return nil
}

// GetValidatorFromDeposit gets a new validator object with provided parameters
//
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
//     validator = Validator(
//         pubkey=pubkey,
//         withdrawal_credentials=withdrawal_credentials,
//         effective_balance=Gwei(0),
//         slashed=False,
//         activation_eligibility_epoch=FAR_FUTURE_EPOCH,
//         activation_epoch=FAR_FUTURE_EPOCH,
//         exit_epoch=FAR_FUTURE_EPOCH,
//         withdrawable_epoch=FAR_FUTURE_EPOCH,
//     )
//
//     # [Modified in Electra:EIP7251]
//     max_effective_balance = get_max_effective_balance(validator)
//     validator.effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance)
//
//     return validator
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) (*ethpb.Validator, error) {
	validator := &ethpb.Validator{
		PublicKey:                  pubKey,
		WithdrawalCredentials:      withdrawalCredentials,
		EffectiveBalance:           0,
		Slashed:                    false,
		ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
		ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
		ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
		WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
	}
	v, err := state_native.NewValidator(validator)
	if err != nil {
		return nil, err
	}
	maxEffectiveBalance := helpers.ValidatorMaxEffectiveBalance(v)
	validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
	return validator, nil
}
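
// Worked example of the rounding above (assuming mainnet values, where
// EFFECTIVE_BALANCE_INCREMENT is 1_000_000_000 gwei): a deposit of 33_500_000_000
// gwei rounds down to 33_000_000_000 gwei, and is then capped by the validator's
// max effective balance: 32 ETH for 0x01 credentials, 2048 ETH for compounding
// 0x02 credentials.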

// ProcessDepositRequests is a function as part of electra to process execution layer deposits
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
	_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
	defer span.End()

	if len(requests) == 0 {
		return beaconState, nil
	}

	deposits := make([]*ethpb.Deposit, 0)
	for _, req := range requests {
		if req == nil {
			return nil, errors.New("got a nil DepositRequest")
		}
		deposits = append(deposits, &ethpb.Deposit{
			Data: &ethpb.Deposit_Data{
				PublicKey:             req.Pubkey,
				WithdrawalCredentials: req.WithdrawalCredentials,
				Amount:                req.Amount,
				Signature:             req.Signature,
			},
		})
	}
	batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
	}
	var err error
	for _, receipt := range requests {
		beaconState, err = processDepositRequest(beaconState, receipt, batchVerified)
		beaconState, err = processDepositRequest(beaconState, receipt)
		if err != nil {
			return nil, errors.Wrap(err, "could not apply deposit request")
		}
@@ -338,34 +573,42 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
	return beaconState, nil
}

// processDepositRequest processes the specific deposit receipt
// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
//     # Set deposit request start index
//     if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUEST_START_INDEX:
//     if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
//         state.deposit_requests_start_index = deposit_request.index
//
//     apply_deposit(
//         state=state,
//     # Create pending deposit
//     state.pending_deposits.append(PendingDeposit(
//         pubkey=deposit_request.pubkey,
//         withdrawal_credentials=deposit_request.withdrawal_credentials,
//         amount=deposit_request.amount,
//         signature=deposit_request.signature,
//     )
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest, verifySignature bool) (state.BeaconState, error) {
//         slot=state.slot,
//     ))
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
	requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
	if err != nil {
		return nil, errors.Wrap(err, "could not get deposit requests start index")
	}
	if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
	if request == nil {
		return nil, errors.New("nil deposit request")
	}
	if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
		return nil, errors.Wrap(err, "could not set deposit requests start index")
	}
	}
	return ApplyDeposit(beaconState, &ethpb.Deposit_Data{
	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             bytesutil.SafeCopyBytes(request.Pubkey),
		Amount:                request.Amount,
		WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
		Amount:                request.Amount,
		Signature:             bytesutil.SafeCopyBytes(request.Signature),
	}, verifySignature)
		Slot:                  beaconState.Slot(),
	}); err != nil {
		return nil, errors.Wrap(err, "could not append deposit request")
	}
	return beaconState, nil
}
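
// Note: the first deposit request observed pins deposit_requests_start_index; from
// then on, ProcessPendingDeposits refuses to apply any request-sourced pending
// deposit while eth1_deposit_index still trails that start index, which is how the
// Eth1 bridge queue drains fully before deposit requests take over.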
|
||||
|
||||
@@ -9,10 +9,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
stateTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -20,7 +22,41 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 1000)
|
||||
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
validators[i].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[i].WithdrawalCredentials = wc
|
||||
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
|
||||
}
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
|
||||
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
|
||||
require.NoError(t, err)
|
||||
|
||||
val := st.Validators()
|
||||
seenPubkeys := make(map[string]struct{})
|
||||
for i := 0; i < len(val); i += 1 {
|
||||
if len(val[i].PublicKey) == 0 {
|
||||
continue
|
||||
}
|
||||
_, ok := seenPubkeys[string(val[i].PublicKey)]
|
||||
if ok {
|
||||
t.Fatalf("duplicated pubkeys")
|
||||
} else {
|
||||
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessPendingDeposits(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
@@ -48,17 +84,10 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
{
name: "more deposits than balance to consume processes partial deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(100))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 20)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 10,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
depositAmount := uint64(amountAvailForProcessing) / 10
st := stateWithPendingDeposits(t, 1_000, 20, depositAmount)
require.NoError(t, st.SetDepositBalanceToConsume(100))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
@@ -74,25 +103,45 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
}

// Half of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 10, len(remaining))
},
},
{
name: "withdrawn validators should not consume churn",
state: func() state.BeaconState {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
depositAmount := uint64(amountAvailForProcessing)
// set the pending deposits to the maximum churn limit
st := stateWithPendingDeposits(t, 1_000, 2, depositAmount)
vals := st.Validators()
vals[1].WithdrawableEpoch = 0
require.NoError(t, st.SetValidators(vals))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
// Validators 0 and 1 should have their balance increased
for i := primitives.ValidatorIndex(0); i < 2; i++ {
b, err := st.BalanceAtIndex(i)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
}

// All pending deposits should have been processed
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
},
{
name: "less deposits than balance to consume processes all deposits",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(0))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 5)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 5,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
depositAmount := uint64(amountAvailForProcessing) / 5
st := stateWithPendingDeposits(t, 1_000, 5, depositAmount)
require.NoError(t, st.SetDepositBalanceToConsume(0))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
@@ -108,7 +157,73 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
}

// All of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
},
{
name: "process pending deposit for unknown key, activates new key",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 0)
sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
dep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
require.NoError(t, st.SetPendingDeposits([]*eth.PendingDeposit{dep}))
require.Equal(t, 0, len(st.Validators()))
require.Equal(t, 0, len(st.Balances()))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
b, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b)

// All of the balance deposits should have been processed.
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))

// validator becomes active
require.Equal(t, 1, len(st.Validators()))
require.Equal(t, 1, len(st.Balances()))
},
},
{
name: "process excess balance that uses a point to infinity signature, processed as a topup",
state: func() state.BeaconState {
excessBalance := uint64(100)
st := stateWithActiveBalanceETH(t, 32)
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validators[0].PublicKey = sk.PublicKey().Marshal()
validators[0].WithdrawalCredentials = wc
dep := stateTesting.GeneratePendingDeposit(t, sk, excessBalance, bytesutil.ToBytes32(wc), 0)
dep.Signature = common.InfiniteSignature[:]
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetPendingDeposits([]*eth.PendingDeposit{dep}))
return st
}(),
check: func(t *testing.T, st state.BeaconState) {
res, err := st.DepositBalanceToConsume()
require.NoError(t, err)
require.Equal(t, primitives.Gwei(0), res)
b, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(100), b)

// All of the balance deposits should have been processed.
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
@@ -116,17 +231,10 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
{
name: "exiting validator deposit postponed",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
require.NoError(t, st.SetDepositBalanceToConsume(0))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*eth.PendingBalanceDeposit, 5)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 5,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
depositAmount := uint64(amountAvailForProcessing) / 5
st := stateWithPendingDeposits(t, 1_000, 5, depositAmount)
require.NoError(t, st.SetDepositBalanceToConsume(0))
v, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
v.ExitEpoch = 10
@@ -148,7 +256,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {

// All of the balance deposits should have been processed, except validator index 0 was
// added back to the pending deposits queue.
remaining, err := st.PendingBalanceDeposits()
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(remaining))
},
@@ -156,15 +264,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
{
name: "exited validator balance increased",
state: func() state.BeaconState {
st := stateWithActiveBalanceETH(t, 1_000)
deps := make([]*eth.PendingBalanceDeposit, 1)
for i := 0; i < len(deps); i += 1 {
deps[i] = &eth.PendingBalanceDeposit{
Amount: 1_000_000,
Index: primitives.ValidatorIndex(i),
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
st := stateWithPendingDeposits(t, 1_000, 1, 1_000_000)
v, err := st.ValidatorAtIndex(0)
require.NoError(t, err)
v.ExitEpoch = 2
@@ -182,7 +282,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
require.Equal(t, uint64(1_100_000), b)

// All of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(remaining))
},
@@ -199,7 +299,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
tab, err = helpers.TotalActiveBalance(tt.state)
}
require.NoError(t, err)
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
err = electra.ProcessPendingDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
if tt.check != nil {
tt.check(t, tt.state)
@@ -208,6 +308,27 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
}
}

func TestBatchProcessNewPendingDeposits(t *testing.T) {
t.Run("invalid batch initiates correct individual validation", func(t *testing.T) {
st := stateWithActiveBalanceETH(t, 0)
require.Equal(t, 0, len(st.Validators()))
require.Equal(t, 0, len(st.Balances()))
sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
invalidDep := &eth.PendingDeposit{PublicKey: make([]byte, 48)}
// have a combination of valid and invalid deposits
deps := []*eth.PendingDeposit{validDep, invalidDep}
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
// only the valid deposit is added to the registry
require.Equal(t, 1, len(st.Validators()))
require.Equal(t, 1, len(st.Balances()))
})
}
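The test above exercises the batch-then-individual validation path named in its t.Run label: new pending deposits are first signature-checked together, and only when that combined check fails does processing fall back to validating each deposit on its own, so one bad signature cannot discard the valid deposits around it. A minimal self-contained sketch of that control flow (the Deposit type and helpers are illustrative stand-ins, not the prysm types):

package main

import "fmt"

// Deposit is a simplified stand-in for eth.PendingDeposit; only the fields
// needed to illustrate the flow are modeled here.
type Deposit struct {
	Pubkey   string
	SigValid bool
}

// verifyBatch stands in for one aggregate BLS verification over all deposits.
func verifyBatch(deps []Deposit) bool {
	for _, d := range deps {
		if !d.SigValid {
			return false
		}
	}
	return true
}

// applyDeposits applies every deposit when the batch verifies; otherwise it
// re-checks each deposit individually and skips only the invalid ones.
func applyDeposits(deps []Deposit) []string {
	applied := make([]string, 0, len(deps))
	if verifyBatch(deps) {
		// Fast path: one combined check covers every deposit.
		for _, d := range deps {
			applied = append(applied, d.Pubkey)
		}
		return applied
	}
	// Fallback: the batch failed, so validate per deposit instead of
	// rejecting the whole set.
	for _, d := range deps {
		if d.SigValid {
			applied = append(applied, d.Pubkey)
		}
	}
	return applied
}

func main() {
	deps := []Deposit{{Pubkey: "valid", SigValid: true}, {Pubkey: "invalid", SigValid: false}}
	fmt.Println(applyDeposits(deps)) // [valid] — mirrors the single-validator assertion above
}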

func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
@@ -220,7 +341,7 @@ func TestProcessDepositRequests(t *testing.T) {
})
t.Run("nil request errors", func(t *testing.T) {
_, err = electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "got a nil DepositRequest", err)
require.ErrorContains(t, "nil deposit request", err)
})

vals := st.Validators()
@@ -230,7 +351,7 @@ func TestProcessDepositRequests(t *testing.T) {
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
require.NoError(t, st.SetPendingBalanceDeposits(make([]*eth.PendingBalanceDeposit, 0))) // reset pbd as the deterministic state populates this already
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the deterministic state populates this already
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositMessage := &eth.DepositMessage{
@@ -255,11 +376,10 @@ func TestProcessDepositRequests(t *testing.T) {
st, err = electra.ProcessDepositRequests(context.Background(), st, requests)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pbd))
require.Equal(t, 1, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
require.Equal(t, uint64(2000), pbd[1].Amount)
}

func TestProcessDeposit_Electra_Simple(t *testing.T) {
@@ -286,7 +406,7 @@ func TestProcessDeposit_Electra_Simple(t *testing.T) {
require.NoError(t, err)
pdSt, err := electra.ProcessDeposits(context.Background(), st, deps)
require.NoError(t, err)
pbd, err := pdSt.PendingBalanceDeposits()
pbd, err := pdSt.PendingDeposits()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, pbd[2].Amount)
require.Equal(t, 3, len(pbd))
@@ -322,7 +442,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
},
})
require.NoError(t, err)
newState, err := electra.ProcessDeposit(beaconState, dep[0], true)
newState, err := electra.ProcessDeposit(beaconState, dep[0], false)
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")

if newState.Eth1DepositIndex() != 1 {
@@ -359,42 +479,128 @@ func TestApplyDeposit_TopUps_WithBadSignature(t *testing.T) {
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
adSt, err := electra.ApplyDeposit(st, depositData, true)
adSt, err := electra.ApplyDeposit(st, depositData, false)
require.NoError(t, err)
pbd, err := adSt.PendingBalanceDeposits()
pbd, err := adSt.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, topUpAmount, pbd[0].Amount)
}

func TestApplyDeposit_Electra_SwitchToCompoundingValidator(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 3)
// stateWithActiveBalanceETH generates a mock beacon state given a balance in ETH
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
gwei := balETH * 1_000_000_000
balPerVal := params.BeaconConfig().MinActivationBalance
numVals := gwei / balPerVal

vals := make([]*eth.Validator, numVals)
bals := make([]uint64, numVals)
for i := uint64(0); i < numVals; i++ {
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
vals[i] = &eth.Validator{
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: balPerVal,
WithdrawalCredentials: wc,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
}
bals[i] = balPerVal
}
st, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
Validators: vals,
Balances: bals,
Fork: &eth.Fork{
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
},
})
require.NoError(t, err)
// set some fake finalized checkpoint
require.NoError(t, st.SetFinalizedCheckpoint(&eth.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
}))
return st
}
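As a quick sanity check on the arithmetic in this helper: the ETH amount is converted to gwei and integer-divided by MinActivationBalance (32 ETH on mainnet), so the validator count is floored. A self-contained sketch with the mainnet constant hard-coded for illustration:

package main

import "fmt"

func main() {
	// Mirrors stateWithActiveBalanceETH: balance in ETH -> gwei -> validator count.
	const minActivationBalance = uint64(32_000_000_000) // 32 ETH in gwei (mainnet value)
	balETH := uint64(1_000)
	gwei := balETH * 1_000_000_000
	numVals := gwei / minActivationBalance
	fmt.Println(numVals) // 31 — integer division floors 1000/32 = 31.25
}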

// stateWithPendingDeposits creates a beacon state with pending deposits and an existing ETH balance
func stateWithPendingDeposits(t *testing.T, balETH uint64, numDeposits, amount uint64) state.BeaconState {
st := stateWithActiveBalanceETH(t, balETH)
deps := make([]*eth.PendingDeposit, numDeposits)
validators := st.Validators()
for i := 0; i < len(deps); i += 1 {
sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(i)
validators[i].PublicKey = sk.PublicKey().Marshal()
validators[i].WithdrawalCredentials = wc
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, amount, bytesutil.ToBytes32(wc), 0)
}
require.NoError(t, st.SetValidators(validators))
require.NoError(t, st.SetPendingDeposits(deps))
return st
}

func TestApplyPendingDeposit_TopUp(t *testing.T) {
excessBalance := uint64(100)
st := stateWithActiveBalanceETH(t, 32)
validators := st.Validators()
sk, err := bls.RandKey()
require.NoError(t, err)
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositData := &eth.Deposit_Data{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: withdrawalCred,
Signature: make([]byte, fieldparams.BLSSignatureLength),
}
vals := st.Validators()
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
sr, err := signing.ComputeSigningRoot(depositData, bytesutil.ToBytes(3, 32))
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
validators[0].PublicKey = sk.PublicKey().Marshal()
validators[0].WithdrawalCredentials = wc
dep := stateTesting.GeneratePendingDeposit(t, sk, excessBalance, bytesutil.ToBytes32(wc), 0)
dep.Signature = common.InfiniteSignature[:]
require.NoError(t, st.SetValidators(validators))

require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))

b, err := st.BalanceAtIndex(0)
require.NoError(t, err)
sig := sk.Sign(sr[:])
depositData.Signature = sig.Marshal()
adSt, err := electra.ApplyDeposit(st, depositData, false)
require.NoError(t, err)
pbd, err := adSt.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
require.Equal(t, uint64(2000), pbd[1].Amount)
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(excessBalance), b)
}

func TestApplyPendingDeposit_UnknownKey(t *testing.T) {
st := stateWithActiveBalanceETH(t, 0)
sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
dep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
require.Equal(t, 0, len(st.Validators()))
require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))
// activates new validator
require.Equal(t, 1, len(st.Validators()))
b, err := st.BalanceAtIndex(0)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b)
}

func TestApplyPendingDeposit_InvalidSignature(t *testing.T) {
st := stateWithActiveBalanceETH(t, 0)

sk, err := bls.RandKey()
require.NoError(t, err)
wc := make([]byte, 32)
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
wc[31] = byte(0)
dep := &eth.PendingDeposit{
PublicKey: sk.PublicKey().Marshal(),
WithdrawalCredentials: wc,
Amount: 100,
}
require.Equal(t, 0, len(st.Validators()))
require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))
// no validator added
require.Equal(t, 0, len(st.Validators()))
// no topup either
require.Equal(t, 0, len(st.Balances()))
}

beacon-chain/core/electra/export_test.go (new file, 3 lines)
@@ -0,0 +1,3 @@
package electra

var BatchProcessNewPendingDeposits = batchProcessNewPendingDeposits
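The three-line file above is the standard Go export-for-test idiom: a _test.go file inside the package re-exports the unexported batchProcessNewPendingDeposits under an exported name, so the external electra_test package can call it without widening the production API. The pattern in general form, with hypothetical names (pkg and doThing are placeholders):

// internal.go
package pkg

func doThing() int { return 42 } // unexported production code

// export_test.go — compiled only when running `go test` for this package.
package pkg

// DoThing lets the external pkg_test package reach the unexported function.
var DoThing = doThing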
@@ -29,7 +29,6 @@ var (
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
AttestationsDelta = altair.AttestationsDelta
ProcessSyncAggregate = altair.ProcessSyncAggregate
)

// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
@@ -44,7 +43,7 @@ var (
// process_registry_updates(state)
// process_slashings(state)
// process_eth1_data_reset(state)
// process_pending_balance_deposits(state) # New in EIP7251
// process_pending_deposits(state) # New in EIP7251
// process_pending_consolidations(state) # New in EIP7251
// process_effective_balance_updates(state)
// process_slashings_reset(state)
@@ -94,7 +93,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
return err
}

if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
if err = ProcessPendingDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
return err
}
if err = ProcessPendingConsolidations(ctx, state); err != nil {

@@ -84,11 +84,11 @@ func ProcessOperations(
}
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, errors.Wrap(err, "could not process deposit receipts")
return nil, errors.Wrap(err, "could not process deposit requests")
}
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
return nil, errors.Wrap(err, "could not process withdrawal requests")
}
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, fmt.Errorf("could not process consolidation requests: %w", err)

@@ -57,14 +57,17 @@ func TestProcessEpoch_CanProcessElectra(t *testing.T) {
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, st.SetDepositBalanceToConsume(100))
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
deps := make([]*ethpb.PendingBalanceDeposit, 20)
validators := st.Validators()
deps := make([]*ethpb.PendingDeposit, 20)
for i := 0; i < len(deps); i += 1 {
deps[i] = &ethpb.PendingBalanceDeposit{
Amount: uint64(amountAvailForProcessing) / 10,
Index: primitives.ValidatorIndex(i),
deps[i] = &ethpb.PendingDeposit{
PublicKey: validators[i].PublicKey,
WithdrawalCredentials: validators[i].WithdrawalCredentials,
Amount: uint64(amountAvailForProcessing) / 10,
Slot: 0,
}
}
require.NoError(t, st.SetPendingBalanceDeposits(deps))
require.NoError(t, st.SetPendingDeposits(deps))
require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{
{
SourceIndex: 2,
@@ -108,7 +111,7 @@ func TestProcessEpoch_CanProcessElectra(t *testing.T) {
require.Equal(t, primitives.Gwei(100), res)

// Half of the balance deposits should have been processed.
remaining, err := st.PendingBalanceDeposits()
remaining, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 10, len(remaining))

@@ -101,7 +101,7 @@ import (
// earliest_exit_epoch=earliest_exit_epoch,
// consolidation_balance_to_consume=0,
// earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
// pending_balance_deposits=[],
// pending_deposits=[],
// pending_partial_withdrawals=[],
// pending_consolidations=[],
// )
@@ -272,7 +272,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
EarliestExitEpoch: earliestExitEpoch,
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
}

@@ -169,10 +169,10 @@ func TestUpgradeToElectra(t *testing.T) {
require.NoError(t, err)
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)

pendingBalanceDeposits, err := mSt.PendingBalanceDeposits()
pendingDeposits, err := mSt.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 2, len(pendingBalanceDeposits))
require.Equal(t, uint64(1000), pendingBalanceDeposits[1].Amount)
require.Equal(t, 2, len(pendingDeposits))
require.Equal(t, uint64(1000), pendingDeposits[1].Amount)

numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
require.NoError(t, err)

@@ -3,87 +3,22 @@ package electra
import (
"errors"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

// AddValidatorToRegistry updates the beacon state with validator information
// def add_validator_to_registry(state: BeaconState,
//
// pubkey: BLSPubkey,
// withdrawal_credentials: Bytes32,
// amount: uint64) -> None:
// index = get_index_for_new_validator(state)
// validator = get_validator_from_deposit(pubkey, withdrawal_credentials)
// set_or_append_list(state.validators, index, validator)
// set_or_append_list(state.balances, index, 0) # [Modified in Electra:EIP7251]
// set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
// set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
// set_or_append_list(state.inactivity_scores, index, uint64(0))
// state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [New in Electra:EIP7251]
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
val := ValidatorFromDeposit(pubKey, withdrawalCredentials)
if err := beaconState.AppendValidator(val); err != nil {
return err
}
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
return errors.New("could not find validator in registry")
}
if err := beaconState.AppendBalance(0); err != nil {
return err
}
if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
return err
}
if err := beaconState.AppendInactivityScore(0); err != nil {
return err
}
if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
return err
}
return beaconState.AppendCurrentParticipationBits(0)
}

// ValidatorFromDeposit gets a new validator object with provided parameters
//
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32) -> Validator:
//
// return Validator(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
// activation_epoch=FAR_FUTURE_EPOCH,
// exit_epoch=FAR_FUTURE_EPOCH,
// withdrawable_epoch=FAR_FUTURE_EPOCH,
// effective_balance=0, # [Modified in Electra:EIP7251]
//
// )
func ValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte) *ethpb.Validator {
return &ethpb.Validator{
PublicKey: pubKey,
WithdrawalCredentials: withdrawalCredentials,
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 0, // [Modified in Electra:EIP7251]
}
}

// SwitchToCompoundingValidator
//
// Spec definition:
//
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
// validator = state.validators[index]
// if has_eth1_withdrawal_credential(validator):
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
// queue_excess_active_balance(state, index)
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
//
// validator = state.validators[index]
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
// queue_excess_active_balance(state, index)
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := s.ValidatorAtIndex(idx)
if err != nil {
@@ -92,28 +27,32 @@ func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorI
if len(v.WithdrawalCredentials) == 0 {
return errors.New("validator has no withdrawal credentials")
}
if helpers.HasETH1WithdrawalCredential(v) {
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return QueueExcessActiveBalance(s, idx)

v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return nil
return QueueExcessActiveBalance(s, idx)
}

// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending deposit.
//
// Spec definition:
//
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=excess_balance)
// )
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
//
// balance = state.balances[index]
// if balance > MIN_ACTIVATION_BALANCE:
// excess_balance = balance - MIN_ACTIVATION_BALANCE
// state.balances[index] = MIN_ACTIVATION_BALANCE
// validator = state.validators[index]
// state.pending_deposits.append(PendingDeposit(
// pubkey=validator.pubkey,
// withdrawal_credentials=validator.withdrawal_credentials,
// amount=excess_balance,
// signature=bls.G2_POINT_AT_INFINITY,
// slot=GENESIS_SLOT,
// ))
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := s.BalanceAtIndex(idx)
if err != nil {
@@ -121,11 +60,22 @@ func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex
}

if bal > params.BeaconConfig().MinActivationBalance {
excessBalance := bal - params.BeaconConfig().MinActivationBalance
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
return s.AppendPendingBalanceDeposit(idx, excessBalance)
excessBalance := bal - params.BeaconConfig().MinActivationBalance
val, err := s.ValidatorAtIndexReadOnly(idx)
if err != nil {
return err
}
pk := val.PublicKey()
return s.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: pk[:],
WithdrawalCredentials: val.GetWithdrawalCredentials(),
Amount: excessBalance,
Signature: common.InfiniteSignature[:],
Slot: params.BeaconConfig().GenesisSlot,
})
}
return nil
}
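The rewritten function records the excess as a full PendingDeposit keyed by pubkey and withdrawal credentials rather than by validator index, stamped with the G2 point-at-infinity signature so later processing can skip signature verification. A worked sketch of the balance split, assuming a 40 ETH balance against the 32 ETH mainnet minimum:

package main

import "fmt"

func main() {
	// Mirrors queue_excess_active_balance: anything above MIN_ACTIVATION_BALANCE
	// is moved out of the spendable balance and queued as a pending deposit.
	const minActivation = uint64(32_000_000_000) // gwei
	bal := uint64(40_000_000_000)                // 40 ETH
	if bal > minActivation {
		excess := bal - minActivation
		bal = minActivation
		fmt.Println(bal, excess) // 32000000000 8000000000 — 8 ETH queued
	}
}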
@@ -134,15 +84,21 @@ func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex
//
// Spec definition:
//
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
// balance = state.balances[index]
// state.balances[index] = 0
// validator = state.validators[index]
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_balance_deposits.append(
// PendingBalanceDeposit(index=index, amount=balance)
// )
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
//
// balance = state.balances[index]
// state.balances[index] = 0
// validator = state.validators[index]
// validator.effective_balance = 0
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
// state.pending_deposits.append(PendingDeposit(
// pubkey=validator.pubkey,
// withdrawal_credentials=validator.withdrawal_credentials,
// amount=balance,
// signature=bls.G2_POINT_AT_INFINITY,
// slot=GENESIS_SLOT,
//
// ))
//
//nolint:dupword
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
@@ -166,5 +122,11 @@ func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.Val
return err
}

return s.AppendPendingBalanceDeposit(idx, bal)
return s.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: v.PublicKey,
WithdrawalCredentials: v.WithdrawalCredentials,
Amount: bal,
Signature: common.InfiniteSignature[:],
Slot: params.BeaconConfig().GenesisSlot,
})
}

@@ -6,7 +6,6 @@ import (

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -14,20 +13,6 @@ import (
"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestAddValidatorToRegistry(t *testing.T) {
st, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{})
require.NoError(t, err)
require.NoError(t, electra.AddValidatorToRegistry(st, make([]byte, fieldparams.BLSPubkeyLength), make([]byte, fieldparams.RootLength), 100))
balances := st.Balances()
require.Equal(t, 1, len(balances))
require.Equal(t, uint64(0), balances[0])
pbds, err := st.PendingBalanceDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbds))
require.Equal(t, uint64(100), pbds[0].Amount)
require.Equal(t, primitives.ValidatorIndex(0), pbds[0].Index)
}

func TestSwitchToCompoundingValidator(t *testing.T) {
s, err := state_native.InitializeFromProtoElectra(&eth.BeaconStateElectra{
Validators: []*eth.Validator{
@@ -60,7 +45,7 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
b, err := s.BalanceAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
pbd, err := s.PendingBalanceDeposits()
pbd, err := s.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")

@@ -69,11 +54,10 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
b, err = s.BalanceAtIndex(2)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
pbd, err = s.PendingBalanceDeposits()
pbd, err = s.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, uint64(100_000), pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(2), pbd[0].Index, "pending balance deposit index is incorrect")
}

func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
@@ -97,11 +81,10 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
require.NoError(t, err)
require.Equal(t, uint64(0), v.EffectiveBalance, "effective balance was not reset")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, v.ActivationEligibilityEpoch, "activation eligibility epoch was not reset")
pbd, err := s.PendingBalanceDeposits()
pbd, err := s.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
}

func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
@@ -114,7 +97,7 @@ func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
require.NoError(t, st.SetBalances(bals))
require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))

pbd, err := st.PendingBalanceDeposits()
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
val, err := st.ValidatorAtIndex(0)
@@ -132,7 +115,7 @@ func TestQueueExcessActiveBalance_Ok(t *testing.T) {
err := electra.QueueExcessActiveBalance(st, 0)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end

@@ -149,7 +132,7 @@ func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
err := electra.QueueEntireBalanceAndResetValidator(st, 0)
require.NoError(t, err)

pbd, err := st.PendingBalanceDeposits()
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)

@@ -111,30 +111,31 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
continue
}
validator, err := st.ValidatorAtIndex(vIdx)
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
if err != nil {
return nil, err
}
// Verify withdrawal credentials
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
isCorrectSourceAddress := bytes.Equal(validator.WithdrawalCredentials[12:], wr.SourceAddress)
wc := validator.GetWithdrawalCredentials()
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
if !hasCorrectCredential || !isCorrectSourceAddress {
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
continue
}

// Verify the validator is active.
if !helpers.IsActiveValidator(validator, currentEpoch) {
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
log.Debugln("Skipping execution layer withdrawal request, validator not active")
continue
}
// Verify the validator has not yet submitted an exit.
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
continue
}
// Verify the validator has been active long enough.
if currentEpoch < validator.ActivationEpoch.AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
continue
}
@@ -156,7 +157,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
continue
}

hasSufficientEffectiveBalance := validator.EffectiveBalance >= params.BeaconConfig().MinActivationBalance
hasSufficientEffectiveBalance := validator.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
vBal, err := st.BalanceAtIndex(vIdx)
if err != nil {
return nil, err

@@ -147,11 +147,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
// epoch = get_current_epoch(state)
// total_balance = get_total_active_balance(state)
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
// if state.version == electra:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
// for index, validator in enumerate(state.validators):
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
// penalty = penalty_numerator // total_balance * increment
// if state.version == electra:
// effective_balance_increments = validator.effective_balance // increment
// penalty = penalty_per_effective_balance_increment * effective_balance_increments
// decrease_balance(state, ValidatorIndex(index), penalty)
func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) {
currentEpoch := time.CurrentEpoch(st)
@@ -177,13 +183,26 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
// below equally.
increment := params.BeaconConfig().EffectiveBalanceIncrement
minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)

// Modified in Electra:EIP7251
var penaltyPerEffectiveBalanceIncrement uint64
if st.Version() >= version.Electra {
penaltyPerEffectiveBalanceIncrement = minSlashing / (totalBalance / increment)
}

bals := st.Balances()
changed := false
err = st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch()
if val.Slashed() && correctEpoch {
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
penalty := penaltyNumerator / totalBalance * increment
var penalty uint64
if st.Version() >= version.Electra {
effectiveBalanceIncrements := val.EffectiveBalance() / increment
penalty = penaltyPerEffectiveBalanceIncrement * effectiveBalanceIncrements
} else {
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
penalty = penaltyNumerator / totalBalance * increment
}
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
changed = true
}
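The Electra branch reorders the penalty arithmetic: instead of forming a per-validator numerator and then dividing by the total balance, it precomputes one penalty per effective-balance increment and multiplies, which avoids the per-validator truncation of the old form. A worked comparison with illustrative numbers (not drawn from the tests in this diff):

package main

import "fmt"

func main() {
	const increment = uint64(1_000_000_000) // EFFECTIVE_BALANCE_INCREMENT (1 ETH in gwei)
	totalBalance := uint64(64_000_000_000)  // e.g. two 32 ETH validators
	minSlashing := uint64(3_000_000_000)    // adjusted total slashing balance
	effectiveBalance := uint64(32_000_000_000)

	// Pre-Electra: per-validator numerator first, then divide by total balance.
	penaltyNumerator := effectiveBalance / increment * minSlashing
	oldPenalty := penaltyNumerator / totalBalance * increment // truncates 1.5 increments down to 1

	// Electra (EIP-7251): one shared per-increment penalty, then multiply.
	perIncrement := minSlashing / (totalBalance / increment)
	newPenalty := perIncrement * (effectiveBalance / increment)

	fmt.Println(oldPenalty, newPenalty) // 1000000000 1500000000
}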

@@ -448,3 +448,75 @@ func TestProcessHistoricalDataUpdate(t *testing.T) {
})
}
}

func TestProcessSlashings_SlashedElectra(t *testing.T) {
tests := []struct {
state *ethpb.BeaconStateElectra
want uint64
}{
{
state: &ethpb.BeaconStateElectra{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
},
want: uint64(29000000000),
},
{
state: &ethpb.BeaconStateElectra{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
Slashings: []uint64{0, 1e9},
},
want: uint64(30500000000),
},
{
state: &ethpb.BeaconStateElectra{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance * 10, params.BeaconConfig().MaxEffectiveBalance * 20},
Slashings: []uint64{0, 2 * 1e9},
},
want: uint64(317000001536),
},
{
state: &ethpb.BeaconStateElectra{
Validators: []*ethpb.Validator{
{Slashed: true,
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement},
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement}},
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement},
Slashings: []uint64{0, 1e9},
},
want: uint64(2044000000727),
},
}

for i, tt := range tests {
t.Run(fmt.Sprint(i), func(t *testing.T) {
original := proto.Clone(tt.state)
s, err := state_native.InitializeFromProtoElectra(tt.state)
require.NoError(t, err)
helpers.ClearCache()
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierBellatrix)
require.NoError(t, err)
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
})
}
}

@@ -32,6 +32,9 @@ const (

// AttesterSlashingReceived is sent after an attester slashing is received from gossip or rpc
AttesterSlashingReceived = 8

// DataColumnSidecarReceived is sent after a data column sidecar is received from gossip or rpc.
DataColumnSidecarReceived = 9
)

// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -77,3 +80,7 @@ type ProposerSlashingReceivedData struct {
type AttesterSlashingReceivedData struct {
AttesterSlashing ethpb.AttSlashing
}

type DataColumnSidecarReceivedData struct {
DataColumn *blocks.VerifiedRODataColumn
}
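The new constant and payload type follow the existing operation-feed pattern: a producer publishes an event whose Type is DataColumnSidecarReceived and whose Data carries the verified column. A minimal self-contained consumer sketch (the Event struct below only mimics the shape of a feed event; the real subscription plumbing is omitted, and a string stands in for *blocks.VerifiedRODataColumn):

package main

import "fmt"

// Event mimics the shape of a feed event: an integer type tag plus a payload.
type Event struct {
	Type int
	Data interface{}
}

const DataColumnSidecarReceived = 9 // matches the constant added above

// DataColumnSidecarReceivedData carries the payload for the sketch.
type DataColumnSidecarReceivedData struct {
	DataColumn string
}

func main() {
	ch := make(chan *Event, 1)
	ch <- &Event{Type: DataColumnSidecarReceived, Data: &DataColumnSidecarReceivedData{DataColumn: "column-0"}}
	ev := <-ch
	if d, ok := ev.Data.(*DataColumnSidecarReceivedData); ok && ev.Type == DataColumnSidecarReceived {
		fmt.Println("received data column sidecar:", d.DataColumn)
	}
}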

@@ -31,6 +31,8 @@ const (
LightClientFinalityUpdate
// LightClientOptimisticUpdate event
LightClientOptimisticUpdate
// PayloadAttributes events are fired upon a missed slot or new head.
PayloadAttributes
)

// BlockProcessedData is the data sent with BlockProcessed events.

@@ -63,6 +63,7 @@ go_test(
"validators_test.go",
"weak_subjectivity_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
shard_count = 2,
tags = ["CI_race_detection"],

@@ -23,11 +23,8 @@ var (
// Access to these nil fields will result in run time panic,
// it is recommended to run these checks as first line of defense.
func ValidateNilAttestation(attestation ethpb.Att) error {
if attestation == nil {
return errors.New("attestation can't be nil")
}
if attestation.GetData() == nil {
return errors.New("attestation's data can't be nil")
if attestation == nil || attestation.IsNil() {
return errors.New("attestation is nil")
}
if attestation.GetData().Source == nil {
return errors.New("attestation's source can't be nil")

@@ -260,12 +260,12 @@ func TestValidateNilAttestation(t *testing.T) {
{
name: "nil attestation",
attestation: nil,
errString: "attestation can't be nil",
errString: "attestation is nil",
},
{
name: "nil attestation data",
attestation: &ethpb.Attestation{},
errString: "attestation's data can't be nil",
errString: "attestation is nil",
},
{
name: "nil attestation source",

@@ -69,15 +69,16 @@ func IsNextPeriodSyncCommittee(
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
val, err := st.ValidatorAtIndex(valIdx)
val, err := st.ValidatorAtIndexReadOnly(valIdx)
if err != nil {
return false, err
}
pk := val.PublicKey()
committee, err := st.NextSyncCommittee()
if err != nil {
return false, err
}
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
return len(findSubCommitteeIndices(pk[:], committee.Pubkeys)) > 0, nil
}
if err != nil {
return false, err
@@ -96,10 +97,11 @@ func CurrentPeriodSyncSubcommitteeIndices(
}
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
val, err := st.ValidatorAtIndex(valIdx)
val, err := st.ValidatorAtIndexReadOnly(valIdx)
if err != nil {
return nil, err
}
pk := val.PublicKey()
committee, err := st.CurrentSyncCommittee()
if err != nil {
return nil, err
@@ -112,7 +114,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
}
}()

return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
}
if err != nil {
return nil, err
@@ -130,15 +132,16 @@ func NextPeriodSyncSubcommitteeIndices(
}
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
val, err := st.ValidatorAtIndex(valIdx)
val, err := st.ValidatorAtIndexReadOnly(valIdx)
if err != nil {
return nil, err
}
pk := val.PublicKey()
committee, err := st.NextSyncCommittee()
if err != nil {
return nil, err
}
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
}
if err != nil {
return nil, err

@@ -78,6 +78,7 @@ func TestIsCurrentEpochSyncCommittee_UsingCommittee(t *testing.T) {

func TestIsCurrentEpochSyncCommittee_DoesNotExist(t *testing.T) {
helpers.ClearCache()
params.SetupTestConfigCleanup(t)

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)
syncCommittee := &ethpb.SyncCommittee{
@@ -264,6 +265,7 @@ func TestCurrentEpochSyncSubcommitteeIndices_UsingCommittee(t *testing.T) {
}

func TestCurrentEpochSyncSubcommitteeIndices_DoesNotExist(t *testing.T) {
params.SetupTestConfigCleanup(t)
helpers.ClearCache()

validators := make([]*ethpb.Validator, params.BeaconConfig().SyncCommitteeSize)

@@ -584,23 +584,23 @@ func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
// and validator.withdrawable_epoch <= epoch
// and balance > 0
// )
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
if val == nil || balance <= 0 {
return false
}

// Electra / EIP-7251 logic
if fork >= version.Electra {
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch <= epoch
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
}

return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch <= epoch
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
}
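Beyond the switch to ReadOnlyValidator accessors, the Electra arm of this check is deliberately wider: any execution-layer credential (the 0x01 ETH1 prefix or the 0x02 compounding prefix from EIP-7251) qualifies, while earlier forks accept only 0x01. A self-contained sketch of the fork-gated predicate (helper names are local to this sketch; the prefix byte values follow the consensus spec):

package main

import "fmt"

const (
	eth1Prefix        = byte(0x01) // ETH1_ADDRESS_WITHDRAWAL_PREFIX
	compoundingPrefix = byte(0x02) // COMPOUNDING_WITHDRAWAL_PREFIX (Electra)
)

// fullyWithdrawable gates the credential test on the fork: post-Electra any
// execution credential counts, pre-Electra only the ETH1 prefix does.
func fullyWithdrawable(prefix byte, withdrawableEpoch, epoch uint64, electra bool) bool {
	if electra {
		return (prefix == eth1Prefix || prefix == compoundingPrefix) && withdrawableEpoch <= epoch
	}
	return prefix == eth1Prefix && withdrawableEpoch <= epoch
}

func main() {
	// A compounding (0x02) validator is fully withdrawable only after Electra.
	fmt.Println(fullyWithdrawable(compoundingPrefix, 10, 20, false)) // false
	fmt.Println(fullyWithdrawable(compoundingPrefix, 10, 20, true))  // true
}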
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
// partial withdrawal. This function assumes that the caller has a lock on the state.
|
||||
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
@@ -630,9 +630,9 @@ func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoc
|
||||
// and has_max_effective_balance
|
||||
// and has_excess_balance
|
||||
// )
|
||||
func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
maxEB := ValidatorMaxEffectiveBalance(val)
|
||||
hasMaxBalance := val.EffectiveBalance == maxEB
|
||||
hasMaxBalance := val.EffectiveBalance() == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
@@ -652,8 +652,8 @@ func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint6
|
||||
// has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
// has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
|
||||
// return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
|
||||
func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance == params.BeaconConfig().MaxEffectiveBalance
|
||||
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
@@ -670,7 +670,7 @@ func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint6
|
||||
// return MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
|
||||
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
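For callers migrating to the new signature, the raw protobuf validator is first wrapped in the read-only accessor, exactly as the updated tests below do; struct field reads such as WithdrawableEpoch become method calls. A minimal sketch, not part of the diff — the import paths are my assumption based on the packages these tests reference:

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	// Wrap the raw protobuf validator in the read-only accessor type,
	// mirroring the state_native.NewValidator calls in the updated tests.
	val, err := state_native.NewValidator(&ethpb.Validator{WithdrawableEpoch: 10})
	if err != nil {
		panic(err)
	}
	// Accessors are now methods: val.WithdrawableEpoch(), val.EffectiveBalance(), ...
	// Passing fork 0 selects the pre-Electra branch of the helper.
	fmt.Println(helpers.IsFullyWithdrawableValidator(val, 32_000_000_000, 11, 0))
}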
@@ -974,13 +974,6 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
		fork      int
		want      bool
	}{
		{
			name:      "Handles nil case",
			validator: nil,
			balance:   0,
			epoch:     0,
			want:      false,
		},
		{
			name: "No ETH1 prefix",
			validator: &ethpb.Validator{
@@ -1036,7 +1029,9 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
			v, err := state_native.NewValidator(tt.validator)
			require.NoError(t, err)
			assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
		})
	}
}
@@ -1050,13 +1045,6 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
		fork      int
		want      bool
	}{
		{
			name:      "Handles nil case",
			validator: nil,
			balance:   0,
			epoch:     0,
			want:      false,
		},
		{
			name: "No ETH1 prefix",
			validator: &ethpb.Validator{
@@ -1113,7 +1101,9 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
			v, err := state_native.NewValidator(tt.validator)
			require.NoError(t, err)
			assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
		})
	}
}
@@ -1167,15 +1157,12 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
			validator: &ethpb.Validator{WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}},
			want:      params.BeaconConfig().MinActivationBalance,
		},
		{
			"Handles nil case",
			nil,
			params.BeaconConfig().MinActivationBalance,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(tt.validator))
			v, err := state_native.NewValidator(tt.validator)
			require.NoError(t, err)
			assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(v))
		})
	}
	// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance

@@ -243,8 +243,8 @@ func createDefaultLightClientUpdate() (*ethpbv2.LightClientUpdate, error) {
		Pubkeys:         pubKeys,
		AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
	}
	nextSyncCommitteeBranch := make([][]byte, fieldparams.NextSyncCommitteeBranchDepth)
	for i := 0; i < fieldparams.NextSyncCommitteeBranchDepth; i++ {
	nextSyncCommitteeBranch := make([][]byte, fieldparams.SyncCommitteeBranchDepth)
	for i := 0; i < fieldparams.SyncCommitteeBranchDepth; i++ {
		nextSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
	}
	executionBranch := make([][]byte, executionBranchNumOfLeaves)
@@ -321,10 +321,10 @@ func BlockToLightClientHeader(block interfaces.ReadOnlySignedBeaconBlock) (*ethp
				HeaderCapella: capellaHeader,
			},
		}, nil
	case version.Deneb:
	case version.Deneb, version.Electra:
		denebHeader, err := blockToLightClientHeaderDeneb(context.Background(), block)
		if err != nil {
			return nil, errors.Wrap(err, "could not get deneb header")
			return nil, errors.Wrap(err, "could not get header")
		}
		return &ethpbv2.LightClientHeaderContainer{
			Header: &ethpbv2.LightClientHeaderContainer_HeaderDeneb{
@@ -337,7 +337,7 @@ func BlockToLightClientHeader(block interfaces.ReadOnlySignedBeaconBlock) (*ethp
}

func blockToLightClientHeaderAltair(block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeader, error) {
	if block.Version() != version.Altair {
	if block.Version() < version.Altair {
		return nil, fmt.Errorf("block version is %s instead of Altair", version.String(block.Version()))
	}

@@ -360,7 +360,7 @@ func blockToLightClientHeaderAltair(block interfaces.ReadOnlySignedBeaconBlock)
}

func blockToLightClientHeaderCapella(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderCapella, error) {
	if block.Version() != version.Capella {
	if block.Version() < version.Capella {
		return nil, fmt.Errorf("block version is %s instead of Capella", version.String(block.Version()))
	}

@@ -422,8 +422,8 @@ func blockToLightClientHeaderCapella(ctx context.Context, block interfaces.ReadO
}

func blockToLightClientHeaderDeneb(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderDeneb, error) {
	if block.Version() != version.Deneb {
		return nil, fmt.Errorf("block version is %s instead of Deneb", version.String(block.Version()))
	if block.Version() < version.Deneb {
		return nil, fmt.Errorf("block version is %s instead of Deneb/Electra", version.String(block.Version()))
	}

	payload, err := block.Block().Body().Execution()

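The version checks above are relaxed from strict equality to lower bounds, so each converter also accepts blocks from later forks that share the same header shape; in particular, Electra blocks now flow through the Deneb path of BlockToLightClientHeader. A hypothetical summary of the resulting dispatch, for illustration only (this helper does not exist in the diff):

// Hypothetical helper summarizing the dispatch after this change.
func headerShapeForVersion(v int) string {
	switch {
	case v >= version.Deneb: // Deneb and Electra share the Deneb light client header
		return "deneb"
	case v >= version.Capella:
		return "capella"
	case v >= version.Altair: // Altair and Bellatrix share the Altair light client header
		return "altair"
	default:
		return "unsupported"
	}
}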
@@ -364,6 +364,26 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
		require.DeepSSZEqual(t, bodyRoot[:], header.Beacon.BodyRoot, "Body root is not equal")
	})

	t.Run("Bellatrix", func(t *testing.T) {
		l := util.NewTestLightClient(t).SetupTestBellatrix()

		container, err := lightClient.BlockToLightClientHeader(l.Block)
		require.NoError(t, err)
		header := container.GetHeaderAltair()
		require.NotNil(t, header, "header is nil")

		parentRoot := l.Block.Block().ParentRoot()
		stateRoot := l.Block.Block().StateRoot()
		bodyRoot, err := l.Block.Block().Body().HashTreeRoot()
		require.NoError(t, err)

		require.Equal(t, l.Block.Block().Slot(), header.Beacon.Slot, "Slot is not equal")
		require.Equal(t, l.Block.Block().ProposerIndex(), header.Beacon.ProposerIndex, "Proposer index is not equal")
		require.DeepSSZEqual(t, parentRoot[:], header.Beacon.ParentRoot, "Parent root is not equal")
		require.DeepSSZEqual(t, stateRoot[:], header.Beacon.StateRoot, "State root is not equal")
		require.DeepSSZEqual(t, bodyRoot[:], header.Beacon.BodyRoot, "Body root is not equal")
	})

	t.Run("Capella", func(t *testing.T) {
		t.Run("Non-Blinded Beacon Block", func(t *testing.T) {
			l := util.NewTestLightClient(t).SetupTestCapella(false)
@@ -599,4 +619,130 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
			require.DeepSSZEqual(t, executionPayloadProof, header.ExecutionBranch, "Execution payload proofs are not equal")
		})
	})

	t.Run("Electra", func(t *testing.T) {
		t.Run("Non-Blinded Beacon Block", func(t *testing.T) {
			l := util.NewTestLightClient(t).SetupTestElectra(false)

			container, err := lightClient.BlockToLightClientHeader(l.Block)
			require.NoError(t, err)
			header := container.GetHeaderDeneb()
			require.NotNil(t, header, "header is nil")

			parentRoot := l.Block.Block().ParentRoot()
			stateRoot := l.Block.Block().StateRoot()
			bodyRoot, err := l.Block.Block().Body().HashTreeRoot()
			require.NoError(t, err)

			payload, err := l.Block.Block().Body().Execution()
			require.NoError(t, err)

			transactionsRoot, err := lightClient.ComputeTransactionsRoot(payload)
			require.NoError(t, err)

			withdrawalsRoot, err := lightClient.ComputeWithdrawalsRoot(payload)
			require.NoError(t, err)

			blobGasUsed, err := payload.BlobGasUsed()
			require.NoError(t, err)

			excessBlobGas, err := payload.ExcessBlobGas()
			require.NoError(t, err)

			executionHeader := &v11.ExecutionPayloadHeaderElectra{
				ParentHash:       payload.ParentHash(),
				FeeRecipient:     payload.FeeRecipient(),
				StateRoot:        payload.StateRoot(),
				ReceiptsRoot:     payload.ReceiptsRoot(),
				LogsBloom:        payload.LogsBloom(),
				PrevRandao:       payload.PrevRandao(),
				BlockNumber:      payload.BlockNumber(),
				GasLimit:         payload.GasLimit(),
				GasUsed:          payload.GasUsed(),
				Timestamp:        payload.Timestamp(),
				ExtraData:        payload.ExtraData(),
				BaseFeePerGas:    payload.BaseFeePerGas(),
				BlockHash:        payload.BlockHash(),
				TransactionsRoot: transactionsRoot,
				WithdrawalsRoot:  withdrawalsRoot,
				BlobGasUsed:      blobGasUsed,
				ExcessBlobGas:    excessBlobGas,
			}

			executionPayloadProof, err := blocks.PayloadProof(l.Ctx, l.Block.Block())
			require.NoError(t, err)

			require.Equal(t, l.Block.Block().Slot(), header.Beacon.Slot, "Slot is not equal")
			require.Equal(t, l.Block.Block().ProposerIndex(), header.Beacon.ProposerIndex, "Proposer index is not equal")
			require.DeepSSZEqual(t, parentRoot[:], header.Beacon.ParentRoot, "Parent root is not equal")
			require.DeepSSZEqual(t, stateRoot[:], header.Beacon.StateRoot, "State root is not equal")
			require.DeepSSZEqual(t, bodyRoot[:], header.Beacon.BodyRoot, "Body root is not equal")

			require.DeepSSZEqual(t, executionHeader, header.Execution, "Execution headers are not equal")

			require.DeepSSZEqual(t, executionPayloadProof, header.ExecutionBranch, "Execution payload proofs are not equal")
		})

		t.Run("Blinded Beacon Block", func(t *testing.T) {
			l := util.NewTestLightClient(t).SetupTestElectra(true)

			container, err := lightClient.BlockToLightClientHeader(l.Block)
			require.NoError(t, err)
			header := container.GetHeaderDeneb()
			require.NotNil(t, header, "header is nil")

			parentRoot := l.Block.Block().ParentRoot()
			stateRoot := l.Block.Block().StateRoot()
			bodyRoot, err := l.Block.Block().Body().HashTreeRoot()
			require.NoError(t, err)

			payload, err := l.Block.Block().Body().Execution()
			require.NoError(t, err)

			transactionsRoot, err := payload.TransactionsRoot()
			require.NoError(t, err)

			withdrawalsRoot, err := payload.WithdrawalsRoot()
			require.NoError(t, err)

			blobGasUsed, err := payload.BlobGasUsed()
			require.NoError(t, err)

			excessBlobGas, err := payload.ExcessBlobGas()
			require.NoError(t, err)

			executionHeader := &v11.ExecutionPayloadHeaderElectra{
				ParentHash:       payload.ParentHash(),
				FeeRecipient:     payload.FeeRecipient(),
				StateRoot:        payload.StateRoot(),
				ReceiptsRoot:     payload.ReceiptsRoot(),
				LogsBloom:        payload.LogsBloom(),
				PrevRandao:       payload.PrevRandao(),
				BlockNumber:      payload.BlockNumber(),
				GasLimit:         payload.GasLimit(),
				GasUsed:          payload.GasUsed(),
				Timestamp:        payload.Timestamp(),
				ExtraData:        payload.ExtraData(),
				BaseFeePerGas:    payload.BaseFeePerGas(),
				BlockHash:        payload.BlockHash(),
				TransactionsRoot: transactionsRoot,
				WithdrawalsRoot:  withdrawalsRoot,
				BlobGasUsed:      blobGasUsed,
				ExcessBlobGas:    excessBlobGas,
			}

			executionPayloadProof, err := blocks.PayloadProof(l.Ctx, l.Block.Block())
			require.NoError(t, err)

			require.Equal(t, l.Block.Block().Slot(), header.Beacon.Slot, "Slot is not equal")
			require.Equal(t, l.Block.Block().ProposerIndex(), header.Beacon.ProposerIndex, "Proposer index is not equal")
			require.DeepSSZEqual(t, parentRoot[:], header.Beacon.ParentRoot, "Parent root is not equal")
			require.DeepSSZEqual(t, stateRoot[:], header.Beacon.StateRoot, "State root is not equal")
			require.DeepSSZEqual(t, bodyRoot[:], header.Beacon.BodyRoot, "Body root is not equal")

			require.DeepSSZEqual(t, executionHeader, header.Execution, "Execution headers are not equal")

			require.DeepSSZEqual(t, executionPayloadProof, header.ExecutionBranch, "Execution payload proofs are not equal")
		})
	})
}

51  beacon-chain/core/peerdas/BUILD.bazel  Normal file
@@ -0,0 +1,51 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"helpers.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_holiman_uint256//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_x_sync//errgroup:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["helpers_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/blockchain/kzg:go_default_library",
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_consensys_gnark_crypto//ecc/bls12-381/fr:go_default_library",
|
||||
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
631  beacon-chain/core/peerdas/helpers.go  Normal file
@@ -0,0 +1,631 @@
package peerdas

import (
	"context"
	"encoding/binary"
	"fmt"
	"math"
	"math/big"
	"slices"
	"time"

	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/holiman/uint256"
	errors "github.com/pkg/errors"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"

	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

const (
	CustodySubnetCountEnrKey = "csc"
)

// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5
type Csc uint64

func (Csc) ENRKey() string { return CustodySubnetCountEnrKey }

var (
	// Custom errors
	errCustodySubnetCountTooLarge   = errors.New("custody subnet count larger than data column sidecar subnet count")
	errIndexTooLarge                = errors.New("column index is larger than the specified columns count")
	errMismatchLength               = errors.New("mismatch in the length of the commitments and proofs")
	errRecordNil                    = errors.New("record is nil")
	errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer")

	// maxUint256 is the maximum value of a uint256.
	maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64}
)

// CustodyColumnSubnets computes the subnets the node should participate in for custody.
func CustodyColumnSubnets(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Check if the custody subnet count is larger than the data column sidecar subnet count.
	if custodySubnetCount > dataColumnSidecarSubnetCount {
		return nil, errCustodySubnetCountTooLarge
	}

	// First, compute the subnet IDs that the node should participate in.
	subnetIds := make(map[uint64]bool, custodySubnetCount)

	one := uint256.NewInt(1)

	for currentId := new(uint256.Int).SetBytes(nodeId.Bytes()); uint64(len(subnetIds)) < custodySubnetCount; currentId.Add(currentId, one) {
		// Convert to big endian bytes.
		currentIdBytesBigEndian := currentId.Bytes32()

		// Convert to little endian.
		currentIdBytesLittleEndian := bytesutil.ReverseByteOrder(currentIdBytesBigEndian[:])

		// Hash the result.
		hashedCurrentId := hash.Hash(currentIdBytesLittleEndian)

		// Get the subnet ID.
		subnetId := binary.LittleEndian.Uint64(hashedCurrentId[:8]) % dataColumnSidecarSubnetCount

		// Add the subnet to the map.
		subnetIds[subnetId] = true

		// Overflow prevention.
		if currentId.Cmp(maxUint256) == 0 {
			currentId = uint256.NewInt(0)
		}
	}

	return subnetIds, nil
}
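A usage sketch: given a discv5 node ID, derive the custody subnets. The zero node ID and custody count of 4 are illustrative only; a real node would pass its own identity and configured count.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
)

func main() {
	var nodeID enode.ID // zero-valued ID, for illustration only
	subnets, err := peerdas.CustodyColumnSubnets(nodeID, 4)
	if err != nil {
		panic(err)
	}
	// Four distinct subnet IDs, deterministic for a given node ID.
	fmt.Println(len(subnets), subnets)
}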

// CustodyColumns computes the columns the node should custody.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#helper-functions
func CustodyColumns(nodeId enode.ID, custodySubnetCount uint64) (map[uint64]bool, error) {
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Compute the custody subnets.
	subnetIds, err := CustodyColumnSubnets(nodeId, custodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody subnets")
	}

	columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount

	// Knowing the subnet ID and the number of columns per subnet, select all the columns the node should custody.
	// Columns belonging to the same subnet are spread evenly across the column space,
	// one every dataColumnSidecarSubnetCount indices.
	columnIndices := make(map[uint64]bool, custodySubnetCount*columnsPerSubnet)
	for i := uint64(0); i < columnsPerSubnet; i++ {
		for subnetId := range subnetIds {
			columnIndex := dataColumnSidecarSubnetCount*i + subnetId
			columnIndices[columnIndex] = true
		}
	}

	return columnIndices, nil
}
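Worked example of the assignment formula: with 128 columns and 32 subnets (illustrative values, four columns per subnet), subnet 5 maps to columns 5, 37, 69 and 101, i.e. one column every 32 indices. The same formula in isolation:

// Columns custodied for a single subnet, using illustrative constants;
// the real values come from the beacon config.
const (
	exampleNumberOfColumns = 128
	exampleSubnetCount     = 32
)

func columnsForSubnet(subnetID uint64) []uint64 {
	columnsPerSubnet := uint64(exampleNumberOfColumns / exampleSubnetCount) // 4
	out := make([]uint64, 0, columnsPerSubnet)
	for i := uint64(0); i < columnsPerSubnet; i++ {
		out = append(out, exampleSubnetCount*i+subnetID) // same formula as CustodyColumns
	}
	return out // subnetID 5 -> [5 37 69 101]
}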

// DataColumnSidecars computes the data column sidecars from the signed block and blobs.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#recover_matrix
func DataColumnSidecars(signedBlock interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*ethpb.DataColumnSidecar, error) {
	startTime := time.Now()
	blobsCount := len(blobs)
	if blobsCount == 0 {
		return nil, nil
	}

	// Get the signed block header.
	signedBlockHeader, err := signedBlock.Header()
	if err != nil {
		return nil, errors.Wrap(err, "signed block header")
	}

	// Get the block body.
	block := signedBlock.Block()
	blockBody := block.Body()

	// Get the blob KZG commitments.
	blobKzgCommitments, err := blockBody.BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Compute the KZG commitments inclusion proof.
	kzgCommitmentsInclusionProof, err := blocks.MerkleProofKZGCommitments(blockBody)
	if err != nil {
		return nil, errors.Wrap(err, "merkle proof KZG commitments")
	}

	// Compute cells and proofs.
	cellsAndProofs := make([]kzg.CellsAndProofs, blobsCount)

	eg, _ := errgroup.WithContext(context.Background())
	for i := range blobs {
		blobIndex := i
		eg.Go(func() error {
			blob := &blobs[blobIndex]
			blobCellsAndProofs, err := kzg.ComputeCellsAndKZGProofs(blob)
			if err != nil {
				return errors.Wrap(err, "compute cells and KZG proofs")
			}

			cellsAndProofs[blobIndex] = blobCellsAndProofs
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		return nil, err
	}

	// Get the column sidecars.
	sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
	for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			copiedProof := kzgProof
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			ColumnIndex:                  columnIndex,
			DataColumn:                   columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProof:                     kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}
	dataColumnComputationTime.Observe(float64(time.Since(startTime).Milliseconds()))
	return sidecars, nil
}
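Driving this function follows the shape of the tests in helpers_test.go further down: start the trusted setup, attach the blob commitments to a signed block, then fan the blobs out into sidecars. A minimal sketch reusing those test helpers (the wrapper function name is mine, not part of the diff):

// Minimal usage sketch, mirroring TestVerifyDataColumnSidecarKZGProofs below.
func buildSidecarsSketch(t *testing.T, blobs []kzg.Blob, commitments [][]byte) []*ethpb.DataColumnSidecar {
	require.NoError(t, kzg.Start()) // load the KZG trusted setup once per process

	blockPb := util.NewBeaconBlockDeneb()
	blockPb.Block.Body.BlobKzgCommitments = commitments // one 48-byte commitment per blob

	signed, err := blocks.NewSignedBeaconBlock(blockPb)
	require.NoError(t, err)

	// One sidecar per column; each sidecar carries one cell and one proof per blob.
	sidecars, err := peerdas.DataColumnSidecars(signed, blobs)
	require.NoError(t, err)
	return sidecars
}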

// populateAndFilterIndices returns a sorted slice of indices, setting all indices if none are provided,
// and filtering out indices higher than the blob count.
func populateAndFilterIndices(indices map[uint64]bool, blobCount uint64) []uint64 {
	// If no indices are provided, provide all blobs.
	if len(indices) == 0 {
		for i := range blobCount {
			indices[i] = true
		}
	}

	// Filter out blob indices higher than the blob count.
	filteredIndices := make(map[uint64]bool, len(indices))
	for i := range indices {
		if i < blobCount {
			filteredIndices[i] = true
		}
	}

	// Transform the set into a slice.
	indicesSlice := make([]uint64, 0, len(filteredIndices))
	for i := range filteredIndices {
		indicesSlice = append(indicesSlice, i)
	}

	// Sort the indices.
	slices.Sort[[]uint64](indicesSlice)

	return indicesSlice
}

// Blobs extracts blobs from `dataColumnsSidecar`.
// This can be seen as the reciprocal function of DataColumnSidecars.
// `dataColumnsSidecar` needs to contain the data columns corresponding to the non-extended matrix,
// else an error will be returned.
// (`dataColumnsSidecar` can contain extra columns, but they will be ignored.)
func Blobs(indices map[uint64]bool, dataColumnsSidecar []*ethpb.DataColumnSidecar) ([]*blocks.VerifiedROBlob, error) {
	columnCount := fieldparams.NumberOfColumns

	neededColumnCount := columnCount / 2

	// Check if all needed columns are present.
	sliceIndexFromColumnIndex := make(map[uint64]int, len(dataColumnsSidecar))
	for i := range dataColumnsSidecar {
		dataColumnSideCar := dataColumnsSidecar[i]
		columnIndex := dataColumnSideCar.ColumnIndex

		if columnIndex < uint64(neededColumnCount) {
			sliceIndexFromColumnIndex[columnIndex] = i
		}
	}

	actualColumnCount := len(sliceIndexFromColumnIndex)

	// Get missing columns.
	if actualColumnCount < neededColumnCount {
		missingColumns := make(map[int]bool, neededColumnCount-actualColumnCount)
		for i := range neededColumnCount {
			if _, ok := sliceIndexFromColumnIndex[uint64(i)]; !ok {
				missingColumns[i] = true
			}
		}

		missingColumnsSlice := make([]int, 0, len(missingColumns))
		for i := range missingColumns {
			missingColumnsSlice = append(missingColumnsSlice, i)
		}

		slices.Sort[[]int](missingColumnsSlice)
		return nil, errors.Errorf("some columns are missing: %v", missingColumnsSlice)
	}

	// It is safe to retrieve the first column since we already checked that `dataColumnsSidecar` is not empty.
	firstDataColumnSidecar := dataColumnsSidecar[0]

	blobCount := uint64(len(firstDataColumnSidecar.DataColumn))

	// Check that all columns have the same length.
	for i := range dataColumnsSidecar {
		if uint64(len(dataColumnsSidecar[i].DataColumn)) != blobCount {
			return nil, errors.Errorf("mismatch in the length of the data columns, expected %d, got %d", blobCount, len(dataColumnsSidecar[i].DataColumn))
		}
	}

	// Reconstruct verified RO blobs from columns.
	verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

	// Populate and filter indices.
	indicesSlice := populateAndFilterIndices(indices, blobCount)

	for _, blobIndex := range indicesSlice {
		var blob kzg.Blob

		// Compute the content of the blob.
		for columnIndex := range neededColumnCount {
			sliceIndex, ok := sliceIndexFromColumnIndex[uint64(columnIndex)]
			if !ok {
				return nil, errors.Errorf("missing column %d, this should never happen", columnIndex)
			}

			dataColumnSideCar := dataColumnsSidecar[sliceIndex]
			cell := dataColumnSideCar.DataColumn[blobIndex]

			for i := 0; i < len(cell); i++ {
				blob[columnIndex*kzg.BytesPerCell+i] = cell[i]
			}
		}

		// Retrieve the blob KZG commitment.
		blobKZGCommitment := kzg.Commitment(firstDataColumnSidecar.KzgCommitments[blobIndex])

		// Compute the blob KZG proof.
		blobKzgProof, err := kzg.ComputeBlobKZGProof(&blob, blobKZGCommitment)
		if err != nil {
			return nil, errors.Wrap(err, "compute blob KZG proof")
		}

		blobSidecar := &ethpb.BlobSidecar{
			Index:                    blobIndex,
			Blob:                     blob[:],
			KzgCommitment:            blobKZGCommitment[:],
			KzgProof:                 blobKzgProof[:],
			SignedBlockHeader:        firstDataColumnSidecar.SignedBlockHeader,
			CommitmentInclusionProof: firstDataColumnSidecar.KzgCommitmentsInclusionProof,
		}

		roBlob, err := blocks.NewROBlob(blobSidecar)
		if err != nil {
			return nil, errors.Wrap(err, "new RO blob")
		}

		verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
		verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
	}

	return verifiedROBlobs, nil
}
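Round-trip sketch: since the first NumberOfColumns/2 sidecars carry the non-extended matrix, feeding the full sidecar set (extras are ignored) back through Blobs with an empty indices map returns every blob. A sketch assuming it sits in this package (the wrapper name is mine; see TestDataColumnsSidecarsBlobsRoundtrip in helpers_test.go below for the full version):

// Round-trip sketch: blobs -> sidecars -> blobs.
func roundTripBlobsSketch(signed interfaces.ReadOnlySignedBeaconBlock, blobs []kzg.Blob) ([]*blocks.VerifiedROBlob, error) {
	sidecars, err := DataColumnSidecars(signed, blobs)
	if err != nil {
		return nil, err
	}
	// An empty indices map means "return every blob".
	return Blobs(map[uint64]bool{}, sidecars)
}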

// DataColumnSidecarsForReconstruct is a TEMPORARY function until there is an official specification for it.
// It is scheduled for deletion.
func DataColumnSidecarsForReconstruct(
	blobKzgCommitments [][]byte,
	signedBlockHeader *ethpb.SignedBeaconBlockHeader,
	kzgCommitmentsInclusionProof [][]byte,
	cellsAndProofs []kzg.CellsAndProofs,
) ([]*ethpb.DataColumnSidecar, error) {
	// Each CellsAndProofs corresponds to a blob,
	// so we can get the blob count by checking the length of cellsAndProofs.
	blobsCount := len(cellsAndProofs)
	if blobsCount == 0 {
		return nil, nil
	}

	// Get the column sidecars.
	sidecars := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns)
	for columnIndex := uint64(0); columnIndex < fieldparams.NumberOfColumns; columnIndex++ {
		column := make([]kzg.Cell, 0, blobsCount)
		kzgProofOfColumn := make([]kzg.Proof, 0, blobsCount)

		for rowIndex := 0; rowIndex < blobsCount; rowIndex++ {
			cellsForRow := cellsAndProofs[rowIndex].Cells
			proofsForRow := cellsAndProofs[rowIndex].Proofs

			cell := cellsForRow[columnIndex]
			column = append(column, cell)

			kzgProof := proofsForRow[columnIndex]
			kzgProofOfColumn = append(kzgProofOfColumn, kzgProof)
		}

		columnBytes := make([][]byte, 0, blobsCount)
		for i := range column {
			columnBytes = append(columnBytes, column[i][:])
		}

		kzgProofOfColumnBytes := make([][]byte, 0, blobsCount)
		for _, kzgProof := range kzgProofOfColumn {
			copiedProof := kzgProof
			kzgProofOfColumnBytes = append(kzgProofOfColumnBytes, copiedProof[:])
		}

		sidecar := &ethpb.DataColumnSidecar{
			ColumnIndex:                  columnIndex,
			DataColumn:                   columnBytes,
			KzgCommitments:               blobKzgCommitments,
			KzgProof:                     kzgProofOfColumnBytes,
			SignedBlockHeader:            signedBlockHeader,
			KzgCommitmentsInclusionProof: kzgCommitmentsInclusionProof,
		}

		sidecars = append(sidecars, sidecar)
	}

	return sidecars, nil
}

// VerifyDataColumnsSidecarKZGProofs verifies the provided KZG proofs of data columns.
func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) (bool, error) {
	// Retrieve the number of columns.
	numberOfColumns := params.BeaconConfig().NumberOfColumns

	// Compute the total count.
	count := 0
	for _, sidecar := range sidecars {
		count += len(sidecar.DataColumn)
	}

	commitments := make([]kzg.Bytes48, 0, count)
	indices := make([]uint64, 0, count)
	cells := make([]kzg.Cell, 0, count)
	proofs := make([]kzg.Bytes48, 0, count)

	for _, sidecar := range sidecars {
		// Check that the column index is not too large.
		if sidecar.ColumnIndex >= numberOfColumns {
			return false, errIndexTooLarge
		}

		// Check if the KZG commitments size and data column size match.
		if len(sidecar.DataColumn) != len(sidecar.KzgCommitments) {
			return false, errMismatchLength
		}

		// Check if the KZG proofs size and data column size match.
		if len(sidecar.DataColumn) != len(sidecar.KzgProof) {
			return false, errMismatchLength
		}

		for i := range sidecar.DataColumn {
			commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
			indices = append(indices, sidecar.ColumnIndex)
			cells = append(cells, kzg.Cell(sidecar.DataColumn[i]))
			proofs = append(proofs, kzg.Bytes48(sidecar.KzgProof[i]))
		}
	}

	// Verify the whole batch at once.
	verified, err := kzg.VerifyCellKZGProofBatch(commitments, indices, cells, proofs)
	if err != nil {
		return false, errors.Wrap(err, "verify cell KZG proof batch")
	}

	return verified, nil
}
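Verification is batched: the cells, proofs and commitments from all given sidecars are flattened and checked in a single VerifyCellKZGProofBatch call. A usage sketch in the style of the tests below (the wrapper name is mine):

// Verification sketch, assuming the imports of helpers_test.go below.
func verifySidecarSketch(t *testing.T, sidecar *ethpb.DataColumnSidecar) {
	roCol, err := blocks.NewRODataColumn(sidecar)
	require.NoError(t, err)

	// All cells and proofs across the given sidecars are verified in one batch.
	verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
	require.NoError(t, err)
	require.Equal(t, true, verified)
}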

// CustodySubnetCount returns the number of subnets the node should participate in for custody.
func CustodySubnetCount() uint64 {
	if flags.Get().SubscribeToAllSubnets {
		return params.BeaconConfig().DataColumnSidecarSubnetCount
	}

	return params.BeaconConfig().CustodyRequirement
}

// SubnetSamplingSize returns the number of subnets the node should sample from.
// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/das-core.md#subnet-sampling
func SubnetSamplingSize() uint64 {
	samplesPerSlot := params.BeaconConfig().SamplesPerSlot
	custodySubnetCount := CustodySubnetCount()

	return max(samplesPerSlot, custodySubnetCount)
}

// CustodyColumnCount returns the number of columns the node should custody.
func CustodyColumnCount() uint64 {
	// Get the number of subnets.
	dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

	// Compute the number of columns per subnet.
	columnsPerSubnet := fieldparams.NumberOfColumns / dataColumnSidecarSubnetCount

	// Get the number of subnets we custody.
	custodySubnetCount := CustodySubnetCount()

	// Finally, compute the number of columns we should custody.
	custodyColumnCount := custodySubnetCount * columnsPerSubnet

	return custodyColumnCount
}
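Worked example with the values from TestCustodyColumnCount further down (128 columns assumed): 32 subnets give 128 / 32 = 4 columns per subnet, so a custody requirement of 2 subnets yields 2 * 4 = 8 custodied columns.

// Same arithmetic as the test, with illustrative constants.
const exampleCustodyColumnCount = (128 / 32) * 2 // == 8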

// HypergeomCDF computes the hypergeometric cumulative distribution function.
// https://en.wikipedia.org/wiki/Hypergeometric_distribution
func HypergeomCDF(k, M, n, N uint64) float64 {
	denominatorInt := new(big.Int).Binomial(int64(M), int64(N)) // lint:ignore uintcast
	denominator := new(big.Float).SetInt(denominatorInt)

	rBig := big.NewFloat(0)

	for i := uint64(0); i < k+1; i++ {
		a := new(big.Int).Binomial(int64(n), int64(i)) // lint:ignore uintcast
		b := new(big.Int).Binomial(int64(M-n), int64(N-i))
		numeratorInt := new(big.Int).Mul(a, b)
		numerator := new(big.Float).SetInt(numeratorInt)
		item := new(big.Float).Quo(numerator, denominator)
		rBig.Add(rBig, item)
	}

	r, _ := rBig.Float64()

	return r
}
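For reference, the quantity computed above is the standard hypergeometric CDF:

P(X <= k) = sum over i in [0, k] of C(n, i) * C(M - n, N - i) / C(M, N)

With the values exercised by TestHypergeomCDF below, HypergeomCDF(5, 128, 65, 16) evaluates to roughly 0.0797: the probability of hitting at most 5 of 65 "missing" columns when sampling 16 of 128.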

// ExtendedSampleCount computes, for a given number of samples per slot and allowed failures, the
// number of samples we should actually query from peers.
// TODO: Add link to the specification once it is available.
func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 {
	// Retrieve the columns count.
	columnsCount := params.BeaconConfig().NumberOfColumns

	// If half of the columns are missing, we are able to reconstruct the data.
	// If half of the columns + 1 are missing, we are not able to reconstruct the data.
	// This is the smallest worst case.
	worstCaseMissing := columnsCount/2 + 1

	// Compute the false positive threshold.
	falsePositiveThreshold := HypergeomCDF(0, columnsCount, worstCaseMissing, samplesPerSlot)

	var sampleCount uint64

	// Finally, compute the extended sample count.
	for sampleCount = samplesPerSlot; sampleCount < columnsCount+1; sampleCount++ {
		if HypergeomCDF(allowedFailures, columnsCount, worstCaseMissing, sampleCount) <= falsePositiveThreshold {
			break
		}
	}

	return sampleCount
}
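Worked example, using the expectations from TestExtendedSampleCount below: with 16 samples per slot under the default column count, tolerating 0 failures keeps 16 queries, tolerating 2 raises the requirement to 24, and tolerating 5 raises it to 32.

// Values taken from TestExtendedSampleCount in helpers_test.go below.
func extendedSampleCountExample() uint64 {
	return ExtendedSampleCount(16, 2) // == 24 under the default config
}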

// CustodyCountFromRecord extracts the custody subnet count advertised in an ENR record.
func CustodyCountFromRecord(record *enr.Record) (uint64, error) {
	if record == nil {
		return 0, errRecordNil
	}

	// Load the `custody_subnet_count`.
	var csc Csc
	if err := record.Load(&csc); err != nil {
		return 0, errCannotLoadCustodySubnetCount
	}

	return uint64(csc), nil
}
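ENR round-trip sketch, mirroring TestCustodyCountFromRecord below: the count is advertised under the "csc" key and read back with Load. The wrapper name is mine:

// ENR round-trip sketch, assuming the imports of helpers_test.go below.
func custodyCountRoundTripSketch(t *testing.T) {
	record := &enr.Record{}
	record.Set(peerdas.Csc(7)) // advertise a custody subnet count of 7 under the "csc" key

	count, err := peerdas.CustodyCountFromRecord(record)
	require.NoError(t, err)
	require.Equal(t, uint64(7), count)
}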

// CanSelfReconstruct returns whether the node custodies enough columns to
// reconstruct the remaining ones on its own.
func CanSelfReconstruct(numCol uint64) bool {
	total := params.BeaconConfig().NumberOfColumns
	// If total is odd, we need total/2 + 1 columns to reconstruct.
	// If total is even, we need total/2 columns to reconstruct.
	columnsNeeded := total/2 + total%2
	return numCol >= columnsNeeded
}
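Threshold examples, taken from TestCanSelfReconstruct below: with 64 columns the node needs at least 32; with 65 (odd) it needs 33, i.e. total/2 + total%2 under integer division.

// With NumberOfColumns == 64, custody of 32 columns is exactly enough.
func canReconstructExample() bool {
	return CanSelfReconstruct(32) // true when NumberOfColumns is 64
}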

// RecoverCellsAndProofs recovers the cells and proofs from the data column sidecars.
func RecoverCellsAndProofs(
	dataColumnSideCars []*ethpb.DataColumnSidecar,
	blockRoot [fieldparams.RootLength]byte,
) ([]kzg.CellsAndProofs, error) {
	var wg errgroup.Group

	dataColumnSideCarsCount := len(dataColumnSideCars)

	if dataColumnSideCarsCount == 0 {
		return nil, errors.New("no data column sidecars")
	}

	// Check if all columns have the same length.
	blobCount := len(dataColumnSideCars[0].DataColumn)
	for _, sidecar := range dataColumnSideCars {
		length := len(sidecar.DataColumn)

		if length != blobCount {
			return nil, errors.New("columns do not have the same length")
		}
	}

	// Recover cells and compute proofs in parallel.
	recoveredCellsAndProofs := make([]kzg.CellsAndProofs, blobCount)

	for blobIndex := 0; blobIndex < blobCount; blobIndex++ {
		bIndex := blobIndex
		wg.Go(func() error {
			start := time.Now()

			cellsIndices := make([]uint64, 0, dataColumnSideCarsCount)
			cells := make([]kzg.Cell, 0, dataColumnSideCarsCount)

			for _, sidecar := range dataColumnSideCars {
				// Build the cell indices.
				cellsIndices = append(cellsIndices, sidecar.ColumnIndex)

				// Get the cell.
				column := sidecar.DataColumn
				cell := column[bIndex]

				cells = append(cells, kzg.Cell(cell))
			}

			// Recover the cells and proofs for the corresponding blob.
			cellsAndProofs, err := kzg.RecoverCellsAndKZGProofs(cellsIndices, cells)

			if err != nil {
				return errors.Wrapf(err, "recover cells and KZG proofs for blob %d", bIndex)
			}

			recoveredCellsAndProofs[bIndex] = cellsAndProofs
			log.WithFields(logrus.Fields{
				"elapsed": time.Since(start),
				"index":   bIndex,
				"root":    fmt.Sprintf("%x", blockRoot),
			}).Debug("Recovered cells and proofs")
			return nil
		})
	}

	if err := wg.Wait(); err != nil {
		return nil, err
	}

	return recoveredCellsAndProofs, nil
}
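Putting recovery and re-extension together, any recoverable subset (at least half the columns) can be expanded back to the full sidecar set; TestReconstructionRoundTrip below exercises exactly this. A sketch, assuming it sits in this package (the wrapper name is mine):

// Reconstruction sketch: rebuild all sidecars from a recoverable subset.
func reconstructAllSketch(subset []*ethpb.DataColumnSidecar, root [fieldparams.RootLength]byte) ([]*ethpb.DataColumnSidecar, error) {
	cellsAndProofs, err := RecoverCellsAndProofs(subset, root)
	if err != nil {
		return nil, err
	}

	// Metadata (commitments, header, inclusion proof) is identical across a
	// block's sidecars, so it can be copied from any surviving column.
	first := subset[0]
	return DataColumnSidecarsForReconstruct(
		first.KzgCommitments,
		first.SignedBlockHeader,
		first.KzgCommitmentsInclusionProof,
		cellsAndProofs,
	)
}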
544  beacon-chain/core/peerdas/helpers_test.go  Normal file
@@ -0,0 +1,544 @@
package peerdas_test

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"errors"
	"fmt"
	"testing"

	"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
	GoKZG "github.com/crate-crypto/go-kzg-4844"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/kzg"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/sirupsen/logrus"
)

func deterministicRandomness(seed int64) [32]byte {
	// Converts an int64 to a byte slice
	buf := new(bytes.Buffer)
	err := binary.Write(buf, binary.BigEndian, seed)
	if err != nil {
		logrus.WithError(err).Error("Failed to write int64 to bytes buffer")
		return [32]byte{}
	}
	bytes := buf.Bytes()

	return sha256.Sum256(bytes)
}

// Returns a serialized random field element in big-endian
func GetRandFieldElement(seed int64) [32]byte {
	bytes := deterministicRandomness(seed)
	var r fr.Element
	r.SetBytes(bytes[:])

	return GoKZG.SerializeScalar(r)
}

// Returns a random blob using the passed seed as entropy
func GetRandBlob(seed int64) kzg.Blob {
	var blob kzg.Blob
	bytesPerBlob := GoKZG.ScalarsPerBlob * GoKZG.SerializedScalarSize
	for i := 0; i < bytesPerBlob; i += GoKZG.SerializedScalarSize {
		fieldElementBytes := GetRandFieldElement(seed + int64(i))
		copy(blob[i:i+GoKZG.SerializedScalarSize], fieldElementBytes[:])
	}
	return blob
}

func GenerateCommitmentAndProof(blob *kzg.Blob) (*kzg.Commitment, *kzg.Proof, error) {
	commitment, err := kzg.BlobToKZGCommitment(blob)
	if err != nil {
		return nil, nil, err
	}
	proof, err := kzg.ComputeBlobKZGProof(blob, commitment)
	if err != nil {
		return nil, nil, err
	}
	return &commitment, &proof, err
}

func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
	dbBlock := util.NewBeaconBlockDeneb()
	require.NoError(t, kzg.Start())

	var (
		comms [][]byte
		blobs []kzg.Blob
	)
	for i := int64(0); i < 6; i++ {
		blob := GetRandBlob(i)
		commitment, _, err := GenerateCommitmentAndProof(&blob)
		require.NoError(t, err)
		comms = append(comms, commitment[:])
		blobs = append(blobs, blob)
	}

	dbBlock.Block.Body.BlobKzgCommitments = comms
	sBlock, err := blocks.NewSignedBeaconBlock(dbBlock)
	require.NoError(t, err)
	sCars, err := peerdas.DataColumnSidecars(sBlock, blobs)
	require.NoError(t, err)

	for i, sidecar := range sCars {
		roCol, err := blocks.NewRODataColumn(sidecar)
		require.NoError(t, err)
		verified, err := peerdas.VerifyDataColumnsSidecarKZGProofs([]blocks.RODataColumn{roCol})
		require.NoError(t, err)
		require.Equal(t, true, verified, fmt.Sprintf("sidecar %d failed", i))
	}
}

func TestDataColumnSidecars(t *testing.T) {
	var expected []*ethpb.DataColumnSidecar = nil
	actual, err := peerdas.DataColumnSidecars(nil, []kzg.Blob{})
	require.NoError(t, err)

	require.DeepSSZEqual(t, expected, actual)
}

func TestBlobs(t *testing.T) {
	blobsIndice := map[uint64]bool{}

	almostAllColumns := make([]*ethpb.DataColumnSidecar, 0, fieldparams.NumberOfColumns/2)
	for i := 2; i < fieldparams.NumberOfColumns/2+2; i++ {
		almostAllColumns = append(almostAllColumns, &ethpb.DataColumnSidecar{
			ColumnIndex: uint64(i),
		})
	}

	testCases := []struct {
		name     string
		input    []*ethpb.DataColumnSidecar
		expected []*blocks.VerifiedROBlob
		err      error
	}{
		{
			name:     "empty input",
			input:    []*ethpb.DataColumnSidecar{},
			expected: nil,
			err:      errors.New("some columns are missing: [0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63]"),
		},
		{
			name:     "missing columns",
			input:    almostAllColumns,
			expected: nil,
			err:      errors.New("some columns are missing: [0 1]"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual, err := peerdas.Blobs(blobsIndice, tc.input)
			if tc.err != nil {
				require.Equal(t, tc.err.Error(), err.Error())
			} else {
				require.NoError(t, err)
			}
			require.DeepSSZEqual(t, tc.expected, actual)
		})
	}
}

func TestDataColumnsSidecarsBlobsRoundtrip(t *testing.T) {
	const blobCount = 5
	blobsIndex := map[uint64]bool{}

	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)

	// Create a protobuf signed beacon block.
	signedBeaconBlockPb := util.NewBeaconBlockDeneb()

	// Generate random blobs and their corresponding commitments and proofs.
	blobs := make([]kzg.Blob, 0, blobCount)
	blobKzgCommitments := make([]*kzg.Commitment, 0, blobCount)
	blobKzgProofs := make([]*kzg.Proof, 0, blobCount)

	for blobIndex := range blobCount {
		// Create a random blob.
		blob := GetRandBlob(int64(blobIndex))
		blobs = append(blobs, blob)

		// Generate a blobKZGCommitment for the blob.
		blobKZGCommitment, proof, err := GenerateCommitmentAndProof(&blob)
		require.NoError(t, err)

		blobKzgCommitments = append(blobKzgCommitments, blobKZGCommitment)
		blobKzgProofs = append(blobKzgProofs, proof)
	}

	// Set the commitments into the block.
	blobKzgCommitmentsBytes := make([][]byte, 0, blobCount)
	for _, blobKZGCommitment := range blobKzgCommitments {
		blobKzgCommitmentsBytes = append(blobKzgCommitmentsBytes, blobKZGCommitment[:])
	}

	signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobKzgCommitmentsBytes

	// Generate verified RO blobs.
	verifiedROBlobs := make([]*blocks.VerifiedROBlob, 0, blobCount)

	// Create a signed beacon block from the protobuf.
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
	require.NoError(t, err)

	commitmentInclusionProof, err := blocks.MerkleProofKZGCommitments(signedBeaconBlock.Block().Body())
	require.NoError(t, err)

	for blobIndex := range blobCount {
		blob := blobs[blobIndex]
		blobKZGCommitment := blobKzgCommitments[blobIndex]
		blobKzgProof := blobKzgProofs[blobIndex]

		// Get the signed beacon block header.
		signedBeaconBlockHeader, err := signedBeaconBlock.Header()
		require.NoError(t, err)

		blobSidecar := &ethpb.BlobSidecar{
			Index:                    uint64(blobIndex),
			Blob:                     blob[:],
			KzgCommitment:            blobKZGCommitment[:],
			KzgProof:                 blobKzgProof[:],
			SignedBlockHeader:        signedBeaconBlockHeader,
			CommitmentInclusionProof: commitmentInclusionProof,
		}

		roBlob, err := blocks.NewROBlob(blobSidecar)
		require.NoError(t, err)

		verifiedROBlob := blocks.NewVerifiedROBlob(roBlob)
		verifiedROBlobs = append(verifiedROBlobs, &verifiedROBlob)
	}

	// Compute data columns sidecars from the signed beacon block and from the blobs.
	dataColumnsSidecar, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
	require.NoError(t, err)

	// Compute the blobs from the data columns sidecar.
	roundtripBlobs, err := peerdas.Blobs(blobsIndex, dataColumnsSidecar)
	require.NoError(t, err)

	// Check that the blobs are the same.
	require.DeepSSZEqual(t, verifiedROBlobs, roundtripBlobs)
}

func TestCustodySubnetCount(t *testing.T) {
	testCases := []struct {
		name                  string
		subscribeToAllSubnets bool
		expected              uint64
	}{
		{
			name:                  "subscribeToAllSubnets=false",
			subscribeToAllSubnets: false,
			expected:              params.BeaconConfig().CustodyRequirement,
		},
		{
			name:                  "subscribeToAllSubnets=true",
			subscribeToAllSubnets: true,
			expected:              params.BeaconConfig().DataColumnSidecarSubnetCount,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Set flags.
			resetFlags := flags.Get()
			defer func() {
				flags.Init(resetFlags)
			}()

			params.SetupTestConfigCleanup(t)
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeToAllSubnets = tc.subscribeToAllSubnets
			flags.Init(gFlags)

			// Get the custody subnet count.
			actual := peerdas.CustodySubnetCount()
			require.Equal(t, tc.expected, actual)
		})
	}
}

func TestCustodyColumnCount(t *testing.T) {
	const expected uint64 = 8

	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig().Copy()
	config.DataColumnSidecarSubnetCount = 32
	config.CustodyRequirement = 2
	params.OverrideBeaconConfig(config)

	actual := peerdas.CustodyColumnCount()
	require.Equal(t, expected, actual)
}

func TestHypergeomCDF(t *testing.T) {
	// Hypergeometric CDF (https://en.wikipedia.org/wiki/Hypergeometric_distribution) with
	// population size 128, successes in population 65, sample size 16, at most 5 successes in sample.
	// Expected result: ~0.0797.
	const (
		expected = 0.0796665913283742
		margin   = 0.000001
	)

	actual := peerdas.HypergeomCDF(5, 128, 65, 16)
	require.Equal(t, true, expected-margin <= actual && actual <= expected+margin)
}

func TestExtendedSampleCount(t *testing.T) {
	const samplesPerSlot = 16

	testCases := []struct {
		name                string
		allowedMissings     uint64
		extendedSampleCount uint64
	}{
		{name: "allowedMissings=0", allowedMissings: 0, extendedSampleCount: 16},
		{name: "allowedMissings=1", allowedMissings: 1, extendedSampleCount: 20},
		{name: "allowedMissings=2", allowedMissings: 2, extendedSampleCount: 24},
		{name: "allowedMissings=3", allowedMissings: 3, extendedSampleCount: 27},
		{name: "allowedMissings=4", allowedMissings: 4, extendedSampleCount: 29},
		{name: "allowedMissings=5", allowedMissings: 5, extendedSampleCount: 32},
		{name: "allowedMissings=6", allowedMissings: 6, extendedSampleCount: 35},
		{name: "allowedMissings=7", allowedMissings: 7, extendedSampleCount: 37},
		{name: "allowedMissings=8", allowedMissings: 8, extendedSampleCount: 40},
		{name: "allowedMissings=9", allowedMissings: 9, extendedSampleCount: 42},
		{name: "allowedMissings=10", allowedMissings: 10, extendedSampleCount: 44},
		{name: "allowedMissings=11", allowedMissings: 11, extendedSampleCount: 47},
		{name: "allowedMissings=12", allowedMissings: 12, extendedSampleCount: 49},
		{name: "allowedMissings=13", allowedMissings: 13, extendedSampleCount: 51},
		{name: "allowedMissings=14", allowedMissings: 14, extendedSampleCount: 53},
		{name: "allowedMissings=15", allowedMissings: 15, extendedSampleCount: 55},
		{name: "allowedMissings=16", allowedMissings: 16, extendedSampleCount: 57},
		{name: "allowedMissings=17", allowedMissings: 17, extendedSampleCount: 59},
		{name: "allowedMissings=18", allowedMissings: 18, extendedSampleCount: 61},
		{name: "allowedMissings=19", allowedMissings: 19, extendedSampleCount: 63},
		{name: "allowedMissings=20", allowedMissings: 20, extendedSampleCount: 65},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := peerdas.ExtendedSampleCount(samplesPerSlot, tc.allowedMissings)
			require.Equal(t, tc.extendedSampleCount, result)
		})
	}
}

func TestCustodyCountFromRecord(t *testing.T) {
	const expected uint64 = 7

	// Create an Ethereum record.
	record := &enr.Record{}
	record.Set(peerdas.Csc(expected))

	actual, err := peerdas.CustodyCountFromRecord(record)
	require.NoError(t, err)
	require.Equal(t, expected, actual)
}

func TestCanSelfReconstruct(t *testing.T) {
	testCases := []struct {
		name                   string
		totalNumberOfColumns   uint64
		custodyNumberOfColumns uint64
		expected               bool
	}{
		{
			name:                   "totalNumberOfColumns=64, custodyNumberOfColumns=31",
			totalNumberOfColumns:   64,
			custodyNumberOfColumns: 31,
			expected:               false,
		},
		{
			name:                   "totalNumberOfColumns=64, custodyNumberOfColumns=32",
			totalNumberOfColumns:   64,
			custodyNumberOfColumns: 32,
			expected:               true,
		},
		{
			name:                   "totalNumberOfColumns=65, custodyNumberOfColumns=32",
			totalNumberOfColumns:   65,
			custodyNumberOfColumns: 32,
			expected:               false,
		},
		{
			name:                   "totalNumberOfColumns=65, custodyNumberOfColumns=33",
			totalNumberOfColumns:   65,
			custodyNumberOfColumns: 33,
			expected:               true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Set the total number of columns.
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig().Copy()
			cfg.NumberOfColumns = tc.totalNumberOfColumns
			params.OverrideBeaconConfig(cfg)

			// Check if reconstruction is possible.
			actual := peerdas.CanSelfReconstruct(tc.custodyNumberOfColumns)
			require.Equal(t, tc.expected, actual)
		})
	}
}

func TestReconstructionRoundTrip(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
|
||||
const blobCount = 5
|
||||
|
||||
var blockRoot [fieldparams.RootLength]byte
|
||||
|
||||
signedBeaconBlockPb := util.NewBeaconBlockDeneb()
|
||||
require.NoError(t, kzg.Start())
|
||||
|
||||
// Generate random blobs and their corresponding commitments.
|
||||
var (
|
||||
blobsKzgCommitments [][]byte
|
||||
blobs []kzg.Blob
|
||||
)
|
||||
for i := range blobCount {
|
||||
blob := GetRandBlob(int64(i))
|
||||
commitment, _, err := GenerateCommitmentAndProof(&blob)
|
||||
require.NoError(t, err)
|
||||
|
||||
blobsKzgCommitments = append(blobsKzgCommitments, commitment[:])
|
||||
blobs = append(blobs, blob)
|
||||
}
|
||||
|
||||
// Generate a signed beacon block.
|
||||
signedBeaconBlockPb.Block.Body.BlobKzgCommitments = blobsKzgCommitments
|
||||
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(signedBeaconBlockPb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get the signed beacon block header.
|
||||
signedBeaconBlockHeader, err := signedBeaconBlock.Header()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Convert data columns sidecars from signed block and blobs.
|
||||
dataColumnSidecars, err := peerdas.DataColumnSidecars(signedBeaconBlock, blobs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create verified RO data columns.
|
||||
verifiedRoDataColumns := make([]*blocks.VerifiedRODataColumn, 0, blobCount)
|
||||
for _, dataColumnSidecar := range dataColumnSidecars {
|
||||
roDataColumn, err := blocks.NewRODataColumn(dataColumnSidecar)
|
||||
require.NoError(t, err)
|
||||
|
||||
verifiedRoDataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
|
||||
verifiedRoDataColumns = append(verifiedRoDataColumns, &verifiedRoDataColumn)
|
||||
}
|
||||
|
||||
verifiedRoDataColumn := verifiedRoDataColumns[0]
|
||||
|
||||
numberOfColumns := params.BeaconConfig().NumberOfColumns
|
||||
|
||||
var noDataColumns []*ethpb.DataColumnSidecar
|
||||
dataColumnsWithDifferentLengths := []*ethpb.DataColumnSidecar{
|
||||
{DataColumn: [][]byte{{}, {}}},
|
||||
{DataColumn: [][]byte{{}}},
|
||||
}
|
||||
notEnoughDataColumns := dataColumnSidecars[:numberOfColumns/2-1]
|
||||
originalDataColumns := dataColumnSidecars[:numberOfColumns/2]
|
||||
extendedDataColumns := dataColumnSidecars[numberOfColumns/2:]
|
||||
evenDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
oddDataColumns := make([]*ethpb.DataColumnSidecar, 0, numberOfColumns/2)
|
||||
allDataColumns := dataColumnSidecars
|
||||
|
||||
for i, dataColumn := range dataColumnSidecars {
|
||||
if i%2 == 0 {
|
||||
evenDataColumns = append(evenDataColumns, dataColumn)
|
||||
} else {
|
||||
oddDataColumns = append(oddDataColumns, dataColumn)
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
dataColumnsSidecar []*ethpb.DataColumnSidecar
|
||||
isError bool
|
||||
}{
|
||||
{
|
||||
name: "No data columns sidecars",
|
||||
dataColumnsSidecar: noDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "Data columns sidecar with different lengths",
|
||||
dataColumnsSidecar: dataColumnsWithDifferentLengths,
|
||||
isError: true,
|
||||
},
|
||||
{
|
||||
name: "All columns are present (no actual need to reconstruct)",
|
||||
dataColumnsSidecar: allDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only original columns are present",
|
||||
dataColumnsSidecar: originalDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only extended columns are present",
|
||||
dataColumnsSidecar: extendedDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only even columns are present",
|
||||
dataColumnsSidecar: evenDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Only odd columns are present",
|
||||
dataColumnsSidecar: oddDataColumns,
|
||||
isError: false,
|
||||
},
|
||||
{
|
||||
name: "Not enough columns to reconstruct",
|
||||
dataColumnsSidecar: notEnoughDataColumns,
|
||||
isError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Recover cells and proofs from available data columns sidecars.
|
||||
cellsAndProofs, err := peerdas.RecoverCellsAndProofs(tc.dataColumnsSidecar, blockRoot)
|
||||
isError := (err != nil)
|
||||
require.Equal(t, tc.isError, isError)
|
||||
|
||||
if isError {
|
||||
return
|
||||
}
|
||||
|
||||
// Recover all data columns sidecars from cells and proofs.
|
||||
reconstructedDataColumnsSideCars, err := peerdas.DataColumnSidecarsForReconstruct(
|
||||
blobsKzgCommitments,
|
||||
signedBeaconBlockHeader,
|
||||
verifiedRoDataColumn.KzgCommitmentsInclusionProof,
|
||||
cellsAndProofs,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := dataColumnSidecars
|
||||
actual := reconstructedDataColumnsSideCars
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
})
|
||||
}
|
||||
}
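// Editor's note (not part of the diff): the round trip holds because the column
// extension is an erasure code: any numberOfColumns/2 distinct columns (the original
// half, the extended half, or any even/odd mix, as exercised above) are enough to
// recover the full set, while numberOfColumns/2 - 1 columns are not. A hedged
// pre-check a caller might run before attempting recovery:
//
//	func haveEnoughColumnsForRecovery(storedCount, numberOfColumns uint64) bool {
//		return storedCount >= numberOfColumns/2
//	}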

beacon-chain/core/peerdas/log.go (new file)
@@ -0,0 +1,5 @@
package peerdas

import "github.com/sirupsen/logrus"

var log = logrus.WithField("prefix", "peerdas")

beacon-chain/core/peerdas/metrics.go (new file)
@@ -0,0 +1,14 @@
package peerdas

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "data_column_sidecar_computation_milliseconds",
		Help:    "Captures the time taken to compute data column sidecars from blobs.",
		Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
	},
)
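// Editor's sketch (assumption, not part of the diff): how this histogram would be
// fed from the sidecar computation path, for example around DataColumnSidecars:
//
//	start := time.Now()
//	sidecars, err := DataColumnSidecars(signedBlock, blobs)
//	if err == nil {
//		dataColumnComputationTime.Observe(float64(time.Since(start).Milliseconds()))
//	}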

@@ -53,6 +53,11 @@ func HigherEqualThanAltairVersionAndEpoch(s state.BeaconState, e primitives.Epoc
 	return s.Version() >= version.Altair && e >= params.BeaconConfig().AltairForkEpoch
 }
 
+// PeerDASIsActive checks whether peerDAS is active at the provided slot.
+func PeerDASIsActive(slot primitives.Slot) bool {
+	return slots.ToEpoch(slot) >= params.BeaconConfig().Eip7594ForkEpoch
+}
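// Editor's usage sketch (illustrative, not part of the diff): callers gate data
// column handling on this predicate, for example:
//
//	if peerdas.PeerDASIsActive(block.Slot()) {
//		// custody and serve DataColumnSidecars instead of BlobSidecars.
//	}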
+
 // CanUpgradeToAltair returns true if the input `slot` can upgrade to Altair.
 // Spec code:
 // If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH

@@ -333,8 +333,7 @@ func ProcessBlockForStateRoot(
 			return nil, errors.Wrap(err, "could not process withdrawals")
 		}
 	}
-	state, err = b.ProcessPayload(state, blk.Body())
-	if err != nil {
+	if err = b.ProcessPayload(state, blk.Body()); err != nil {
 		return nil, errors.Wrap(err, "could not process execution data")
 	}
 }

@@ -698,3 +698,45 @@ func TestProcessSlotsConditionally(t *testing.T) {
 		assert.Equal(t, primitives.Slot(6), s.Slot())
 	})
 }
+
+func BenchmarkProcessSlots_Capella(b *testing.B) {
+	st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)
+
+	var err error
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
+		if err != nil {
+			b.Fatalf("Failed to process slot %v", err)
+		}
+	}
+}
+
+func BenchmarkProcessSlots_Deneb(b *testing.B) {
+	st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)
+
+	var err error
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
+		if err != nil {
+			b.Fatalf("Failed to process slot %v", err)
+		}
+	}
+}
+
+func BenchmarkProcessSlots_Electra(b *testing.B) {
+	st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)
+
+	var err error
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
+		if err != nil {
+			b.Fatalf("Failed to process slot %v", err)
+		}
+	}
+}

@@ -241,76 +241,35 @@ func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
 	return slashed
 }
 
-// ExitedValidatorIndices determines the indices exited during the current epoch.
-func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
+// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
+//
+// A validator is considered to have exited during an epoch if their ExitEpoch equals the epoch,
+// excluding validators that have been ejected.
+// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
+// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
+func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
 	exited := make([]primitives.ValidatorIndex, 0)
-	exitEpochs := make([]primitives.Epoch, 0)
-	for i := 0; i < len(validators); i++ {
-		val := validators[i]
-		if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
-			exitEpochs = append(exitEpochs, val.ExitEpoch)
-		}
-	}
-	exitQueueEpoch := primitives.Epoch(0)
-	for _, i := range exitEpochs {
-		if exitQueueEpoch < i {
-			exitQueueEpoch = i
-		}
-	}
-
-	// We use the exit queue churn to determine if we have passed a churn limit.
-	exitQueueChurn := uint64(0)
-	for _, val := range validators {
-		if val.ExitEpoch == exitQueueEpoch {
-			exitQueueChurn++
-		}
-	}
-	churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
-	if churn < exitQueueChurn {
-		exitQueueEpoch++
-	}
-	withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
 	for i, val := range validators {
-		if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
-			val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
+		if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
 			exited = append(exited, primitives.ValidatorIndex(i))
 		}
 	}
 	return exited, nil
 }
 
-// EjectedValidatorIndices determines the indices ejected during the given epoch.
-func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
+// EjectedValidatorIndices returns the indices of validators who were ejected during the specified epoch.
+//
+// A validator is considered ejected during an epoch if:
+//   - Their ExitEpoch equals the epoch.
+//   - Their EffectiveBalance is less than or equal to the EjectionBalance threshold.
+//
+// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
+// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
+// withdrawable epochs.
+func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
 	ejected := make([]primitives.ValidatorIndex, 0)
-	exitEpochs := make([]primitives.Epoch, 0)
-	for i := 0; i < len(validators); i++ {
-		val := validators[i]
-		if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
-			exitEpochs = append(exitEpochs, val.ExitEpoch)
-		}
-	}
-	exitQueueEpoch := primitives.Epoch(0)
-	for _, i := range exitEpochs {
-		if exitQueueEpoch < i {
-			exitQueueEpoch = i
-		}
-	}
-
-	// We use the exit queue churn to determine if we have passed a churn limit.
-	exitQueueChurn := uint64(0)
-	for _, val := range validators {
-		if val.ExitEpoch == exitQueueEpoch {
-			exitQueueChurn++
-		}
-	}
-	churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
-	if churn < exitQueueChurn {
-		exitQueueEpoch++
-	}
-	withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
 	for i, val := range validators {
-		if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
-			val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
+		if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
 			ejected = append(ejected, primitives.ValidatorIndex(i))
 		}
 	}

@@ -389,19 +389,16 @@ func TestExitedValidatorIndices(t *testing.T) {
 			state: &ethpb.BeaconState{
 				Validators: []*ethpb.Validator{
 					{
-						EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
-						ExitEpoch:         0,
-						WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
+						EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
+						ExitEpoch:        0,
 					},
 					{
-						EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
-						ExitEpoch:         0,
-						WithdrawableEpoch: 10,
+						EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
+						ExitEpoch:        10,
 					},
 					{
-						EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
-						ExitEpoch:         0,
-						WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
+						EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
+						ExitEpoch:        0,
 					},
 				},
 			},

@@ -433,11 +430,7 @@
 		},
 	}
 	for _, tt := range tests {
 		s, err := state_native.InitializeFromProtoPhase0(tt.state)
 		require.NoError(t, err)
-		activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
-		require.NoError(t, err)
-		exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
+		exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
 		require.NoError(t, err)
 		assert.DeepEqual(t, tt.wanted, exitedIndices)
 	}

@@ -4,6 +4,7 @@ go_library(
     name = "go_default_library",
     srcs = [
         "availability.go",
+        "availability_columns.go",
         "cache.go",
         "iface.go",
         "mock.go",

@@ -11,6 +12,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/das",
     visibility = ["//visibility:public"],
     deps = [
+        "//beacon-chain/core/peerdas:go_default_library",
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/verification:go_default_library",
         "//config/fieldparams:go_default_library",

@@ -20,6 +22,7 @@ go_library(
         "//runtime/logging:go_default_library",
         "//runtime/version:go_default_library",
         "//time/slots:go_default_library",
+        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_sirupsen_logrus//:go_default_library",
     ],

@@ -28,6 +31,7 @@
 go_test(
     name = "go_default_test",
     srcs = [
+        "availability_columns_test.go",
         "availability_test.go",
         "cache_test.go",
     ],

@@ -35,6 +39,7 @@ go_test(
     deps = [
         "//beacon-chain/db/filesystem:go_default_library",
         "//beacon-chain/verification:go_default_library",
+        "//cmd/beacon-chain/flags:go_default_library",
         "//config/fieldparams:go_default_library",
         "//config/params:go_default_library",
         "//consensus-types/blocks:go_default_library",

@@ -43,6 +48,7 @@ go_test(
         "//testing/require:go_default_library",
         "//testing/util:go_default_library",
         "//time/slots:go_default_library",
+        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
     ],
 )

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/ethereum/go-ethereum/p2p/enode"
 	errors "github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"

@@ -80,7 +81,7 @@ func (s *LazilyPersistentStore) Persist(current primitives.Slot, sc ...blocks.RO
 
 // IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
 // BlobSidecars already in the db are assumed to have been previously verified against the block.
-func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
+func (s *LazilyPersistentStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
 	blockCommitments, err := commitmentsToCheck(b, current)
 	if err != nil {
 		return errors.Wrapf(err, "could not check data availability for block %#x", b.Root())

beacon-chain/das/availability_columns.go (new file)
@@ -0,0 +1,182 @@
package das

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	errors "github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
	log "github.com/sirupsen/logrus"
)

// LazilyPersistentStoreColumn is an implementation of AvailabilityStore to be used when batch syncing data columns.
// This implementation will hold any columns passed to PersistColumns until IsDataAvailable is called for their
// block, at which time they will undergo full verification and be saved to the disk.
type LazilyPersistentStoreColumn struct {
	store *filesystem.BlobStorage
	cache *cache
}

func NewLazilyPersistentStoreColumn(store *filesystem.BlobStorage) *LazilyPersistentStoreColumn {
	return &LazilyPersistentStoreColumn{
		store: store,
		cache: newCache(),
	}
}

// Persist does nothing at the moment.
// TODO: Very ugly; change the interface to allow for columns and blobs.
func (*LazilyPersistentStoreColumn) Persist(_ primitives.Slot, _ ...blocks.ROBlob) error {
	return nil
}

// PersistColumns adds columns to the working column cache. Columns stored in this cache will be persisted
// for at least as long as the node is running. Once IsDataAvailable succeeds, all columns referenced
// by the given block are guaranteed to be persisted for the remainder of the retention period.
func (s *LazilyPersistentStoreColumn) PersistColumns(current primitives.Slot, sc ...blocks.RODataColumn) error {
	if len(sc) == 0 {
		return nil
	}
	if len(sc) > 1 {
		first := sc[0].BlockRoot()
		for i := 1; i < len(sc); i++ {
			if first != sc[i].BlockRoot() {
				return errMixedRoots
			}
		}
	}
	if !params.WithinDAPeriod(slots.ToEpoch(sc[0].Slot()), slots.ToEpoch(current)) {
		return nil
	}
	key := keyFromColumn(sc[0])
	entry := s.cache.ensure(key)
	for i := range sc {
		if err := entry.stashColumns(&sc[i]); err != nil {
			return err
		}
	}
	return nil
}

// IsDataAvailable returns nil if all the commitments in the given block are persisted to the db and have been verified.
// DataColumnSidecars already in the db are assumed to have been previously verified against the block.
func (s *LazilyPersistentStoreColumn) IsDataAvailable(
	ctx context.Context,
	nodeID enode.ID,
	currentSlot primitives.Slot,
	block blocks.ROBlock,
) error {
	blockCommitments, err := fullCommitmentsToCheck(nodeID, block, currentSlot)
	if err != nil {
		return errors.Wrapf(err, "full commitments to check with block root `%#x` and current slot `%d`", block.Root(), currentSlot)
	}

	// Return early for blocks that do not have any commitments.
	if blockCommitments.count() == 0 {
		return nil
	}

	// Build the cache key for the block.
	key := keyFromBlock(block)

	// Retrieve the cache entry for the block, or create an empty one if it doesn't exist.
	entry := s.cache.ensure(key)

	// Delete the cache entry for the block at the end.
	defer s.cache.delete(key)

	// Get the root of the block.
	blockRoot := block.Root()

	// Wait for the summarizer to be ready before proceeding.
	summarizer, err := s.store.WaitForSummarizer(ctx)
	if err != nil {
		log.
			WithField("root", fmt.Sprintf("%#x", blockRoot)).
			WithError(err).
			Debug("Failed to receive BlobStorageSummarizer within IsDataAvailable")
	} else {
		// Get the summary for the block, and set it in the cache entry.
		summary := summarizer.Summary(blockRoot)
		entry.setDiskSummary(summary)
	}

	// Verify we have all the expected sidecars, and fail fast if any are missing or inconsistent.
	// We don't try to salvage problematic batches because this indicates a misbehaving peer and we'd rather
	// ignore their response and decrease their peer score.
	roDataColumns, err := entry.filterColumns(blockRoot, blockCommitments)
	if err != nil {
		return errors.Wrap(err, "incomplete DataColumnSidecar batch")
	}

	// Create verified RO data columns from RO data columns.
	verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roDataColumns))

	for _, roDataColumn := range roDataColumns {
		verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roDataColumn)
		verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)
	}

	// Ensure that each column sidecar is written to disk.
	for _, verifiedRODataColumn := range verifiedRODataColumns {
		if err := s.store.SaveDataColumn(verifiedRODataColumn); err != nil {
			return errors.Wrapf(err, "save data column for index `%d` for block `%#x`", verifiedRODataColumn.ColumnIndex, blockRoot)
		}
	}

	// All ColumnSidecars are persisted - data availability check succeeds.
	return nil
}
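// Editor's sketch (flow inferred from this file, not part of the diff): a batch-sync
// caller stashes columns first, then blocks on the availability check:
//
//	store := NewLazilyPersistentStoreColumn(blobStorage)
//	if err := store.PersistColumns(currentSlot, columns...); err != nil {
//		return err
//	}
//	if err := store.IsDataAvailable(ctx, nodeID, currentSlot, block); err != nil {
//		return err // missing or mismatched column sidecars
//	}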

// fullCommitmentsToCheck returns the commitments to check for a given block.
func fullCommitmentsToCheck(nodeID enode.ID, block blocks.ROBlock, currentSlot primitives.Slot) (*safeCommitmentsArray, error) {
	// Return early for blocks that are pre-Deneb.
	if block.Version() < version.Deneb {
		return &safeCommitmentsArray{}, nil
	}

	// Compute the block epoch.
	blockSlot := block.Block().Slot()
	blockEpoch := slots.ToEpoch(blockSlot)

	// Compute the current epoch.
	currentEpoch := slots.ToEpoch(currentSlot)

	// Return early if the request is out of the MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS window.
	if !params.WithinDAPeriod(blockEpoch, currentEpoch) {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the KZG commitments for the block.
	kzgCommitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, errors.Wrap(err, "blob KZG commitments")
	}

	// Return early if there are no commitments in the block.
	if len(kzgCommitments) == 0 {
		return &safeCommitmentsArray{}, nil
	}

	// Retrieve the custody columns.
	custodySubnetCount := peerdas.CustodySubnetCount()
	custodyColumns, err := peerdas.CustodyColumns(nodeID, custodySubnetCount)
	if err != nil {
		return nil, errors.Wrap(err, "custody columns")
	}

	// Create a safe commitments array for the custody columns.
	commitmentsArray := &safeCommitmentsArray{}
	for column := range custodyColumns {
		commitmentsArray[column] = kzgCommitments
	}

	return commitmentsArray, nil
}
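// Editor's worked example (hedged): with 4 blob commitments in the block and custody
// columns {3, 17}, the returned array holds the full 4-commitment slice at indices 3
// and 17 and nil everywhere else, so count() == 2 and nonEmptyIndices() returns
// {3: true, 17: true}; filterColumns then only demands sidecars for those two indices.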

beacon-chain/das/availability_columns_test.go (new file)
@@ -0,0 +1,94 @@
package das

import (
	"testing"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/prysmaticlabs/prysm/v5/time/slots"
)

func TestFullCommitmentsToCheck(t *testing.T) {
	windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
	require.NoError(t, err)
	commits := [][]byte{
		bytesutil.PadTo([]byte("a"), 48),
		bytesutil.PadTo([]byte("b"), 48),
		bytesutil.PadTo([]byte("c"), 48),
		bytesutil.PadTo([]byte("d"), 48),
	}
	cases := []struct {
		name    string
		commits [][]byte
		block   func(*testing.T) blocks.ROBlock
		slot    primitives.Slot
		err     error
	}{
		{
			name: "pre deneb",
			block: func(t *testing.T) blocks.ROBlock {
				bb := util.NewBeaconBlockBellatrix()
				sb, err := blocks.NewSignedBeaconBlock(bb)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
		},
		{
			name: "commitments within da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockDeneb()
				d.Block.Body.BlobKzgCommitments = commits
				d.Block.Slot = 100
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			commits: commits,
			slot:    100,
		},
		{
			name: "commitments outside da",
			block: func(t *testing.T) blocks.ROBlock {
				d := util.NewBeaconBlockDeneb()
				// The block is from slot 0, "current slot" is window size + 1 (so outside the window).
				d.Block.Body.BlobKzgCommitments = commits
				sb, err := blocks.NewSignedBeaconBlock(d)
				require.NoError(t, err)
				rb, err := blocks.NewROBlock(sb)
				require.NoError(t, err)
				return rb
			},
			slot: windowSlots + 1,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			resetFlags := flags.Get()
			gFlags := new(flags.GlobalFlags)
			gFlags.SubscribeToAllSubnets = true
			flags.Init(gFlags)
			defer flags.Init(resetFlags)

			b := c.block(t)
			co, err := fullCommitmentsToCheck(enode.ID{}, b, c.slot)
			if c.err != nil {
				require.ErrorIs(t, err, c.err)
			} else {
				require.NoError(t, err)
			}
			for i := 0; i < len(co); i++ {
				require.DeepEqual(t, c.commits, co[i])
			}
		})
	}
}

@@ -5,6 +5,7 @@ import (
 	"context"
 	"testing"
 
+	"github.com/ethereum/go-ethereum/p2p/enode"
 	errors "github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"

@@ -124,18 +125,18 @@ func TestLazilyPersistent_Missing(t *testing.T) {
 
 	// Only one commitment persisted, should return error with other indices
 	require.NoError(t, as.Persist(1, scs[2]))
-	err := as.IsDataAvailable(ctx, 1, blk)
+	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
 	require.ErrorIs(t, err, errMissingSidecar)
 
 	// All but one persisted, return missing idx
 	require.NoError(t, as.Persist(1, scs[0]))
-	err = as.IsDataAvailable(ctx, 1, blk)
+	err = as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
 	require.ErrorIs(t, err, errMissingSidecar)
 
 	// All persisted, return nil
 	require.NoError(t, as.Persist(1, scs...))
 
-	require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
+	require.NoError(t, as.IsDataAvailable(ctx, enode.ID{}, 1, blk))
 }
 
 func TestLazilyPersistent_Mismatch(t *testing.T) {

@@ -150,7 +151,7 @@ func TestLazilyPersistent_Mismatch(t *testing.T) {
 
 	// Only one commitment persisted, should return error with other indices
 	require.NoError(t, as.Persist(1, scs[0]))
-	err := as.IsDataAvailable(ctx, 1, blk)
+	err := as.IsDataAvailable(ctx, enode.ID{}, 1, blk)
 	require.NotNil(t, err)
 	require.ErrorIs(t, err, errCommitmentMismatch)
 }

@@ -2,6 +2,7 @@ package das
 
 import (
 	"bytes"
+	"reflect"
 
 	"github.com/pkg/errors"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"

@@ -38,6 +39,10 @@ func keyFromSidecar(sc blocks.ROBlob) cacheKey {
 	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
 }
 
+func keyFromColumn(sc blocks.RODataColumn) cacheKey {
+	return cacheKey{slot: sc.Slot(), root: sc.BlockRoot()}
+}
+
 // keyFromBlock is a convenience method for constructing a cacheKey from a ROBlock value.
 func keyFromBlock(b blocks.ROBlock) cacheKey {
 	return cacheKey{slot: b.Block().Slot(), root: b.Root()}

@@ -61,6 +66,7 @@ func (c *cache) delete(key cacheKey) {
 // cacheEntry holds a fixed-length cache of BlobSidecars.
 type cacheEntry struct {
 	scs         [fieldparams.MaxBlobsPerBlock]*blocks.ROBlob
+	colScs      [fieldparams.NumberOfColumns]*blocks.RODataColumn
 	diskSummary filesystem.BlobStorageSummary
 }

@@ -82,6 +88,17 @@ func (e *cacheEntry) stash(sc *blocks.ROBlob) error {
 	return nil
 }
 
+func (e *cacheEntry) stashColumns(sc *blocks.RODataColumn) error {
+	if sc.ColumnIndex >= fieldparams.NumberOfColumns {
+		return errors.Wrapf(errIndexOutOfBounds, "index=%d", sc.ColumnIndex)
+	}
+	if e.colScs[sc.ColumnIndex] != nil {
+		return errors.Wrapf(ErrDuplicateSidecar, "root=%#x, index=%d, commitment=%#x", sc.BlockRoot(), sc.ColumnIndex, sc.KzgCommitments)
+	}
+	e.colScs[sc.ColumnIndex] = sc
+	return nil
+}
+
 // filter evicts sidecars that are not committed to by the block and returns custom
 // errors if the cache is missing any of the commitments, or if the commitments in
 // the cache do not match those found in the block. If err is nil, then all expected

@@ -117,6 +134,39 @@ func (e *cacheEntry) filter(root [32]byte, kc safeCommitmentArray) ([]blocks.ROB
 	return scs, nil
 }
 
+func (e *cacheEntry) filterColumns(root [32]byte, commitmentsArray *safeCommitmentsArray) ([]blocks.RODataColumn, error) {
+	nonEmptyIndices := commitmentsArray.nonEmptyIndices()
+	if e.diskSummary.AllDataColumnsAvailable(nonEmptyIndices) {
+		return nil, nil
+	}
+
+	commitmentsCount := commitmentsArray.count()
+	sidecars := make([]blocks.RODataColumn, 0, commitmentsCount)
+
+	for i := uint64(0); i < fieldparams.NumberOfColumns; i++ {
+		// Skip if we already store this data column.
+		if e.diskSummary.HasIndex(i) {
+			continue
+		}
+
+		if commitmentsArray[i] == nil {
+			continue
+		}
+
+		if e.colScs[i] == nil {
+			return nil, errors.Wrapf(errMissingSidecar, "root=%#x, index=%#x", root, i)
+		}
+
+		if !reflect.DeepEqual(commitmentsArray[i], e.colScs[i].KzgCommitments) {
+			return nil, errors.Wrapf(errCommitmentMismatch, "root=%#x, index=%#x, commitment=%#x, block commitment=%#x", root, i, e.colScs[i].KzgCommitments, commitmentsArray[i])
+		}
+
+		sidecars = append(sidecars, *e.colScs[i])
+	}
+
+	return sidecars, nil
+}
+
 // safeCommitmentArray is a fixed size array of commitment byte slices. This is helpful for avoiding
 // gratuitous bounds checks.
 type safeCommitmentArray [fieldparams.MaxBlobsPerBlock][]byte

@@ -129,3 +179,33 @@ func (s safeCommitmentArray) count() int {
 	}
 	return fieldparams.MaxBlobsPerBlock
 }
+
+// safeCommitmentsArray is a fixed size array of commitments.
+// This is helpful for avoiding gratuitous bounds checks.
+type safeCommitmentsArray [fieldparams.NumberOfColumns][][]byte
+
+// count returns the number of commitments in the array.
+func (s *safeCommitmentsArray) count() int {
+	count := 0
+
+	for i := range s {
+		if s[i] != nil {
+			count++
+		}
+	}
+
+	return count
+}
+
+// nonEmptyIndices returns a map of indices that are non-nil in the array.
+func (s *safeCommitmentsArray) nonEmptyIndices() map[uint64]bool {
+	columns := make(map[uint64]bool)
+
+	for i := range s {
+		if s[i] != nil {
+			columns[uint64(i)] = true
+		}
+	}
+
+	return columns
+}

@@ -3,6 +3,7 @@ package das
 import (
 	"context"
 
+	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 )

@@ -14,6 +15,6 @@
 // IsDataAvailable guarantees that all blobs committed to in the block have been
 // durably persisted before returning a non-error value.
 type AvailabilityStore interface {
-	IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error
+	IsDataAvailable(ctx context.Context, nodeID enode.ID, current primitives.Slot, b blocks.ROBlock) error
 	Persist(current primitives.Slot, sc ...blocks.ROBlob) error
 }

@@ -3,6 +3,7 @@ package das
 import (
 	"context"
 
+	"github.com/ethereum/go-ethereum/p2p/enode"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
 )

@@ -16,7 +17,7 @@ type MockAvailabilityStore struct {
 
 var _ AvailabilityStore = &MockAvailabilityStore{}
 
 // IsDataAvailable satisfies the corresponding method of the AvailabilityStore interface in a way that is useful for tests.
-func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, current primitives.Slot, b blocks.ROBlock) error {
+func (m *MockAvailabilityStore) IsDataAvailable(ctx context.Context, _ enode.ID, current primitives.Slot, b blocks.ROBlock) error {
 	if m.VerifyAvailabilityCallback != nil {
 		return m.VerifyAvailabilityCallback(ctx, current, b)
 	}

@@ -13,6 +13,7 @@ go_library(
     importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem",
     visibility = ["//visibility:public"],
     deps = [
+        "//async/event:go_default_library",
         "//beacon-chain/verification:go_default_library",
         "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -12,6 +12,7 @@ import (
 
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/pkg/errors"
+	"github.com/prysmaticlabs/prysm/v5/async/event"
 	"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
 	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
 	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"

@@ -39,8 +40,15 @@ const (
 	directoryPermissions = 0700
 )
 
-// BlobStorageOption is a functional option for configuring a BlobStorage.
-type BlobStorageOption func(*BlobStorage) error
+type (
+	// BlobStorageOption is a functional option for configuring a BlobStorage.
+	BlobStorageOption func(*BlobStorage) error
+
+	RootIndexPair struct {
+		Root  [fieldparams.RootLength]byte
+		Index uint64
+	}
+)
 
 // WithBasePath is a required option that sets the base path of blob storage.
 func WithBasePath(base string) BlobStorageOption {

@@ -70,7 +78,10 @@ func WithSaveFsync(fsync bool) BlobStorageOption {
 // attempt to hold a file lock to guarantee exclusive control of the blob storage directory, so this should only be
 // initialized once per beacon node.
 func NewBlobStorage(opts ...BlobStorageOption) (*BlobStorage, error) {
-	b := &BlobStorage{}
+	b := &BlobStorage{
+		DataColumnFeed: new(event.Feed),
+	}
+
 	for _, o := range opts {
 		if err := o(b); err != nil {
 			return nil, errors.Wrap(err, "failed to create blob storage")

@@ -99,6 +110,7 @@ type BlobStorage struct {
 	fsync          bool
 	fs             afero.Fs
 	pruner         *blobPruner
+	DataColumnFeed *event.Feed
 }
 
 // WarmCache runs the prune routine with an expiration of slot of 0, so nothing will be pruned, but the pruner's cache

@@ -221,6 +233,112 @@ func (bs *BlobStorage) Save(sidecar blocks.VerifiedROBlob) error {
 	return nil
 }
 
+// SaveDataColumn saves a data column to our local filesystem.
+func (bs *BlobStorage) SaveDataColumn(column blocks.VerifiedRODataColumn) error {
+	startTime := time.Now()
+	fname := namerForDataColumn(column)
+	sszPath := fname.path()
+	exists, err := afero.Exists(bs.fs, sszPath)
+	if err != nil {
+		return err
+	}
+
+	if exists {
+		log.Trace("Ignoring a duplicate data column sidecar save attempt")
+		return nil
+	}
+
+	if bs.pruner != nil {
+		hRoot, err := column.SignedBlockHeader.Header.HashTreeRoot()
+		if err != nil {
+			return err
+		}
+		if err := bs.pruner.notify(hRoot, column.SignedBlockHeader.Header.Slot, column.ColumnIndex); err != nil {
+			return errors.Wrapf(err, "problem maintaining pruning cache/metrics for sidecar with root=%#x", hRoot)
+		}
+	}
+
+	// Serialize the ethpb.DataColumnSidecar to binary data using SSZ.
+	sidecarData, err := column.MarshalSSZ()
+	if err != nil {
+		return errors.Wrap(err, "failed to serialize sidecar data")
+	} else if len(sidecarData) == 0 {
+		return errSidecarEmptySSZData
+	}
+
+	if err := bs.fs.MkdirAll(fname.dir(), directoryPermissions); err != nil {
+		return err
+	}
+	partPath := fname.partPath(fmt.Sprintf("%p", sidecarData))
+
+	partialMoved := false
+	// Ensure the partial file is deleted.
+	defer func() {
+		if partialMoved {
+			return
+		}
+		// It's expected to error if the save is successful.
+		err = bs.fs.Remove(partPath)
+		if err == nil {
+			log.WithFields(logrus.Fields{
+				"partPath": partPath,
+			}).Debugf("Removed partial file")
+		}
+	}()
+
+	// Create a partial file and write the serialized data to it.
+	partialFile, err := bs.fs.Create(partPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to create partial file")
+	}
+
+	n, err := partialFile.Write(sidecarData)
+	if err != nil {
+		closeErr := partialFile.Close()
+		if closeErr != nil {
+			return closeErr
+		}
+		return errors.Wrap(err, "failed to write to partial file")
+	}
+	if bs.fsync {
+		if err := partialFile.Sync(); err != nil {
+			return err
+		}
+	}
+
+	if err := partialFile.Close(); err != nil {
+		return err
+	}
+
+	if n != len(sidecarData) {
+		return fmt.Errorf("failed to write the full bytes of sidecarData, wrote only %d of %d bytes", n, len(sidecarData))
+	}
+
+	if n == 0 {
+		return errEmptyBlobWritten
+	}
+
+	// Atomically rename the partial file to its final name.
+	err = bs.fs.Rename(partPath, sszPath)
+	if err != nil {
+		return errors.Wrap(err, "failed to rename partial file to final name")
+	}
+	partialMoved = true
+
+	// Notify the data column notifier that a new data column has been saved.
+	if bs.DataColumnFeed != nil {
+		bs.DataColumnFeed.Send(RootIndexPair{
+			Root:  column.BlockRoot(),
+			Index: column.ColumnIndex,
+		})
+	}
+
+	// TODO: Use new metrics for data columns
+	blobsWrittenCounter.Inc()
+	blobSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
+	return nil
+}
+
 // Get retrieves a single BlobSidecar by its root and index.
 // Since BlobStorage only writes blobs that have undergone full verification, the return
 // value is always a VerifiedROBlob.

@@ -246,6 +364,20 @@ func (bs *BlobStorage) Get(root [32]byte, idx uint64) (blocks.VerifiedROBlob, er
 	return verification.BlobSidecarNoop(ro)
 }
 
+// GetColumn retrieves a single DataColumnSidecar by its root and index.
+func (bs *BlobStorage) GetColumn(root [32]byte, idx uint64) (*ethpb.DataColumnSidecar, error) {
+	expected := blobNamer{root: root, index: idx}
+	encoded, err := afero.ReadFile(bs.fs, expected.path())
+	if err != nil {
+		return nil, err
+	}
+	s := &ethpb.DataColumnSidecar{}
+	if err := s.UnmarshalSSZ(encoded); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
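// Editor's usage sketch (not part of the diff): ColumnIndices (added below) lists
// the custodied indices for a root, and GetColumn loads each sidecar:
//
//	indices, _ := bs.ColumnIndices(root)
//	for idx := range indices {
//		sidecar, err := bs.GetColumn(root, idx)
//		// ...
//	}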
 
 // Remove removes all blobs for a given root.
 func (bs *BlobStorage) Remove(root [32]byte) error {
 	rootDir := blobNamer{root: root}.dir()

@@ -289,6 +421,61 @@ func (bs *BlobStorage) Indices(root [32]byte) ([fieldparams.MaxBlobsPerBlock]boo
 	return mask, nil
 }
 
+// ColumnIndices retrieves the stored column indices from our filesystem.
+func (bs *BlobStorage) ColumnIndices(root [32]byte) (map[uint64]bool, error) {
+	custody := make(map[uint64]bool, fieldparams.NumberOfColumns)
+
+	// Get all the files in the directory.
+	rootDir := blobNamer{root: root}.dir()
+	entries, err := afero.ReadDir(bs.fs, rootDir)
+	if err != nil {
+		// If the directory does not exist, we do not custody any columns.
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
+
+		return nil, errors.Wrap(err, "read directory")
+	}
+
+	// Iterate over all the entries in the directory.
+	for _, entry := range entries {
+		// If the entry is a directory, skip it.
+		if entry.IsDir() {
+			continue
+		}
+
+		// If the entry does not have the correct extension, skip it.
+		name := entry.Name()
+		if !strings.HasSuffix(name, sszExt) {
+			continue
+		}
+
+		// The file should be in the `<index>.<extension>` format.
+		// Skip the file if it does not match the format.
+		parts := strings.Split(name, ".")
+		if len(parts) != 2 {
+			continue
+		}
+
+		// Get the column index from the file name.
+		columnIndexStr := parts[0]
+		columnIndex, err := strconv.ParseUint(columnIndexStr, 10, 64)
+		if err != nil {
+			return nil, errors.Wrapf(err, "unexpected directory entry breaks listing, %s", parts[0])
+		}
+
+		// If the column index is out of bounds, return an error.
+		if columnIndex >= fieldparams.NumberOfColumns {
+			return nil, errors.Wrapf(errIndexOutOfBounds, "invalid index %d", columnIndex)
+		}
+
+		// Mark the column index as in custody.
+		custody[columnIndex] = true
+	}
+
+	return custody, nil
+}
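// Editor's note (inferred from the parsing above, not part of the diff): sidecars
// are stored one file per index as "<root>/<index>.ssz", so column 7 of a root is
// read back from "<root>/7.ssz" and ColumnIndices recovers {7: true} from the
// directory listing.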
 
 // Clear deletes all files on the filesystem.
 func (bs *BlobStorage) Clear() error {
 	dirs, err := listDir(bs.fs, ".")

@@ -321,6 +508,10 @@ func namerForSidecar(sc blocks.VerifiedROBlob) blobNamer {
 	return blobNamer{root: sc.BlockRoot(), index: sc.Index}
 }
 
+func namerForDataColumn(col blocks.VerifiedRODataColumn) blobNamer {
+	return blobNamer{root: col.BlockRoot(), index: col.ColumnIndex}
+}
+
 func (p blobNamer) dir() string {
 	return rootString(p.root)
 }