Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 05:47:59 -05:00.

Compare commits: consolidat... to tracked-va... (101 commits)
| SHA1 |
|---|
| 45d2219cf5 |
| 660b4d7678 |
| b3f0194a79 |
| be60504512 |
| 1857496159 |
| ccf61e1700 |
| 4edbd2f9ef |
| 5179af1438 |
| c0f9689e30 |
| ff8240a04f |
| 847498c648 |
| 2633684339 |
| ab3f1963e2 |
| b87d02eeb3 |
| bcb4155523 |
| 77f10b9e0e |
| 928b707ef1 |
| 91c15247e5 |
| 5ef5b65ffe |
| 9ae97786c5 |
| 66d1bb54f6 |
| 4d98049054 |
| d5ff25b59d |
| a265cf08fa |
| f2ade3caff |
| e6ffc0701e |
| 61c296e075 |
| f264680739 |
| 8fe024f6a1 |
| 6b7dd833a3 |
| 060527032b |
| 421f7a75e0 |
| a29ecb6bbe |
| 54656e172f |
| b6965f0174 |
| 97c8adb003 |
| 2fe8614115 |
| 09accc7132 |
| 53f1f11c6d |
| 48fe9d9c4d |
| 4386c244e1 |
| 7ac522d8ff |
| b98e9c510e |
| d4017743d3 |
| c85a23251c |
| 52cf3a155d |
| 83ed320826 |
| 0a6457fc3a |
| f3458a3f4a |
| 8da7bdeaa6 |
| 74b07cf48c |
| 616cdc1e8b |
| 361712e886 |
| 9ec8c6c4b5 |
| 073cf19b69 |
| 6ac8090599 |
| 4aa54107e4 |
| ffc443b5f2 |
| d6c5692dc0 |
| 1086bdf2b3 |
| 2afa63b442 |
| 5a5193c59d |
| c8d3ed02cb |
| 30fcf5366a |
| f776b968ad |
| de094b0078 |
| 0a4ed8279b |
| f307a369a5 |
| dc91c963b9 |
| 7238848d81 |
| 80cafaa6df |
| 8a0545c3d7 |
| 9c61117b71 |
| 6c22edeecc |
| 57cc4950c0 |
| 2c981d5564 |
| 492c8af83f |
| e40d2cbd2c |
| 3fa6d3bd9d |
| 56f0eb1437 |
| 7fc5c714a1 |
| cfbfccb203 |
| 884b663455 |
| 0f1d16c599 |
| c11e3392d4 |
| f498463843 |
| cf4ffc97e2 |
| 3824e8a463 |
| 21ca4e008f |
| 6af44a1466 |
| 2e29164582 |
| 6d499bc9fc |
| 7786cb5684 |
| 003b70c34b |
| 71edf96c7d |
| ddafedc268 |
| 7e5738bfcd |
| 315c05b351 |
| 3662cf6009 |
| 98d8b50b0e |
| 1a1cc25bd1 |
@@ -26,7 +26,6 @@ approval_rules:
only_changed_files:
paths:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:
@@ -69,7 +68,6 @@ approval_rules:
changed_files:
ignore:
- "*pb.go"
- "*pb.gw.go"
- "*.bazel"
options:
ignore_commits_by:
156 CHANGELOG.md
@@ -4,7 +4,116 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...HEAD)
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...HEAD)

### Added

- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430)
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter Feature.
- Added GetBlockAttestationsV2 endpoint.
- Light client support: Consensus types for Electra.
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to roll back the node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, Electra.
- Add helper to cast bytes to string without allocating memory.
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Metric for the size of the tracked validators cache.

### Changed

- Electra EIP6110: Queue deposit requests changes from consensus spec pr #3818
- Reversed the boolean return of `BatchVerifyDepositsSignatures`: it now reports that all keys were successfully verified rather than that verification is needed.
- Fix `engine_exchangeCapabilities` implementation.
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
- Switch to compounding when consolidating with source==target.
- Revert block db save when saving state fails.
- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicitly set `Slashed` to false and move `EffectiveBalance` to match struct definition.
- Updated pgo profile for beacon chain with holesky data. This improves the profile guided
optimizations in the go compiler.
- Use read only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`,`engine_getPayloadV4` are changed due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only build the Protobuf state once during serialization.
- Capella blocks are execution.
- Fixed panic when http request to subscribe to event stream fails.
- Return early for blob reconstructor during capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".

### Deprecated

- `/eth/v1alpha1/validator/activation/stream` grpc wait for activation stream is deprecated. [pr](https://github.com/prysmaticlabs/prysm/pull/14514)

### Removed

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.

### Fixed

- Fixed mesh size by appending `gParams.Dhi = gossipSubDhi`
- Fix skipping partial withdrawals count.
- Wait for the async StreamEvent writer to exit before leaving the http handler, avoiding race condition panics [pr](https://github.com/prysmaticlabs/prysm/pull/14557)
- Certain deb files were returning a 404 which made building new docker images without an existing
cache impossible. This has been fixed with updates to rules_oci and bazel-lib.
- Fixed an issue where the length check between block body KZG commitments and the existing cache from the database was incompatible.
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API to return the corrected error format for malformed tokens.
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvement, removing some redundant or duplicate logs.
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- Unskip Electra merkle spec test.
- Fix panic in validator REST mode when checking status after removing all keys.

### Security

## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16

This is a hotfix release with one change.

Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
new implementation contains a bug that can cause a panic in certain conditions. The issue is
difficult to reproduce reliably and we are still trying to determine the root cause, but in the
meantime we are issuing a patch that recovers from the panic to prevent the node from crashing.

This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the
prysm REST mode validator (a feature which requires the validator to be configured to use the beacon
api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
like https://github.com/ethpandaops/ethereum-metrics-exporter

### Fixed

- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)
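
The hotfix described above recovers from the panic rather than eliminating its root cause. As an illustration only, the general defensive pattern for a server-sent-events writer looks roughly like the sketch below; the handler and names are hypothetical, not Prysm's actual implementation.

```go
package sseexample

import (
	"fmt"
	"log"
	"net/http"
)

// writeEvents streams server-sent events to w. The deferred recover() keeps a
// panic raised while writing one stream from crashing the whole process,
// mirroring the pattern described in the release notes above.
func writeEvents(w http.ResponseWriter, events <-chan []byte) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("recovered from panic while writing event stream: %v", r)
		}
	}()

	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "text/event-stream")

	for ev := range events {
		if _, err := fmt.Fprintf(w, "data: %s\n\n", ev); err != nil {
			return // client went away
		}
		flusher.Flush()
	}
}
```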

## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15

This release has a number of features and improvements. Most notably, the feature flag
`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
The experimental state management design has shown significant improvements in memory usage at
runtime. Updates to libp2p's gossipsub have some bandwidth stability improvements with support for
IDONTWANT control messages.

The gRPC gateway has been deprecated from Prysm in this release. If you need JSON data, consider the
standardized beacon-APIs.

Updating to this release is recommended at your convenience.

### Added

@@ -13,10 +122,23 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Light client support: Implement `ComputeFieldRootsForBlockBody`.
- Light client support: Add light client database changes.
- Light client support: Implement capella and deneb changes.
- Light client support: Implement `BlockToLightClientHeaderXXX` functions up to Deneb.
- Light client support: Implement `BlockToLightClientHeader` function.
- Light client support: Consensus types.
- GetBeaconStateV2: add Electra case.
- Implement [consensus-specs/3875](https://github.com/ethereum/consensus-specs/pull/3875).
- Tests to ensure sepolia config matches the official upstream yaml.
- `engine_newPayloadV4`,`engine_getPayloadV4` used for electra payload communication with execution client. [pr](https://github.com/prysmaticlabs/prysm/pull/14492)
- HTTP endpoint for PublishBlobs.
- GetBlockV2, GetBlindedBlock, ProduceBlockV2, ProduceBlockV3: add Electra case.
- Add Electra support and tests for light client functions.
- fastssz version bump (better error messages).
- SSE implementation that sheds stuck clients. [pr](https://github.com/prysmaticlabs/prysm/pull/14413)
- Added GetPoolAttesterSlashingsV2 endpoint.
- Use engine API get-blobs for block subscriber to reduce block import latency and potentially reduce bandwidth.

### Changed

- Electra: Updated interop genesis generator to support Electra.
- `getLocalPayload` has been refactored to enable work in ePBS branch.
- `TestNodeServer_GetPeer` and `TestNodeServer_ListPeers` test flakes resolved by iterating the whole peer list to find
a match rather than taking the first peer in the map.
@@ -37,6 +159,12 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated k8s-io/client-go to v0.30.4 and k8s-io/apimachinery to v0.30.4.
- Migrated tracing library from opencensus to opentelemetry for both the beacon node and validator.
- Refactored light client code to make it more readable and make future PRs easier.
- Update light client helper functions to reference `dev` branch of CL specs.
- Updated Libp2p dependencies to allow prysm to use gossipsub v1.2.
- Updated Sepolia bootnodes.
- Make committee aware packing the default by deprecating `--enable-committee-aware-packing`.
- Moved `ConvertKzgCommitmentToVersionedHash` to the `primitives` package.
- Updated correlation penalty for EIP-7251.

### Deprecated
- `--disable-grpc-gateway` flag is deprecated due to grpc gateway removal.
@@ -44,9 +172,10 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve

### Removed

- removed gRPC Gateway
- Removed unused blobs bundle cache
- Removed gRPC Gateway.
- Removed unused blobs bundle cache.
- Removed consolidation signing domain from params. The Electra design changed such that the EL handles consolidation signature verification.
- Remove engine_getPayloadBodiesBy{Hash|Range}V2.

### Fixed

@@ -58,12 +187,19 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Validator registration log changed to debug, and the frequency of validator registration calls is reduced.
- Core: Fix process effective balance update to safe copy validator for Electra.
- `== nil` checks before calling `IsNil()` on interfaces to prevent panics.
- Core: Fixed slash processing causing extra hashing
- Core: Fixed extra allocations when processing slashings
- Core: Fixed slash processing causing extra hashing.
- Core: Fixed extra allocations when processing slashings.
- Remove unneeded container in blob sidecar ssz response.
- Light client support: create finalized header based on finalizedBlock's version, not attestedBlock.
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
- Fix rolling back of a block due to a context deadline.

### Security

No notable security updates.

## [v5.1.0](https://github.com/prysmaticlabs/prysm/compare/v5.0.4...v5.1.0) - 2024-08-20

This release contains 171 new changes and many of these are related to Electra! Alongside the Electra changes, there
@@ -2673,7 +2809,7 @@ on your validators.
**Beacon chain node**

| Metric | Description | References |
|--------------------------------------------------|---------------------------------------------------------------------------------------------------------|------------|
| ------------------------------------------------ | ----------------------------------------------------------------------------------------------------- | ---------- |
| `p2p_message_ignored_validation_total` | Count of messages that were ignored in validation | |
| `beacon_current_active_validators` | Current total active validators | |
| `beacon_processed_deposits_total` | Total number of deposits processed | |
@@ -2724,9 +2860,9 @@ on your validators.
#### Changed Metrics

**Beacon chain node**
| Metric | Old Name | Description | References |
|-----------------------|----------------------|------------------------------------------------------|------------|
| `beacon_reorgs_total` | `beacon_reorg_total` | Count the number of times a beacon chain has a reorg | |
| Metric | Old Name | Description | References |
| --------------------- | -------------------- | ---------------------------------------------------- | ---------- |
| `beacon_reorgs_total` | `beacon_reorg_total` | Count the number of times a beacon chain has a reorg | |

### Deprecated
34 WORKSPACE
@@ -101,9 +101,9 @@ http_archive(

http_archive(
name = "aspect_bazel_lib",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
strip_prefix = "bazel-lib-2.9.3",
url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
)

load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")
@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/distroless/cc-debian11",
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64/v8",
@@ -227,7 +227,7 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.5.0-alpha.5"
consensus_spec_version = "v1.5.0-alpha.8"

bls_test_version = "v0.1.1"

@@ -243,7 +243,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-R9vG5HEL5eGMOAmbkKfJ2jfelNqL5V0xBUPiXOiGM6U=",
integrity = "sha256-BsGIbEyJuYrzhShGl0tHhR4lP5Qwno8R3k8a6YBR/DA=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -259,7 +259,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-AEIiEOlf1XuxoRMCsN+kgJMo4LrS05+biTA1p/7Ro00=",
integrity = "sha256-DkdvhPP2KiqUOpwFXQIFDCWCwsUDIC/xhTBD+TZevm0=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -275,7 +275,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-LH/Xr20yrJRYnbpjRGupMWTIOWt3cpxZJWXgThwVDsk=",
integrity = "sha256-vkZqV0HB8A2Uc56C1Us/p5G57iaHL+zw2No93Xt6M/4=",
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -290,7 +290,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-mlytz4MPjKh0DwV7FMiAtnRbJw9B6o78/x66/vmnYc8=",
integrity = "sha256-D/HPAW61lKqjoWwl7N0XvhdX+67dCEFAy8JxVzqBGtU=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -342,6 +342,22 @@ filegroup(
url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
)

http_archive(
name = "sepolia_testnet",
build_file_content = """
filegroup(
name = "configs",
srcs = [
"metadata/config.yaml",
],
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-cY/UgpCcYEhQf7JefD65FI8tn/A+rAvKhcm2/qiVdqY=",
strip_prefix = "sepolia-f2c219a93c4491cee3d90c18f2f8e82aed850eab",
url = "https://github.com/eth-clients/sepolia/archive/f2c219a93c4491cee3d90c18f2f8e82aed850eab.tar.gz", # 2024-09-19
)

http_archive(
name = "com_google_protobuf",
sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
@@ -93,6 +93,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return
}

defer func() {

@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {

func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 3; i++ {
@@ -79,3 +79,23 @@ func TestEventStream(t *testing.T) {
}
}
}

func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// use valid url that will result in failed request with nil body
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
require.NoError(t, err)

// error will happen when request is made, should be received over events channel
go stream.Subscribe(eventsChannel)

event := <-eventsChannel
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}

}
@@ -1,5 +1,7 @@
package api

import "net/http"

const (
VersionHeader = "Eth-Consensus-Version"
ExecutionPayloadBlindedHeader = "Eth-Execution-Payload-Blinded"
@@ -10,3 +12,9 @@ const (
EventStreamMediaType = "text/event-stream"
KeepAlive = "keep-alive"
)

// SetSSEHeaders sets the headers needed for a server-sent event response.
func SetSSEHeaders(w http.ResponseWriter) {
w.Header().Set("Content-Type", EventStreamMediaType)
w.Header().Set("Connection", KeepAlive)
}
@@ -5,6 +5,7 @@ go_library(
srcs = [
"block.go",
"conversions.go",
"conversions_blob.go",
"conversions_block.go",
"conversions_lightclient.go",
"conversions_state.go",
@@ -365,6 +365,7 @@ type BeaconBlockBodyElectra struct {
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

type BlindedBeaconBlockElectra struct {
@@ -403,6 +404,7 @@ type BlindedBeaconBlockBodyElectra struct {
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
}

type SignedBeaconBlockHeaderContainer struct {
@@ -514,6 +516,8 @@ type ExecutionPayloadDeneb struct {
ExcessBlobGas string `json:"excess_blob_gas"`
}

type ExecutionPayloadElectra = ExecutionPayloadDeneb

type ExecutionPayloadHeaderDeneb struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
@@ -534,48 +538,10 @@ type ExecutionPayloadHeaderDeneb struct {
ExcessBlobGas string `json:"excess_blob_gas"`
}

type ExecutionPayloadElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
Transactions []string `json:"transactions"`
Withdrawals []*Withdrawal `json:"withdrawals"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequests []*DepositRequest `json:"deposit_requests"`
WithdrawalRequests []*WithdrawalRequest `json:"withdrawal_requests"`
ConsolidationRequests []*ConsolidationRequest `json:"consolidation_requests"`
}
type ExecutionPayloadHeaderElectra = ExecutionPayloadHeaderDeneb

type ExecutionPayloadHeaderElectra struct {
ParentHash string `json:"parent_hash"`
FeeRecipient string `json:"fee_recipient"`
StateRoot string `json:"state_root"`
ReceiptsRoot string `json:"receipts_root"`
LogsBloom string `json:"logs_bloom"`
PrevRandao string `json:"prev_randao"`
BlockNumber string `json:"block_number"`
GasLimit string `json:"gas_limit"`
GasUsed string `json:"gas_used"`
Timestamp string `json:"timestamp"`
ExtraData string `json:"extra_data"`
BaseFeePerGas string `json:"base_fee_per_gas"`
BlockHash string `json:"block_hash"`
TransactionsRoot string `json:"transactions_root"`
WithdrawalsRoot string `json:"withdrawals_root"`
BlobGasUsed string `json:"blob_gas_used"`
ExcessBlobGas string `json:"excess_blob_gas"`
DepositRequestsRoot string `json:"deposit_requests_root"`
WithdrawalRequestsRoot string `json:"withdrawal_requests_root"`
ConsolidationRequestsRoot string `json:"consolidation_requests_root"`
type ExecutionRequests struct {
Deposits []*DepositRequest `json:"deposits"`
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
Consolidations []*ConsolidationRequest `json:"consolidations"`
}
@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/math"
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

@@ -1475,12 +1476,15 @@ func DepositSnapshotFromConsensus(ds *eth.DepositSnapshot) *DepositSnapshot {
}
}

func PendingBalanceDepositsFromConsensus(ds []*eth.PendingBalanceDeposit) []*PendingBalanceDeposit {
deposits := make([]*PendingBalanceDeposit, len(ds))
func PendingDepositsFromConsensus(ds []*eth.PendingDeposit) []*PendingDeposit {
deposits := make([]*PendingDeposit, len(ds))
for i, d := range ds {
deposits[i] = &PendingBalanceDeposit{
Index: fmt.Sprintf("%d", d.Index),
Amount: fmt.Sprintf("%d", d.Amount),
deposits[i] = &PendingDeposit{
Pubkey: hexutil.Encode(d.PublicKey),
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
Amount: fmt.Sprintf("%d", d.Amount),
Signature: hexutil.Encode(d.Signature),
Slot: fmt.Sprintf("%d", d.Slot),
}
}
return deposits
@@ -1508,3 +1512,37 @@ func PendingConsolidationsFromConsensus(cs []*eth.PendingConsolidation) []*Pendi
}
return consolidations
}

func HeadEventFromV1(event *ethv1.EventHead) *HeadEvent {
return &HeadEvent{
Slot: fmt.Sprintf("%d", event.Slot),
Block: hexutil.Encode(event.Block),
State: hexutil.Encode(event.State),
EpochTransition: event.EpochTransition,
ExecutionOptimistic: event.ExecutionOptimistic,
PreviousDutyDependentRoot: hexutil.Encode(event.PreviousDutyDependentRoot),
CurrentDutyDependentRoot: hexutil.Encode(event.CurrentDutyDependentRoot),
}
}

func FinalizedCheckpointEventFromV1(event *ethv1.EventFinalizedCheckpoint) *FinalizedCheckpointEvent {
return &FinalizedCheckpointEvent{
Block: hexutil.Encode(event.Block),
State: hexutil.Encode(event.State),
Epoch: fmt.Sprintf("%d", event.Epoch),
ExecutionOptimistic: event.ExecutionOptimistic,
}
}

func EventChainReorgFromV1(event *ethv1.EventChainReorg) *ChainReorgEvent {
return &ChainReorgEvent{
Slot: fmt.Sprintf("%d", event.Slot),
Depth: fmt.Sprintf("%d", event.Depth),
OldHeadBlock: hexutil.Encode(event.OldHeadBlock),
NewHeadBlock: hexutil.Encode(event.NewHeadBlock),
OldHeadState: hexutil.Encode(event.OldHeadState),
NewHeadState: hexutil.Encode(event.NewHeadState),
Epoch: fmt.Sprintf("%d", event.Epoch),
ExecutionOptimistic: event.ExecutionOptimistic,
}
}
61 api/server/structs/conversions_blob.go (new file)
@@ -0,0 +1,61 @@
package structs

import (
"strconv"

"github.com/prysmaticlabs/prysm/v5/api/server"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func (sc *Sidecar) ToConsensus() (*eth.BlobSidecar, error) {
if sc == nil {
return nil, errNilValue
}

index, err := strconv.ParseUint(sc.Index, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Index")
}

blob, err := bytesutil.DecodeHexWithLength(sc.Blob, 131072)
if err != nil {
return nil, server.NewDecodeError(err, "Blob")
}

kzgCommitment, err := bytesutil.DecodeHexWithLength(sc.KzgCommitment, 48)
if err != nil {
return nil, server.NewDecodeError(err, "KzgCommitment")
}

kzgProof, err := bytesutil.DecodeHexWithLength(sc.KzgProof, 48)
if err != nil {
return nil, server.NewDecodeError(err, "KzgProof")
}

header, err := sc.SignedBeaconBlockHeader.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "SignedBeaconBlockHeader")
}

// decode the commitment inclusion proof
var commitmentInclusionProof [][]byte
for _, proof := range sc.CommitmentInclusionProof {
proofBytes, err := bytesutil.DecodeHexWithLength(proof, 32)
if err != nil {
return nil, server.NewDecodeError(err, "CommitmentInclusionProof")
}
commitmentInclusionProof = append(commitmentInclusionProof, proofBytes)
}

bsc := &eth.BlobSidecar{
Index: index,
Blob: blob,
KzgCommitment: kzgCommitment,
KzgProof: kzgProof,
SignedBlockHeader: header,
CommitmentInclusionProof: commitmentInclusionProof,
}

return bsc, nil
}
@@ -20,6 +20,9 @@ import (
var ErrUnsupportedConversion = errors.New("Could not determine api struct type to use for value")

func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, error) {
if h == nil {
return nil, errNilValue
}
msg, err := h.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
@@ -36,6 +39,9 @@ func (h *SignedBeaconBlockHeader) ToConsensus() (*eth.SignedBeaconBlockHeader, e
}

func (h *BeaconBlockHeader) ToConsensus() (*eth.BeaconBlockHeader, error) {
if h == nil {
return nil, errNilValue
}
s, err := strconv.ParseUint(h.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
@@ -2088,27 +2094,31 @@ func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
}

depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionPayload.DepositRequests))
for i, d := range b.Body.ExecutionPayload.DepositRequests {
if b.Body.ExecutionRequests == nil {
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExequtionRequests")
}

depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
for i, d := range b.Body.ExecutionRequests.Deposits {
depositRequests[i], err = d.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.DepositRequests[%d]", i))
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Deposits[%d]", i))
}
}

withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionPayload.WithdrawalRequests))
for i, w := range b.Body.ExecutionPayload.WithdrawalRequests {
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
for i, w := range b.Body.ExecutionRequests.Withdrawals {
withdrawalRequests[i], err = w.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.WithdrawalRequests[%d]", i))
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Withdrawals[%d]", i))
}
}

consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionPayload.ConsolidationRequests))
for i, c := range b.Body.ExecutionPayload.ConsolidationRequests {
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
for i, c := range b.Body.ExecutionRequests.Consolidations {
consolidationRequests[i], err = c.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionPayload.ConsolidationRequests[%d]", i))
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Consolidations[%d]", i))
}
}

@@ -2151,29 +2161,31 @@ func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
SyncCommitteeSignature: syncCommitteeSig,
},
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: txs,
Withdrawals: withdrawals,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
DepositRequests: depositRequests,
WithdrawalRequests: withdrawalRequests,
ConsolidationRequests: consolidationRequests,
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
Transactions: txs,
Withdrawals: withdrawals,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
},
BlsToExecutionChanges: blsChanges,
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: &enginev1.ExecutionRequests{
Deposits: depositRequests,
Withdrawals: withdrawalRequests,
Consolidations: consolidationRequests,
},
},
}, nil
}
@@ -2383,17 +2395,31 @@ func (b *BlindedBeaconBlockElectra) ToConsensus() (*eth.BlindedBeaconBlockElectr
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
}
payloadDepositRequestsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayloadHeader.DepositRequestsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayloadHeader.DepositRequestsRoot")
if b.Body.ExecutionRequests == nil {
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExecutionRequests")
}
payloadWithdrawalRequestsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayloadHeader.WithdrawalRequestsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayloadHeader.WithdrawalRequestsRoot")
depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
for i, d := range b.Body.ExecutionRequests.Deposits {
depositRequests[i], err = d.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Deposits[%d]", i))
}
}
payloadConsolidationRequestsRoot, err := bytesutil.DecodeHexWithLength(b.Body.ExecutionPayloadHeader.ConsolidationRequestsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Body.ExecutionPayloadHeader.ConsolidationRequestsRoot")

withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
for i, w := range b.Body.ExecutionRequests.Withdrawals {
withdrawalRequests[i], err = w.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Withdrawals[%d]", i))
}
}

consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
for i, c := range b.Body.ExecutionRequests.Consolidations {
consolidationRequests[i], err = c.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.ExecutionRequests.Consolidations[%d]", i))
}
}

blsChanges, err := SignedBLSChangesToConsensus(b.Body.BLSToExecutionChanges)
@@ -2436,29 +2462,31 @@ func (b *BlindedBeaconBlockElectra) ToConsensus() (*eth.BlindedBeaconBlockElectr
SyncCommitteeSignature: syncCommitteeSig,
},
ExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
TransactionsRoot: payloadTxsRoot,
WithdrawalsRoot: payloadWithdrawalsRoot,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
DepositRequestsRoot: payloadDepositRequestsRoot,
WithdrawalRequestsRoot: payloadWithdrawalRequestsRoot,
ConsolidationRequestsRoot: payloadConsolidationRequestsRoot,
ParentHash: payloadParentHash,
FeeRecipient: payloadFeeRecipient,
StateRoot: payloadStateRoot,
ReceiptsRoot: payloadReceiptsRoot,
LogsBloom: payloadLogsBloom,
PrevRandao: payloadPrevRandao,
BlockNumber: payloadBlockNumber,
GasLimit: payloadGasLimit,
GasUsed: payloadGasUsed,
Timestamp: payloadTimestamp,
ExtraData: payloadExtraData,
BaseFeePerGas: payloadBaseFeePerGas,
BlockHash: payloadBlockHash,
TransactionsRoot: payloadTxsRoot,
WithdrawalsRoot: payloadWithdrawalsRoot,
BlobGasUsed: payloadBlobGasUsed,
ExcessBlobGas: payloadExcessBlobGas,
},
BlsToExecutionChanges: blsChanges,
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: &enginev1.ExecutionRequests{
Deposits: depositRequests,
Withdrawals: withdrawalRequests,
Consolidations: consolidationRequests,
},
},
}, nil
}
@@ -2526,6 +2554,8 @@ func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock)
return SignedBlindedBeaconBlockDenebFromConsensus(pbStruct)
case *eth.SignedBeaconBlockDeneb:
return SignedBeaconBlockDenebFromConsensus(pbStruct)
case *eth.SignedBlindedBeaconBlockElectra:
return SignedBlindedBeaconBlockElectraFromConsensus(pbStruct)
case *eth.SignedBeaconBlockElectra:
return SignedBeaconBlockElectraFromConsensus(pbStruct)
default:
@@ -2963,10 +2993,19 @@ func BlindedBeaconBlockElectraFromConsensus(b *eth.BlindedBeaconBlockElectra) (*
ExecutionPayloadHeader: payload,
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: ExecutionRequestsFromConsensus(b.Body.ExecutionRequests),
},
}, nil
}

func ExecutionRequestsFromConsensus(er *enginev1.ExecutionRequests) *ExecutionRequests {
return &ExecutionRequests{
Deposits: DepositRequestsFromConsensus(er.Deposits),
Withdrawals: WithdrawalRequestsFromConsensus(er.Withdrawals),
Consolidations: ConsolidationRequestsFromConsensus(er.Consolidations),
}
}

func SignedBlindedBeaconBlockElectraFromConsensus(b *eth.SignedBlindedBeaconBlockElectra) (*SignedBlindedBeaconBlockElectra, error) {
block, err := BlindedBeaconBlockElectraFromConsensus(b.Message)
if err != nil {
@@ -3009,6 +3048,7 @@ func BeaconBlockElectraFromConsensus(b *eth.BeaconBlockElectra) (*BeaconBlockEle
ExecutionPayload: payload,
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
BlobKzgCommitments: blobKzgCommitments,
ExecutionRequests: ExecutionRequestsFromConsensus(b.Body.ExecutionRequests),
},
}, nil
}
@@ -3112,39 +3152,7 @@ func ExecutionPayloadDenebFromConsensus(payload *enginev1.ExecutionPayloadDeneb)
}, nil
}

func ExecutionPayloadElectraFromConsensus(payload *enginev1.ExecutionPayloadElectra) (*ExecutionPayloadElectra, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}
transactions := make([]string, len(payload.Transactions))
for i, tx := range payload.Transactions {
transactions[i] = hexutil.Encode(tx)
}

return &ExecutionPayloadElectra{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
Transactions: transactions,
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
DepositRequests: DepositRequestsFromConsensus(payload.DepositRequests),
WithdrawalRequests: WithdrawalRequestsFromConsensus(payload.WithdrawalRequests),
ConsolidationRequests: ConsolidationRequestsFromConsensus(payload.ConsolidationRequests),
}, nil
}
var ExecutionPayloadElectraFromConsensus = ExecutionPayloadDenebFromConsensus

func ExecutionPayloadHeaderFromConsensus(payload *enginev1.ExecutionPayloadHeader) (*ExecutionPayloadHeader, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
@@ -3222,32 +3230,4 @@ func ExecutionPayloadHeaderDenebFromConsensus(payload *enginev1.ExecutionPayload
}, nil
}

func ExecutionPayloadHeaderElectraFromConsensus(payload *enginev1.ExecutionPayloadHeaderElectra) (*ExecutionPayloadHeaderElectra, error) {
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
if err != nil {
return nil, err
}

return &ExecutionPayloadHeaderElectra{
ParentHash: hexutil.Encode(payload.ParentHash),
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
StateRoot: hexutil.Encode(payload.StateRoot),
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
LogsBloom: hexutil.Encode(payload.LogsBloom),
PrevRandao: hexutil.Encode(payload.PrevRandao),
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
ExtraData: hexutil.Encode(payload.ExtraData),
BaseFeePerGas: baseFeePerGas,
BlockHash: hexutil.Encode(payload.BlockHash),
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
DepositRequestsRoot: hexutil.Encode(payload.DepositRequestsRoot),
WithdrawalRequestsRoot: hexutil.Encode(payload.WithdrawalRequestsRoot),
ConsolidationRequestsRoot: hexutil.Encode(payload.ConsolidationRequestsRoot),
}, nil
}
var ExecutionPayloadHeaderElectraFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
@@ -84,6 +84,11 @@ func syncAggregateToJSON(input *v1.SyncAggregate) *SyncAggregate {
}

func lightClientHeaderContainerToJSON(container *v2.LightClientHeaderContainer) (json.RawMessage, error) {
// In the case that a finalizedHeader is nil.
if container == nil {
return nil, nil
}

beacon, err := container.GetBeacon()
if err != nil {
return nil, errors.Wrap(err, "could not get beacon block header")
@@ -674,7 +674,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
if err != nil {
return nil, err
}
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, errPayloadHeaderNotFound
}
@@ -722,7 +722,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
if err != nil {
return nil, err
}
pbd, err := st.PendingBalanceDeposits()
pbd, err := st.PendingDeposits()
if err != nil {
return nil, err
}
@@ -770,7 +770,7 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
EarliestExitEpoch: fmt.Sprintf("%d", eee),
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
PendingBalanceDeposits: PendingBalanceDepositsFromConsensus(pbd),
PendingDeposits: PendingDepositsFromConsensus(pbd),
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
}, nil
@@ -21,11 +21,12 @@ type GetCommitteesResponse struct {
}

type ListAttestationsResponse struct {
Data []*Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}

type SubmitAttestationsRequest struct {
Data []*Attestation `json:"data"`
Data json.RawMessage `json:"data"`
}

type ListVoluntaryExitsResponse struct {
@@ -133,6 +134,13 @@ type GetBlockAttestationsResponse struct {
Data []*Attestation `json:"data"`
}

type GetBlockAttestationsV2Response struct {
Version string `json:"version"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
Data json.RawMessage `json:"data"` // Accepts both `Attestation` and `AttestationElectra` types
}
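
Because `Data` is a `json.RawMessage` that may carry either `Attestation` or `AttestationElectra` objects, a caller has to branch on the version before decoding. A minimal client-side sketch of that step is shown below; it is illustrative only and assumes the `Attestation` and `AttestationElectra` types defined in this package, not an API that Prysm itself exposes.

```go
import (
	"encoding/json"
	"strings"
)

// decodeBlockAttestations picks the concrete attestation type from the
// response's Version field before unmarshalling Data. Illustrative sketch,
// not part of the Prysm API surface.
func decodeBlockAttestations(resp *GetBlockAttestationsV2Response) (interface{}, error) {
	if strings.EqualFold(resp.Version, "electra") {
		var atts []*AttestationElectra
		if err := json.Unmarshal(resp.Data, &atts); err != nil {
			return nil, err
		}
		return atts, nil
	}
	var atts []*Attestation
	if err := json.Unmarshal(resp.Data, &atts); err != nil {
		return nil, err
	}
	return atts, nil
}
```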

type GetStateRootResponse struct {
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
@@ -169,7 +177,8 @@ type BLSToExecutionChangesPoolResponse struct {
}

type GetAttesterSlashingsResponse struct {
Data []*AttesterSlashing `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"` // Accepts both `[]*AttesterSlashing` and `[]*AttesterSlashingElectra` types
}

type GetProposerSlashingsResponse struct {

@@ -12,3 +12,12 @@ type Sidecar struct {
KzgProof string `json:"kzg_proof"`
CommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"`
}

type BlobSidecars struct {
Sidecars []*Sidecar `json:"sidecars"`
}

type PublishBlobsRequest struct {
BlobSidecars *BlobSidecars `json:"blob_sidecars"`
BlockRoot string `json:"block_root"`
}
@@ -7,7 +7,8 @@ import (
)

type AggregateAttestationResponse struct {
Data *Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}

type SubmitContributionAndProofsRequest struct {
@@ -15,7 +16,7 @@ type SubmitContributionAndProofsRequest struct {
}

type SubmitAggregateAndProofsRequest struct {
Data []*SignedAggregateAttestationAndProof `json:"data"`
Data []json.RawMessage `json:"data"`
}

type SubmitSyncCommitteeSubscriptionsRequest struct {

@@ -257,9 +257,12 @@ type ConsolidationRequest struct {
TargetPubkey string `json:"target_pubkey"`
}

type PendingBalanceDeposit struct {
Index string `json:"index"`
Amount string `json:"amount"`
type PendingDeposit struct {
Pubkey string `json:"pubkey"`
WithdrawalCredentials string `json:"withdrawal_credentials"`
Amount string `json:"amount"`
Signature string `json:"signature"`
Slot string `json:"slot"`
}

type PendingPartialWithdrawal struct {

@@ -176,7 +176,7 @@ type BeaconStateElectra struct {
EarliestExitEpoch string `json:"earliest_exit_epoch"`
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
PendingBalanceDeposits []*PendingBalanceDeposit `json:"pending_balance_deposits"`
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
}
@@ -4,26 +4,25 @@ go_library(
name = "go_default_library",
srcs = [
"feed.go",
"interface.go",
"subscription.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/async/event",
visibility = ["//visibility:public"],
deps = ["//time/mclock:go_default_library"],
deps = [
"//time/mclock:go_default_library",
"@com_github_ethereum_go_ethereum//event:go_default_library",
],
)

go_test(
name = "go_default_test",
size = "small",
srcs = [
"example_feed_test.go",
"example_scope_test.go",
"example_subscription_test.go",
"feed_test.go",
"subscription_test.go",
],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
deps = ["//testing/require:go_default_library"],
)
@@ -1,73 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package event_test

import (
"fmt"

"github.com/prysmaticlabs/prysm/v5/async/event"
)

func ExampleFeed_acknowledgedEvents() {
// This example shows how the return value of Send can be used for request/reply
// interaction between event consumers and producers.
var feed event.Feed
type ackedEvent struct {
i int
ack chan<- struct{}
}

// Consumers wait for events on the feed and acknowledge processing.
done := make(chan struct{})
defer close(done)
for i := 0; i < 3; i++ {
ch := make(chan ackedEvent, 100)
sub := feed.Subscribe(ch)
go func() {
defer sub.Unsubscribe()
for {
select {
case ev := <-ch:
fmt.Println(ev.i) // "process" the event
ev.ack <- struct{}{}
case <-done:
return
}
}
}()
}

// The producer sends values of type ackedEvent with increasing values of i.
// It waits for all consumers to acknowledge before sending the next event.
for i := 0; i < 3; i++ {
acksignal := make(chan struct{})
n := feed.Send(ackedEvent{i, acksignal})
for ack := 0; ack < n; ack++ {
<-acksignal
}
}
// Output:
// 0
// 0
// 0
// 1
// 1
// 1
// 2
// 2
// 2
}
@@ -14,241 +14,12 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package event contains an event feed implementation for process communication.
package event

import (
"errors"
"reflect"
"slices"
"sync"
geth_event "github.com/ethereum/go-ethereum/event"
)

var errBadChannel = errors.New("event: Subscribe argument does not have sendable channel type")

// Feed implements one-to-many subscriptions where the carrier of events is a channel.
// Values sent to a Feed are delivered to all subscribed channels simultaneously.
//
// Feeds can only be used with a single type. The type is determined by the first Send or
// Subscribe operation. Subsequent calls to these methods panic if the type does not
// match.
//
// The zero value is ready to use.
type Feed struct {
once sync.Once // ensures that init only runs once
sendLock chan struct{} // sendLock has a one-element buffer and is empty when held. It protects sendCases.
removeSub chan interface{} // interrupts Send
sendCases caseList // the active set of select cases used by Send

// The inbox holds newly subscribed channels until they are added to sendCases.
mu sync.Mutex
inbox caseList
etype reflect.Type
}

// This is the index of the first actual subscription channel in sendCases.
// sendCases[0] is a SelectRecv case for the removeSub channel.
const firstSubSendCase = 1

type feedTypeError struct {
got, want reflect.Type
op string
}

func (e feedTypeError) Error() string {
return "event: wrong type in " + e.op + " got " + e.got.String() + ", want " + e.want.String()
}

func (f *Feed) init() {
f.removeSub = make(chan interface{})
f.sendLock = make(chan struct{}, 1)
f.sendLock <- struct{}{}
f.sendCases = caseList{{Chan: reflect.ValueOf(f.removeSub), Dir: reflect.SelectRecv}}
}

// Subscribe adds a channel to the feed. Future sends will be delivered on the channel
// until the subscription is canceled. All channels added must have the same element type.
//
// The channel should have ample buffer space to avoid blocking other subscribers.
// Slow subscribers are not dropped.
func (f *Feed) Subscribe(channel interface{}) Subscription {
f.once.Do(f.init)

chanval := reflect.ValueOf(channel)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.SendDir == 0 {
panic(errBadChannel)
}
sub := &feedSub{feed: f, channel: chanval, err: make(chan error, 1)}

f.mu.Lock()
defer f.mu.Unlock()
if !f.typecheck(chantyp.Elem()) {
panic(feedTypeError{op: "Subscribe", got: chantyp, want: reflect.ChanOf(reflect.SendDir, f.etype)})
}
// Add the select case to the inbox.
// The next Send will add it to f.sendCases.
cas := reflect.SelectCase{Dir: reflect.SelectSend, Chan: chanval}
f.inbox = append(f.inbox, cas)
return sub
}

// note: callers must hold f.mu
func (f *Feed) typecheck(typ reflect.Type) bool {
if f.etype == nil {
f.etype = typ
return true
}
// In the event the feed's type is an actual interface, we
// perform an interface conformance check here.
if f.etype.Kind() == reflect.Interface && typ.Implements(f.etype) {
return true
}
return f.etype == typ
}

func (f *Feed) remove(sub *feedSub) {
// Delete from inbox first, which covers channels
// that have not been added to f.sendCases yet.
ch := sub.channel.Interface()
f.mu.Lock()
index := f.inbox.find(ch)
if index != -1 {
f.inbox = f.inbox.delete(index)
f.mu.Unlock()
return
}
f.mu.Unlock()

select {
case f.removeSub <- ch:
// Send will remove the channel from f.sendCases.
case <-f.sendLock:
// No Send is in progress, delete the channel now that we have the send lock.
f.sendCases = f.sendCases.delete(f.sendCases.find(ch))
f.sendLock <- struct{}{}
}
}

// Send delivers to all subscribed channels simultaneously.
// It returns the number of subscribers that the value was sent to.
|
||||
func (f *Feed) Send(value interface{}) (nsent int) {
|
||||
rvalue := reflect.ValueOf(value)
|
||||
|
||||
f.once.Do(f.init)
|
||||
<-f.sendLock
|
||||
|
||||
// Add new cases from the inbox after taking the send lock.
|
||||
f.mu.Lock()
|
||||
f.sendCases = append(f.sendCases, f.inbox...)
|
||||
f.inbox = nil
|
||||
|
||||
if !f.typecheck(rvalue.Type()) {
|
||||
f.sendLock <- struct{}{}
|
||||
f.mu.Unlock()
|
||||
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
|
||||
}
|
||||
f.mu.Unlock()
|
||||
|
||||
// Set the sent value on all channels.
|
||||
for i := firstSubSendCase; i < len(f.sendCases); i++ {
|
||||
f.sendCases[i].Send = rvalue
|
||||
}
|
||||
|
||||
// Send until all channels except removeSub have been chosen. 'cases' tracks a prefix
|
||||
// of sendCases. When a send succeeds, the corresponding case moves to the end of
|
||||
// 'cases' and it shrinks by one element.
|
||||
cases := f.sendCases
|
||||
for {
|
||||
// Fast path: try sending without blocking before adding to the select set.
|
||||
// This should usually succeed if subscribers are fast enough and have free
|
||||
// buffer space.
|
||||
for i := firstSubSendCase; i < len(cases); i++ {
|
||||
if cases[i].Chan.TrySend(rvalue) {
|
||||
nsent++
|
||||
cases = cases.deactivate(i)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(cases) == firstSubSendCase {
|
||||
break
|
||||
}
|
||||
// Select on all the receivers, waiting for them to unblock.
|
||||
chosen, recv, _ := reflect.Select(cases)
|
||||
if chosen == 0 /* <-f.removeSub */ {
|
||||
index := f.sendCases.find(recv.Interface())
|
||||
f.sendCases = f.sendCases.delete(index)
|
||||
if index >= 0 && index < len(cases) {
|
||||
// Shrink 'cases' too because the removed case was still active.
|
||||
cases = f.sendCases[:len(cases)-1]
|
||||
}
|
||||
} else {
|
||||
cases = cases.deactivate(chosen)
|
||||
nsent++
|
||||
}
|
||||
}
|
||||
|
||||
// Forget about the sent value and hand off the send lock.
|
||||
for i := firstSubSendCase; i < len(f.sendCases); i++ {
|
||||
f.sendCases[i].Send = reflect.Value{}
|
||||
}
|
||||
f.sendLock <- struct{}{}
|
||||
return nsent
|
||||
}
|
||||
|
||||
type feedSub struct {
|
||||
feed *Feed
|
||||
channel reflect.Value
|
||||
errOnce sync.Once
|
||||
err chan error
|
||||
}
|
||||
|
||||
// Unsubscribe removes the feed subscription.
|
||||
func (sub *feedSub) Unsubscribe() {
|
||||
sub.errOnce.Do(func() {
|
||||
sub.feed.remove(sub)
|
||||
close(sub.err)
|
||||
})
|
||||
}
|
||||
|
||||
// Err returns error channel.
|
||||
func (sub *feedSub) Err() <-chan error {
|
||||
return sub.err
|
||||
}
|
||||
|
||||
type caseList []reflect.SelectCase
|
||||
|
||||
// find returns the index of a case containing the given channel.
|
||||
func (cs caseList) find(channel interface{}) int {
|
||||
return slices.IndexFunc(cs, func(selectCase reflect.SelectCase) bool {
|
||||
return selectCase.Chan.Interface() == channel
|
||||
})
|
||||
}
|
||||
|
||||
// delete removes the given case from cs.
|
||||
func (cs caseList) delete(index int) caseList {
|
||||
return append(cs[:index], cs[index+1:]...)
|
||||
}
|
||||
|
||||
// deactivate moves the case at index into the non-accessible portion of the cs slice.
|
||||
func (cs caseList) deactivate(index int) caseList {
|
||||
last := len(cs) - 1
|
||||
cs[index], cs[last] = cs[last], cs[index]
|
||||
return cs[:last]
|
||||
}
|
||||
|
||||
// func (cs caseList) String() string {
|
||||
// s := "["
|
||||
// for i, cas := range cs {
|
||||
// if i != 0 {
|
||||
// s += ", "
|
||||
// }
|
||||
// switch cas.Dir {
|
||||
// case reflect.SelectSend:
|
||||
// s += fmt.Sprintf("%v<-", cas.Chan.Interface())
|
||||
// case reflect.SelectRecv:
|
||||
// s += fmt.Sprintf("<-%v", cas.Chan.Interface())
|
||||
// }
|
||||
// }
|
||||
// return s + "]"
|
||||
// }
|
||||
// Feed is a re-export of the go-ethereum event feed.
type Feed = geth_event.Feed
type Subscription = geth_event.Subscription

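The hand-rolled feed above is removed in favor of these two aliases, so existing call sites keep compiling against the go-ethereum implementation. A minimal sketch of how the aliased types are exercised, assuming only the documented Subscribe/Send/Unsubscribe surface:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/async/event"
)

func main() {
	var feed event.Feed // zero value is ready to use, same as before the switch
	ch := make(chan int, 1)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// Send reports how many subscribers received the value.
	if n := feed.Send(42); n != 1 {
		fmt.Println("unexpected subscriber count:", n)
	}
	fmt.Println(<-ch) // prints 42
}
```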
@@ -1,509 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
)
|
||||
|
||||
func TestFeedPanics(t *testing.T) {
|
||||
{
|
||||
var f Feed
|
||||
f.Send(2)
|
||||
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
// Validate it doesn't deadlock.
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
ch := make(chan int)
|
||||
f.Subscribe(ch)
|
||||
want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Send(uint64(2)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
f.Send(2)
|
||||
want := feedTypeError{op: "Subscribe", got: reflect.TypeOf(make(chan uint64)), want: reflect.TypeOf(make(chan<- int))}
|
||||
assert.NoError(t, checkPanic(want, func() { f.Subscribe(make(chan uint64)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(make(<-chan int)) }))
|
||||
}
|
||||
{
|
||||
var f Feed
|
||||
assert.NoError(t, checkPanic(errBadChannel, func() { f.Subscribe(0) }))
|
||||
}
|
||||
}
|
||||
|
||||
func checkPanic(want error, fn func()) (err error) {
|
||||
defer func() {
|
||||
panicResult := recover()
|
||||
if panicResult == nil {
|
||||
err = fmt.Errorf("didn't panic")
|
||||
} else if !reflect.DeepEqual(panicResult, want) {
|
||||
err = fmt.Errorf("panicked with wrong error: got %q, want %q", panicResult, want)
|
||||
}
|
||||
}()
|
||||
fn()
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestFeed(t *testing.T) {
|
||||
var feed Feed
|
||||
var done, subscribed sync.WaitGroup
|
||||
subscriber := func(i int) {
|
||||
defer done.Done()
|
||||
|
||||
subchan := make(chan int)
|
||||
sub := feed.Subscribe(subchan)
|
||||
timeout := time.NewTimer(2 * time.Second)
|
||||
subscribed.Done()
|
||||
|
||||
select {
|
||||
case v := <-subchan:
|
||||
if v != 1 {
|
||||
t.Errorf("%d: received value %d, want 1", i, v)
|
||||
}
|
||||
case <-timeout.C:
|
||||
t.Errorf("%d: receive timeout", i)
|
||||
}
|
||||
|
||||
sub.Unsubscribe()
|
||||
select {
|
||||
case _, ok := <-sub.Err():
|
||||
if ok {
|
||||
t.Errorf("%d: error channel not closed after unsubscribe", i)
|
||||
}
|
||||
case <-timeout.C:
|
||||
t.Errorf("%d: unsubscribe timeout", i)
|
||||
}
|
||||
}
|
||||
|
||||
const n = 1000
|
||||
done.Add(n)
|
||||
subscribed.Add(n)
|
||||
for i := 0; i < n; i++ {
|
||||
go subscriber(i)
|
||||
}
|
||||
subscribed.Wait()
|
||||
if nsent := feed.Send(1); nsent != n {
|
||||
t.Errorf("first send delivered %d times, want %d", nsent, n)
|
||||
}
|
||||
if nsent := feed.Send(2); nsent != 0 {
|
||||
t.Errorf("second send delivered %d times, want 0", nsent)
|
||||
}
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeedSubscribeSameChannel(t *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
done sync.WaitGroup
|
||||
ch = make(chan int)
|
||||
sub1 = feed.Subscribe(ch)
|
||||
sub2 = feed.Subscribe(ch)
|
||||
_ = feed.Subscribe(ch)
|
||||
)
|
||||
expectSends := func(value, n int) {
|
||||
if nsent := feed.Send(value); nsent != n {
|
||||
t.Errorf("send delivered %d times, want %d", nsent, n)
|
||||
}
|
||||
done.Done()
|
||||
}
|
||||
expectRecv := func(wantValue, n int) {
|
||||
for i := 0; i < n; i++ {
|
||||
if v := <-ch; v != wantValue {
|
||||
t.Errorf("received %d, want %d", v, wantValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(1, 3)
|
||||
expectRecv(1, 3)
|
||||
done.Wait()
|
||||
|
||||
sub1.Unsubscribe()
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(2, 2)
|
||||
expectRecv(2, 2)
|
||||
done.Wait()
|
||||
|
||||
sub2.Unsubscribe()
|
||||
|
||||
done.Add(1)
|
||||
go expectSends(3, 1)
|
||||
expectRecv(3, 1)
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeedSubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 2000
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
defer wg.Wait()
|
||||
|
||||
feed.Subscribe(ch1)
|
||||
wg.Add(nsends)
|
||||
for i := 0; i < nsends; i++ {
|
||||
go func() {
|
||||
feed.Send(99)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
sub2 := feed.Subscribe(ch2)
|
||||
defer sub2.Unsubscribe()
|
||||
|
||||
// We're done when ch1 has received N times.
|
||||
// The number of receives on ch2 depends on scheduling.
|
||||
for i := 0; i < nsends; {
|
||||
select {
|
||||
case <-ch1:
|
||||
i++
|
||||
case <-ch2:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFeedUnsubscribeBlockedPost(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
nsends = 200
|
||||
chans = make([]chan int, 2000)
|
||||
subs = make([]Subscription, len(chans))
|
||||
bchan = make(chan int)
|
||||
bsub = feed.Subscribe(bchan)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
for i := range chans {
|
||||
chans[i] = make(chan int, nsends)
|
||||
}
|
||||
|
||||
// Queue up some Sends. None of these can make progress while bchan isn't read.
|
||||
wg.Add(nsends)
|
||||
for i := 0; i < nsends; i++ {
|
||||
go func() {
|
||||
feed.Send(99)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
// Subscribe the other channels.
|
||||
for i, ch := range chans {
|
||||
subs[i] = feed.Subscribe(ch)
|
||||
}
|
||||
// Unsubscribe them again.
|
||||
for _, sub := range subs {
|
||||
sub.Unsubscribe()
|
||||
}
|
||||
// Unblock the Sends.
|
||||
bsub.Unsubscribe()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Checks that unsubscribing a channel during Send works even if that
|
||||
// channel has already been sent on.
|
||||
func TestFeedUnsubscribeSentChan(_ *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
sub1 = feed.Subscribe(ch1)
|
||||
sub2 = feed.Subscribe(ch2)
|
||||
wg sync.WaitGroup
|
||||
)
|
||||
defer sub2.Unsubscribe()
|
||||
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
feed.Send(0)
|
||||
wg.Done()
|
||||
}()
|
||||
|
||||
// Wait for the value on ch1.
|
||||
<-ch1
|
||||
// Unsubscribe ch1, removing it from the send cases.
|
||||
sub1.Unsubscribe()
|
||||
|
||||
// Receive ch2, finishing Send.
|
||||
<-ch2
|
||||
wg.Wait()
|
||||
|
||||
// Send again. This should send to ch2 only, so the wait group will unblock
|
||||
// as soon as a value is received on ch2.
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
feed.Send(0)
|
||||
wg.Done()
|
||||
}()
|
||||
<-ch2
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestFeedUnsubscribeFromInbox(t *testing.T) {
|
||||
var (
|
||||
feed Feed
|
||||
ch1 = make(chan int)
|
||||
ch2 = make(chan int)
|
||||
sub1 = feed.Subscribe(ch1)
|
||||
sub2 = feed.Subscribe(ch1)
|
||||
sub3 = feed.Subscribe(ch2)
|
||||
)
|
||||
assert.Equal(t, 3, len(feed.inbox))
|
||||
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
|
||||
|
||||
sub1.Unsubscribe()
|
||||
sub2.Unsubscribe()
|
||||
sub3.Unsubscribe()
|
||||
assert.Equal(t, 0, len(feed.inbox), "Inbox is non-empty after unsubscribe")
|
||||
assert.Equal(t, 1, len(feed.sendCases), "sendCases is non-empty after unsubscribe")
|
||||
}
|
||||
|
||||
func BenchmarkFeedSend1000(b *testing.B) {
|
||||
var (
|
||||
done sync.WaitGroup
|
||||
feed Feed
|
||||
nsubs = 1000
|
||||
)
|
||||
subscriber := func(ch <-chan int) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
<-ch
|
||||
}
|
||||
done.Done()
|
||||
}
|
||||
done.Add(nsubs)
|
||||
for i := 0; i < nsubs; i++ {
|
||||
ch := make(chan int, 200)
|
||||
feed.Subscribe(ch)
|
||||
go subscriber(ch)
|
||||
}
|
||||
|
||||
// The actual benchmark.
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
if feed.Send(i) != nsubs {
|
||||
panic("wrong number of sends")
|
||||
}
|
||||
}
|
||||
|
||||
b.StopTimer()
|
||||
done.Wait()
|
||||
}
|
||||
|
||||
func TestFeed_Send(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
evFeed *Feed
|
||||
testSetup func(fd *Feed, t *testing.T, o interface{})
|
||||
obj interface{}
|
||||
expectPanic bool
|
||||
}{
|
||||
{
|
||||
name: "normal struct",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedWithPointer, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "un-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeedWithPointer{
|
||||
a: new(uint64),
|
||||
b: new(string),
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "semi-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface, 1)
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed2{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed{
|
||||
a: 0,
|
||||
b: "",
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "fully-implemented interface with additional methods",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeedIface)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: false,
|
||||
},
|
||||
{
|
||||
name: "concrete types implementing the same interface",
|
||||
evFeed: new(Feed),
|
||||
testSetup: func(fd *Feed, t *testing.T, o interface{}) {
|
||||
testChan := make(chan testFeed, 1)
|
||||
// Make it unbuffered to allow message to
|
||||
// pass through
|
||||
go func() {
|
||||
a := <-testChan
|
||||
if !reflect.DeepEqual(a, o) {
|
||||
t.Errorf("Got = %v, want = %v", a, o)
|
||||
}
|
||||
}()
|
||||
fd.Subscribe(testChan)
|
||||
},
|
||||
obj: testFeed3{
|
||||
a: 0,
|
||||
b: "",
|
||||
c: []byte{'A'},
|
||||
d: []byte{'B'},
|
||||
},
|
||||
expectPanic: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if !tt.expectPanic {
|
||||
t.Errorf("panic triggered when unexpected: %v", r)
|
||||
}
|
||||
} else {
|
||||
if tt.expectPanic {
|
||||
t.Error("panic not triggered when expected")
|
||||
}
|
||||
}
|
||||
}()
|
||||
tt.testSetup(tt.evFeed, t, tt.obj)
|
||||
if gotNsent := tt.evFeed.Send(tt.obj); gotNsent != 1 {
|
||||
t.Errorf("Send() = %v, want %v", gotNsent, 1)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// The following objects below are a collection of different
|
||||
// struct types to test with.
|
||||
type testFeed struct {
|
||||
a uint64
|
||||
b string
|
||||
}
|
||||
|
||||
func (testFeed) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed) method2() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedWithPointer struct {
|
||||
a *uint64
|
||||
b *string
|
||||
}
|
||||
|
||||
type testFeed2 struct {
|
||||
a uint64
|
||||
b string
|
||||
c []byte
|
||||
}
|
||||
|
||||
func (testFeed2) method1() {
|
||||
|
||||
}
|
||||
|
||||
type testFeed3 struct {
|
||||
a uint64
|
||||
b string
|
||||
c, d []byte
|
||||
}
|
||||
|
||||
func (testFeed3) method1() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method2() {
|
||||
|
||||
}
|
||||
|
||||
func (testFeed3) method3() {
|
||||
|
||||
}
|
||||
|
||||
type testFeedIface interface {
|
||||
method1()
|
||||
method2()
|
||||
}
|
||||
8	async/event/interface.go	Normal file
@@ -0,0 +1,8 @@
package event

// SubscriberSender is an abstract representation of an *event.Feed
// to use in describing types that accept or return an *event.Feed.
type SubscriberSender interface {
	Subscribe(channel interface{}) Subscription
	Send(value interface{}) (nsent int)
}
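The new SubscriberSender interface is deliberately narrow. A hypothetical sketch (the blockNotifier name and package are illustrative, not from this PR) of a component that depends on the interface instead of a concrete *event.Feed, so tests can inject a stub:

```go
package notifier

import "github.com/prysmaticlabs/prysm/v5/async/event"

// blockNotifier only needs the Subscribe/Send surface, so it accepts any
// event.SubscriberSender; the real *event.Feed satisfies it.
type blockNotifier struct {
	feed event.SubscriberSender
}

func newBlockNotifier(feed event.SubscriberSender) *blockNotifier {
	return &blockNotifier{feed: feed}
}

// notify fans a slot number out to whoever subscribed on the shared feed and
// returns the number of subscribers that received it.
func (n *blockNotifier) notify(slot uint64) int {
	return n.feed.Send(slot)
}
```

In production code this would be constructed as `newBlockNotifier(new(event.Feed))`; in a test, any struct with the two methods can stand in.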
@@ -28,25 +28,6 @@ import (
// request backoff time.
const waitQuotient = 10

// Subscription represents a stream of events. The carrier of the events is typically a
// channel, but isn't part of the interface.
//
// Subscriptions can fail while established. Failures are reported through an error
// channel. It receives a value if there is an issue with the subscription (e.g. the
// network connection delivering the events has been closed). Only one value will ever be
// sent.
//
// The error channel is closed when the subscription ends successfully (i.e. when the
// source of events is closed). It is also closed when Unsubscribe is called.
//
// The Unsubscribe method cancels the sending of events. You must call Unsubscribe in all
// cases to ensure that resources related to the subscription are released. It can be
// called any number of times.
type Subscription interface {
	Err() <-chan error // returns the error channel
	Unsubscribe()      // cancels sending of events, closing the error channel
}

// NewSubscription runs a producer function as a subscription in a new goroutine. The
// channel given to the producer is closed when Unsubscribe is called. If fn returns an
// error, it is sent on the subscription's error channel.
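For context, a hedged sketch of the producer contract described in that comment, assuming the package keeps the upstream go-ethereum NewSubscription signature (the producer receives a quit channel and returns an error):

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/async/event"
)

func main() {
	ticks := make(chan int)
	// The producer loops until the quit channel closes, which happens when
	// Unsubscribe is called on the returned subscription.
	sub := event.NewSubscription(func(quit <-chan struct{}) error {
		for i := 0; ; i++ {
			select {
			case ticks <- i:
			case <-quit:
				return nil
			}
		}
	})

	fmt.Println(<-ticks, <-ticks) // 0 1
	sub.Unsubscribe()
	fmt.Println(<-sub.Err()) // nil once the error channel is closed
}
```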
@@ -12,6 +12,7 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
@@ -44,7 +45,7 @@ type ForkchoiceFetcher interface {
	UpdateHead(context.Context, primitives.Slot)
	HighestReceivedBlockSlot() primitives.Slot
	ReceivedBlocksLastEpoch() (uint64, error)
	InsertNode(context.Context, state.BeaconState, [32]byte) error
	InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
	ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
	NewSlot(context.Context, primitives.Slot) error
	ProposerBoost() [32]byte
@@ -242,7 +243,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
	if !s.hasHeadState() {
		return []primitives.ValidatorIndex{}, nil
	}
	return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
	return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.
@@ -4,6 +4,7 @@ import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)
@@ -44,10 +45,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
	s.cfg.ForkChoiceStore.Lock()
	defer s.cfg.ForkChoiceStore.Unlock()
	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
	return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}

// ForkChoiceDump returns the corresponding value from forkchoice
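The signature change ripples through every caller: forkchoice now wants the block itself rather than only its root. A sketch of the new call shape; the inserter interface and insertSigned helper are named here for illustration only, while NewROBlockWithRoot is the pairing helper the updated tests use:

```go
package example

import (
	"context"

	"github.com/pkg/errors"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// inserter captures the slice of the blockchain service used here; the real
// Service satisfies it after this change.
type inserter interface {
	InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
}

// insertSigned shows the extra wrapping step callers now perform: pair the
// signed block with its root before handing it to forkchoice.
func insertSigned(ctx context.Context, f inserter, st state.BeaconState,
	signed interfaces.ReadOnlySignedBeaconBlock, root [32]byte) error {
	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, root)
	if err != nil {
		return errors.Wrap(err, "could not wrap block")
	}
	return f.InsertNode(ctx, st, roblock)
}
```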
@@ -13,6 +13,7 @@ import (
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
@@ -38,7 +39,7 @@ func prepareForkchoiceState(
	payloadHash [32]byte,
	justified *ethpb.Checkpoint,
	finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
) (state.BeaconState, consensus_blocks.ROBlock, error) {
	blockHeader := &ethpb.BeaconBlockHeader{
		ParentRoot: parentRoot[:],
	}
@@ -59,7 +60,26 @@ func prepareForkchoiceState(

	base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
	st, err := state_native.InitializeFromProtoBellatrix(base)
	return st, blockRoot, err
	if err != nil {
		return nil, consensus_blocks.ROBlock{}, err
	}
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Slot:       slot,
			ParentRoot: parentRoot[:],
			Body: &ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: &enginev1.ExecutionPayload{
					BlockHash: payloadHash[:],
				},
			},
		},
	}
	signed, err := blocks.NewSignedBeaconBlock(blk)
	if err != nil {
		return nil, consensus_blocks.ROBlock{}, err
	}
	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
	return st, roblock, err
}

func TestHeadRoot_Nil(t *testing.T) {
|
||||
@@ -122,9 +142,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
|
||||
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: []byte{'j'}}
|
||||
ofc := ðpb.Checkpoint{Root: []byte{'f'}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
|
||||
|
||||
@@ -316,24 +336,24 @@ func TestService_ChainHeads(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -413,12 +433,12 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -449,12 +469,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -2,7 +2,6 @@ package blockchain

import (
	"context"
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
@@ -28,8 +27,6 @@ import (
	"github.com/sirupsen/logrus"
)

const blobCommitmentVersionKZG uint8 = 0x01

var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -219,17 +216,25 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
	}

	var lastValidHash []byte
	var parentRoot *common.Hash
	var versionedHashes []common.Hash
	var requests *enginev1.ExecutionRequests
	if blk.Version() >= version.Deneb {
		var versionedHashes []common.Hash
		versionedHashes, err = kzgCommitmentsToVersionedHashes(blk.Block().Body())
		if err != nil {
			return false, errors.Wrap(err, "could not get versioned hashes to feed the engine")
		}
		pr := common.Hash(blk.Block().ParentRoot())
		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr)
	} else {
		lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, []common.Hash{}, &common.Hash{} /*empty version hashes and root before Deneb*/)
		prh := common.Hash(blk.Block().ParentRoot())
		parentRoot = &prh
	}
	if blk.Version() >= version.Electra {
		requests, err = blk.Block().Body().ExecutionRequests()
		if err != nil {
			return false, errors.Wrap(err, "could not get execution requests")
		}
	}
	lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

	switch {
	case err == nil:
		newPayloadValidNodeCount.Inc()
@@ -402,13 +407,7 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([

	versionedHashes := make([]common.Hash, len(commitments))
	for i, commitment := range commitments {
		versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
		versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
	}
	return versionedHashes, nil
}

func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
	versionedHash := sha256.Sum256(commitment)
	versionedHash[0] = blobCommitmentVersionKZG
	return versionedHash
}
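The commitment-to-versioned-hash helper moves into the primitives package; only the wrapper above changes. For reference, a standalone sketch of the same EIP-4844 conversion (SHA-256 of the 48-byte commitment with the first byte replaced by the 0x01 version tag), mirroring the removed local helper:

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

const blobCommitmentVersionKZG uint8 = 0x01

// versionedHash hashes a KZG commitment with SHA-256 and stamps the version byte.
func versionedHash(commitment []byte) common.Hash {
	h := sha256.Sum256(commitment)
	h[0] = blobCommitmentVersionKZG
	return h
}

func main() {
	commitment := make([]byte, 48) // zero commitment, purely illustrative
	fmt.Println(versionedHash(commitment).Hex())
}
```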
@@ -1135,9 +1135,14 @@ func TestComputePayloadAttribute(t *testing.T) {
|
||||
// Cache hit, advance state, no fee recipient
|
||||
slot := primitives.Slot(1)
|
||||
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
blockRoot: [32]byte{'a'},
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
}
|
||||
fcu := &fcuConfig{
|
||||
headState: st,
|
||||
|
||||
@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
|
||||
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)
|
||||
|
||||
cp := ðpb.Checkpoint{}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
blkWithStateBadAtt := util.NewBeaconBlock()
|
||||
blkWithStateBadAtt.Block.Slot = 1
|
||||
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp = ðpb.Checkpoint{Root: r[:]}
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
|
||||
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ojc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
ofc := ðpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
|
||||
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
|
||||
}
|
||||
|
||||
@@ -318,10 +318,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
checkpoint := ðpb.Checkpoint{Epoch: epoch, Root: r1[:]}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
returned, err := service.getAttPreState(ctx, checkpoint)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
|
||||
@@ -337,10 +336,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
newCheckpoint := ðpb.Checkpoint{Epoch: epoch, Root: r2[:]}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
returned, err = service.getAttPreState(ctx, newCheckpoint)
|
||||
require.NoError(t, err)
|
||||
s, err := slots.EpochStart(newCheckpoint.Epoch)
|
||||
|
||||
@@ -46,8 +46,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
	ctx            context.Context
	signed         interfaces.ReadOnlySignedBeaconBlock
	blockRoot      [32]byte
	roblock        consensusblocks.ROBlock
	headRoot       [32]byte
	postState      state.BeaconState
	isValidPayload bool
@@ -61,7 +60,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
	ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
	defer span.End()
	cfg.ctx = ctx
	if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
	if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
		return invalidBlock{error: err}
	}
	startTime := time.Now()
@@ -73,19 +72,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
	defer s.sendLightClientFeeds(cfg)
	defer s.sendStateFeedOnBlock(cfg)
	defer reportProcessingTime(startTime)
	defer reportAttestationInclusion(cfg.signed.Block())
	defer reportAttestationInclusion(cfg.roblock.Block())

	err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
	err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
	if err != nil {
		return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
		// Do not use parent context in the event it deadlined
		ctx = trace.NewContext(context.Background(), span)
		s.rollbackBlock(ctx, cfg.roblock.Root())
		return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
	}
	if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
	if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
		return errors.Wrap(err, "could not handle block's attestations")
	}

	s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
	s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
	if cfg.isValidPayload {
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
		if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
			return errors.Wrap(err, "could not set optimistic block to valid")
		}
	}
@@ -95,8 +97,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
		log.WithError(err).Warn("Could not update head")
	}
	newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
	if cfg.headRoot != cfg.blockRoot {
		s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
	if cfg.headRoot != cfg.roblock.Root() {
		s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
		return nil
	}
	if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
@@ -154,7 +156,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
	}

	// Fill in missing blocks
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
	if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
		return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
	}

@@ -234,7 +236,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
		if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
			return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
		}
		args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
		args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
			JustifiedCheckpoint: jCheckpoints[i],
			FinalizedCheckpoint: fCheckpoints[i]}
		pendingNodes[len(blks)-i-1] = args
@@ -279,7 +281,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
		return errors.Wrap(err, "could not insert batch to forkchoice")
	}
	// Insert the last block to forkchoice
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
	if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
		return errors.Wrap(err, "could not insert last block in batch to forkchoice")
	}
	// Set their optimistic status
@@ -404,6 +406,10 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
		return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
	}
	if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
		log.Warnf("Rolling back insertion of block with root %#x", r)
		if err := s.cfg.BeaconDB.DeleteBlock(ctx, r); err != nil {
			log.WithError(err).Errorf("Could not delete block with block root %#x", r)
		}
		return errors.Wrap(err, "could not save state")
	}
	return nil
@@ -684,3 +690,15 @@ func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, bl
	}
	return err
}

// In the event of an issue processing a block we rollback changes done to the db and our caches
// to always ensure that the node's internal state is consistent.
func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
	log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
	if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
		log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
	}
	if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
		log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
	}
}
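rollbackBlock formalizes the cleanup that savePostStateInfo already did inline: if a block was persisted but a later step fails, the block and any cached state derived from it are removed so the node does not keep a half-applied block. A generic sketch of that pattern, with the store interface and rollback helper named here for illustration only:

```go
package example

import (
	"context"
	"log"
)

// store captures only the two operations the rollback needs; in the real
// service these are provided by StateGen and BeaconDB.
type store interface {
	DeleteStateFromCaches(ctx context.Context, root [32]byte) error
	DeleteBlock(ctx context.Context, root [32]byte) error
}

// rollback undoes the optimistic writes made before a processing step failed,
// so a later retry starts from a clean slate. Errors are only logged because
// the caller is already propagating the original failure.
func rollback(ctx context.Context, db store, root [32]byte) {
	if err := db.DeleteStateFromCaches(ctx, root); err != nil {
		log.Printf("could not delete cached state %#x: %v", root, err)
	}
	if err := db.DeleteBlock(ctx, root); err != nil {
		log.Printf("could not delete block %#x: %v", root, err)
	}
}
```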
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -42,7 +43,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
|
||||
if !s.inRegularSync() {
|
||||
return nil
|
||||
}
|
||||
slot := cfg.signed.Block().Slot()
|
||||
slot := cfg.roblock.Block().Slot()
|
||||
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
|
||||
return nil
|
||||
}
|
||||
@@ -50,9 +51,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
|
||||
}
|
||||
|
||||
func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if cfg.blockRoot == cfg.headRoot {
|
||||
if cfg.roblock.Root() == cfg.headRoot {
|
||||
fcuArgs.headState = cfg.postState
|
||||
fcuArgs.headBlock = cfg.signed
|
||||
fcuArgs.headBlock = cfg.roblock
|
||||
fcuArgs.headRoot = cfg.headRoot
|
||||
fcuArgs.proposingSlot = s.CurrentSlot() + 1
|
||||
return nil
|
||||
@@ -96,7 +97,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs
|
||||
|
||||
// sendStateFeedOnBlock sends an event that a new block has been synced
|
||||
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
|
||||
if err != nil {
|
||||
log.WithError(err).Debug("Could not check if block is optimistic")
|
||||
optimistic = true
|
||||
@@ -105,9 +106,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.BlockProcessed,
|
||||
Data: &statefeed.BlockProcessedData{
|
||||
Slot: cfg.signed.Block().Slot(),
|
||||
BlockRoot: cfg.blockRoot,
|
||||
SignedBlock: cfg.signed,
|
||||
Slot: cfg.roblock.Block().Slot(),
|
||||
BlockRoot: cfg.roblock.Root(),
|
||||
SignedBlock: cfg.roblock,
|
||||
Verified: true,
|
||||
Optimistic: optimistic,
|
||||
},
|
||||
@@ -117,7 +118,7 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
|
||||
// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
|
||||
func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
|
||||
if features.Get().EnableLightClient {
|
||||
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
|
||||
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
|
||||
log.WithError(err).Error("Failed to send light client optimistic update")
|
||||
}
|
||||
|
||||
@@ -125,7 +126,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
|
||||
finalized := s.ForkChoicer().FinalizedCheckpoint()
|
||||
|
||||
// LightClientFinalityUpdate needs super majority
|
||||
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
|
||||
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,6 +163,10 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
|
||||
postState state.BeaconState) (int, error) {
|
||||
// Get attested state
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attested block")
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attested state")
|
||||
@@ -183,6 +188,7 @@ func (s *Service) sendLightClientFinalityUpdate(ctx context.Context, signed inte
|
||||
postState,
|
||||
signed,
|
||||
attestedState,
|
||||
attestedBlock,
|
||||
finalizedBlock,
|
||||
)
|
||||
|
||||
@@ -208,6 +214,10 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
|
||||
postState state.BeaconState) (int, error) {
|
||||
// Get attested state
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attested block")
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get attested state")
|
||||
@@ -218,6 +228,7 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
|
||||
postState,
|
||||
signed,
|
||||
attestedState,
|
||||
attestedBlock,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@@ -242,20 +253,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
|
||||
// before sending FCU to the engine.
|
||||
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
|
||||
slot := cfg.postState.Slot()
|
||||
if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
|
||||
root := cfg.roblock.Root()
|
||||
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
|
||||
return errors.Wrap(err, "could not update next slot state cache")
|
||||
}
|
||||
if !slots.IsEpochEnd(slot) {
|
||||
return nil
|
||||
}
|
||||
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
|
||||
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
|
||||
}
|
||||
|
||||
// handleSecondFCUCall handles a second call to FCU when syncing a new block.
|
||||
// This is useful when proposing in the next block and we want to defer the
|
||||
// computation of the next slot shuffling.
|
||||
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
|
||||
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
|
||||
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
|
||||
go s.sendFCUWithAttributes(cfg, fcuArgs)
|
||||
}
|
||||
}
|
||||
@@ -271,7 +283,7 @@ func reportProcessingTime(startTime time.Time) {
|
||||
// called on blocks that arrive after the attestation voting window, or in a
|
||||
// background routine after syncing early blocks.
|
||||
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
|
||||
if cfg.blockRoot == cfg.headRoot {
|
||||
if cfg.roblock.Root() == cfg.headRoot {
|
||||
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -428,7 +440,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.

// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
-func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
+func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
	fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
	pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

@@ -438,10 +450,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
	if err != nil {
		return err
	}
-	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
+	// The first block can have a bogus root since the block is not inserted in forkchoice
+	roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
+	if err != nil {
+		return err
+	}
+	pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
		JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
	// As long as parent node is not in fork choice store, and parent node is in DB.
-	root := blk.ParentRoot()
+	root := roblock.Block().ParentRoot()
	for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
		b, err := s.getBlock(ctx, root)
		if err != nil {
@@ -450,8 +467,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
		if b.Block().Slot() <= fSlot {
			break
		}
+		roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
+		if err != nil {
+			return err
+		}
		root = b.Block().ParentRoot()
-		args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
+		args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
			JustifiedCheckpoint: jCheckpoint,
			FinalizedCheckpoint: fCheckpoint}
		pendingNodes = append(pendingNodes, args)

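Everywhere this branch touches fork choice it first bundles the signed block with its root into an ROBlock, so the root is computed once and never re-hashed downstream. A minimal sketch of that call-site shape, assuming only the NewROBlockWithRoot constructor visible in this diff (the helper name and package below are illustrative, not part of the repo):

// Hypothetical helper showing the ROBlock wrapping pattern used across this branch.
package example

import (
	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// wrapForForkchoice computes the block root once and attaches it to the signed
// block; fork choice then reads roblock.Root() instead of re-hashing the block.
func wrapForForkchoice(signed interfaces.ReadOnlySignedBeaconBlock) (consensusblocks.ROBlock, error) {
	root, err := signed.Block().HashTreeRoot()
	if err != nil {
		return consensusblocks.ROBlock{}, err
	}
	return consensusblocks.NewROBlockWithRoot(signed, root)
}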
@@ -145,9 +145,8 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
|
||||
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
@@ -190,7 +189,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))
|
||||
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
|
||||
@@ -246,7 +245,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
|
||||
// Set finalized epoch to 2.
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should be 1 node: block 65
|
||||
@@ -279,7 +278,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
err = service.fillInForkChoiceMissingBlocks(
|
||||
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
|
||||
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
|
||||
}
|
||||
|
||||
@@ -566,7 +565,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -614,7 +615,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -640,7 +643,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -688,7 +693,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1114,7 +1121,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1124,7 +1133,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1134,7 +1145,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1144,7 +1157,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1219,7 +1234,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1237,7 +1254,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1256,7 +1275,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
@@ -1278,7 +1299,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1306,7 +1329,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
|
||||
// store's headroot is the previous head (since the invalid block did
|
||||
@@ -1335,7 +1360,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1397,7 +1424,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1415,7 +1444,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1435,7 +1466,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
@@ -1457,7 +1490,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1485,7 +1520,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
|
||||
// not finish importing and it was never imported to forkchoice). Check
|
||||
@@ -1513,7 +1550,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1578,7 +1617,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1597,7 +1638,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1616,7 +1659,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1643,7 +1688,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
@@ -1669,7 +1716,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
|
||||
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
|
||||
@@ -1708,7 +1757,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -1731,7 +1782,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -1757,7 +1810,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -1813,7 +1868,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1831,7 +1888,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1850,7 +1909,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1879,7 +1940,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -1905,7 +1968,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
|
||||
// Check that the headroot/state are not in DB and restart the node
|
||||
@@ -1995,7 +2060,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2059,7 +2126,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2209,11 +2278,11 @@ func Test_getFCUArgs(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
signed: wsb,
|
||||
blockRoot: [32]byte{'a'},
|
||||
roblock: roblock,
|
||||
postState: st,
|
||||
isValidPayload: true,
|
||||
}
|
||||
@@ -2223,11 +2292,143 @@ func Test_getFCUArgs(t *testing.T) {
|
||||
require.ErrorContains(t, "block does not exist", err)
|
||||
|
||||
// canonical branch
|
||||
cfg.headRoot = cfg.blockRoot
|
||||
cfg.headRoot = cfg.roblock.Root()
|
||||
fcuArgs = &fcuConfig{}
|
||||
err = s.getFCUArgs(cfg, fcuArgs)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
|
||||
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
|
||||
}
|
||||
|
||||
func TestRollbackBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasState)
|
||||
|
||||
// Set invalid parent root to trigger forkchoice error.
|
||||
wsb.SetParentRoot([]byte("bad"))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err = service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
|
||||
|
||||
func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasState)
|
||||
|
||||
// Set deadlined context when processing the block
|
||||
cancCtx, canc := context.WithCancel(context.Background())
|
||||
canc()
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot = roblock.Block().ParentRoot()
|
||||
|
||||
cj := ðpb.Checkpoint{}
|
||||
cj.Epoch = 1
|
||||
cj.Root = parentRoot[:]
|
||||
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err = service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
|
||||
|
||||
func fakeCommitments(n int) [][]byte {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -46,7 +47,7 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r32))
|
||||
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r33))
|
||||
|
||||
@@ -54,10 +55,12 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
a := util.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = []byte{'c'}
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
r33Root := r33.Root()
|
||||
a.Data.BeaconBlockRoot = r33Root[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
|
||||
a.Data.Target.Root = r32[:]
|
||||
r32Root := r32.Root()
|
||||
a.Data.Target.Root = r32Root[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
@@ -116,7 +119,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -176,7 +181,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -83,7 +85,12 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
|
||||
currentCheckpoints := s.saveCurrentCheckpoints(preState)
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,8 +109,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
args := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
signed: blockCopy,
|
||||
blockRoot: blockRoot,
|
||||
roblock: roblock,
|
||||
postState: postState,
|
||||
isValidPayload: isValidPayload,
|
||||
}
|
||||
@@ -184,8 +190,7 @@ func (s *Service) updateCheckpoints(
|
||||
func (s *Service) validateExecutionAndConsensus(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
block consensusblocks.ROBlock,
|
||||
) (state.BeaconState, bool, error) {
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
@@ -204,7 +209,7 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
@@ -273,7 +278,6 @@ func (s *Service) reportPostBlockProcessing(
|
||||
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
|
||||
finalized := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
|
||||
go func() {
|
||||
finalizedState.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
@@ -350,6 +354,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
|
||||
|
||||
// HasBlock returns true if the block of the input root exists in initial sync blocks cache or DB.
|
||||
func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.BlockBeingSynced(root) {
|
||||
return false
|
||||
}
|
||||
return s.hasBlockInInitSyncOrDB(ctx, root)
|
||||
}
|
||||
|
||||
@@ -553,16 +560,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
}

// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
-func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
-	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
+func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
+	isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
	if err != nil {
		s.cfg.ForkChoiceStore.Lock()
-		err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
+		err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
		s.cfg.ForkChoiceStore.Unlock()
		return false, err
	}
-	if signed.Version() < version.Capella && isValidPayload {
-		if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
+	if block.Block().Version() < version.Capella && isValidPayload {
+		if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
			return isValidPayload, err
		}
	}

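A sketch of the new call shape, mirroring the updated tests in this diff: the root and parent root now travel with the block itself rather than as extra arguments. util.NewBeaconBlock comes from prysm's testing utilities; everything else uses only accessors that appear in the hunks above.

package main

import (
	"fmt"

	consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
)

func main() {
	b := util.NewBeaconBlock() // empty test block from prysm's test utilities
	wsb, err := consensusblocks.NewSignedBeaconBlock(b)
	if err != nil {
		panic(err)
	}
	root, err := b.Block.HashTreeRoot()
	if err != nil {
		panic(err)
	}
	roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
	if err != nil {
		panic(err)
	}
	// Everything validateExecutionOnBlock previously took as separate arguments is
	// now read off the ROBlock.
	fmt.Printf("root=%x parent=%x preCapella=%t\n",
		roblock.Root(), roblock.Block().ParentRoot(), roblock.Block().Version() < version.Capella)
}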
@@ -278,6 +278,8 @@ func TestService_HasBlock(t *testing.T) {
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, s.HasBlock(context.Background(), r))
|
||||
s.blockBeingSynced.set(r)
|
||||
require.Equal(t, false, s.HasBlock(context.Background(), r))
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
|
||||
@@ -36,6 +36,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -303,7 +304,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
@@ -329,8 +338,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
saved.SaveValidatorIndices() // used to handle Validator index invariant from EIP6110
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -517,7 +524,11 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
|
||||
log.WithError(err).Fatal("Could not process genesis block for fork choice")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
|
||||
|
||||
@@ -376,11 +376,15 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
@@ -453,7 +457,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -32,7 +32,7 @@ type mockBeaconNode struct {
|
||||
}
|
||||
|
||||
// StateFeed mocks the same method in the beacon node.
|
||||
func (mbn *mockBeaconNode) StateFeed() *event.Feed {
|
||||
func (mbn *mockBeaconNode) StateFeed() event.SubscriberSender {
|
||||
mbn.mu.Lock()
|
||||
defer mbn.mu.Unlock()
|
||||
if mbn.stateFeed == nil {
|
||||
|
||||
@@ -98,6 +98,44 @@ func (s *ChainService) BlockNotifier() blockfeed.Notifier {
|
||||
return s.blockNotifier
|
||||
}
|
||||
|
||||
type EventFeedWrapper struct {
|
||||
feed *event.Feed
|
||||
subscribed chan struct{} // this channel is closed once a subscription is made
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Subscribe(channel interface{}) event.Subscription {
|
||||
select {
|
||||
case <-w.subscribed:
|
||||
break // already closed
|
||||
default:
|
||||
close(w.subscribed)
|
||||
}
|
||||
return w.feed.Subscribe(channel)
|
||||
}
|
||||
|
||||
func (w *EventFeedWrapper) Send(value interface{}) int {
|
||||
return w.feed.Send(value)
|
||||
}
|
||||
|
||||
// WaitForSubscription allows test to wait for the feed to have a subscription before beginning to send events.
|
||||
func (w *EventFeedWrapper) WaitForSubscription(ctx context.Context) error {
|
||||
select {
|
||||
case <-w.subscribed:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
var _ event.SubscriberSender = &EventFeedWrapper{}
|
||||
|
||||
func NewEventFeedWrapper() *EventFeedWrapper {
|
||||
return &EventFeedWrapper{
|
||||
feed: new(event.Feed),
|
||||
subscribed: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
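WaitForSubscription is the point of the wrapper: a test can block until the component under test has actually subscribed before it starts publishing, which removes the usual sleep-and-hope race. A hypothetical test sketch follows; the startServiceUnderTest stub stands in for whatever component a real test would exercise and is not part of this diff.

package example_test // hypothetical

import (
	"context"
	"testing"
	"time"

	mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
	statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
	"github.com/prysmaticlabs/prysm/v5/testing/require"
)

// startServiceUnderTest is a stand-in for the real component: it subscribes to the
// state feed shortly after being launched.
func startServiceUnderTest(_ *testing.T, n *mock.SimpleNotifier) {
	sub := n.StateFeed().Subscribe(make(chan *feed.Event, 1))
	defer sub.Unsubscribe()
	time.Sleep(time.Second)
}

func TestConsumerSeesEvent(t *testing.T) {
	wrapped := mock.NewEventFeedWrapper()
	notifier := &mock.SimpleNotifier{Feed: wrapped}

	go startServiceUnderTest(t, notifier)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Block until the subscription exists, then it is safe to send.
	require.NoError(t, wrapped.WaitForSubscription(ctx))
	wrapped.Send(&feed.Event{
		Type: statefeed.Initialized,
		Data: &statefeed.InitializedData{StartTime: time.Now()},
	})
}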
|
||||
// MockBlockNotifier mocks the block notifier.
|
||||
type MockBlockNotifier struct {
|
||||
feed *event.Feed
|
||||
@@ -131,7 +169,7 @@ func (msn *MockStateNotifier) ReceivedEvents() []*feed.Event {
|
||||
}
|
||||
|
||||
// StateFeed returns a state feed.
|
||||
func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
func (msn *MockStateNotifier) StateFeed() event.SubscriberSender {
|
||||
msn.feedLock.Lock()
|
||||
defer msn.feedLock.Unlock()
|
||||
|
||||
@@ -159,6 +197,23 @@ func (msn *MockStateNotifier) StateFeed() *event.Feed {
|
||||
return msn.feed
|
||||
}
|
||||
|
||||
// NewSimpleStateNotifier makes a state feed without the custom mock feed machinery.
|
||||
func NewSimpleStateNotifier() *MockStateNotifier {
|
||||
return &MockStateNotifier{feed: new(event.Feed)}
|
||||
}
|
||||
|
||||
type SimpleNotifier struct {
|
||||
Feed event.SubscriberSender
|
||||
}
|
||||
|
||||
func (n *SimpleNotifier) StateFeed() event.SubscriberSender {
|
||||
return n.Feed
|
||||
}
|
||||
|
||||
func (n *SimpleNotifier) OperationFeed() event.SubscriberSender {
|
||||
return n.Feed
|
||||
}
|
||||
|
||||
// OperationNotifier mocks the same method in the chain service.
|
||||
func (s *ChainService) OperationNotifier() opfeed.Notifier {
|
||||
if s.opNotifier == nil {
|
||||
@@ -173,7 +228,7 @@ type MockOperationNotifier struct {
|
||||
}
|
||||
|
||||
// OperationFeed returns an operation feed.
|
||||
func (mon *MockOperationNotifier) OperationFeed() *event.Feed {
|
||||
func (mon *MockOperationNotifier) OperationFeed() event.SubscriberSender {
|
||||
if mon.feed == nil {
|
||||
mon.feed = new(event.Feed)
|
||||
}
|
||||
@@ -512,7 +567,7 @@ func prepareForkchoiceState(
|
||||
payloadHash [32]byte,
|
||||
justified *ethpb.Checkpoint,
|
||||
finalized *ethpb.Checkpoint,
|
||||
) (state.BeaconState, [32]byte, error) {
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
@@ -533,7 +588,26 @@ func prepareForkchoiceState(
|
||||
|
||||
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: payloadHash[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
// CachedHeadRoot mocks the same method in the chain service
|
||||
@@ -576,9 +650,9 @@ func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
|
||||
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, block)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
beacon-chain/cache/tracked_validators.go
@@ -2,19 +2,28 @@ package cache

import (
	"sync"
+	"time"

+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

+var validatorInactivityThreshold = 5 // 5 epochs
+
type TrackedValidator struct {
	Active       bool
	FeeRecipient primitives.ExecutionAddress
	Index        primitives.ValidatorIndex
+	LastUpdated  time.Time
}

type TrackedValidatorsCache struct {
-	sync.Mutex
-	trackedValidators map[primitives.ValidatorIndex]TrackedValidator
+	sync.RWMutex
+	trackedValidators               map[primitives.ValidatorIndex]TrackedValidator
+	trackedValidatorsNum            uint64
+	trackedValidatorsNumLastUpdated uint64
+	trackedValidatorsMetric         prometheus.GaugeFunc
}

func NewTrackedValidatorsCache() *TrackedValidatorsCache {
@@ -24,8 +33,8 @@ func NewTrackedValidatorsCache() *TrackedValidatorsCache {
}

func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
	val, ok := t.trackedValidators[index]
	return val, ok
}
@@ -34,6 +43,32 @@ func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
	t.Lock()
	defer t.Unlock()
	t.trackedValidators[val.Index] = val
+	t.updateTrackedValidatorsNum()
}

+func (t *TrackedValidatorsCache) updateTrackedValidatorsNum() {
+	epochTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot
+	if t.trackedValidatorsNumLastUpdated != 0 && t.trackedValidatorsNumLastUpdated+epochTime > uint64(time.Now().Unix()) {
+		// too early to update again
+		return
+	}
+	num := 0
+	for _, value := range t.trackedValidators {
+		if value.LastUpdated.Unix()+int64(validatorInactivityThreshold)*int64(epochTime) < time.Now().Unix() {
+			// validator expired
+			// TODO: should we prune?
+			continue
+		}
+		num++
+	}
+	t.trackedValidatorsNum = uint64(num)
+	t.trackedValidatorsNumLastUpdated = uint64(time.Now().Unix())
+}
+
+func (t *TrackedValidatorsCache) GetTrackedValidatorsNum() uint64 {
+	t.RLock()
+	defer t.RUnlock()
+	return t.trackedValidatorsNum
+}

func (t *TrackedValidatorsCache) Prune() {
@@ -43,7 +78,32 @@ func (t *TrackedValidatorsCache) Prune() {
}

func (t *TrackedValidatorsCache) Validating() bool {
-	t.Lock()
-	defer t.Unlock()
+	t.RLock()
+	defer t.RUnlock()
	return len(t.trackedValidators) > 0
}
+
+func (t *TrackedValidatorsCache) Size() int {
+	t.RLock()
+	defer t.RUnlock()
+	return len(t.trackedValidators)
+}
+
+func (t *TrackedValidatorsCache) RegisterTrackedValidatorMetric() error {
+	t.Lock()
+	defer t.Unlock()
+	t.trackedValidatorsMetric = prometheus.NewGaugeFunc(
+		prometheus.GaugeOpts{
+			Name: "tracked_validator_count",
+			Help: "The total number of validators tracked by trackedValidatorsCache in the beacon node. This is updated at intervals via the push proposer settings API endpoint.",
+		},
+		func() float64 { return float64(t.GetTrackedValidatorsNum()) },
+	)
+	return prometheus.Register(t.trackedValidatorsMetric)
+}
+
+func (t *TrackedValidatorsCache) UnregisterTrackedValidatorMetric() {
+	t.RLock()
+	defer t.RUnlock()
+	prometheus.Unregister(t.trackedValidatorsMetric)
+}

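A usage sketch (not taken from the diff) of the new tracked-validator gauge: the cache is created once, the metric is registered at startup, and each push of proposer settings records the validators it covers; Prometheus then reads the throttled count through the GaugeFunc.

package main

import (
	"time"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

func main() {
	tvc := cache.NewTrackedValidatorsCache()
	// Register once at startup; Register returns an error if the collector is
	// already registered with the default Prometheus registry.
	if err := tvc.RegisterTrackedValidatorMetric(); err != nil {
		panic(err)
	}
	defer tvc.UnregisterTrackedValidatorMetric()

	// Each push of proposer settings would record the validators it covers.
	tvc.Set(cache.TrackedValidator{
		Active:      true,
		Index:       primitives.ValidatorIndex(42),
		LastUpdated: time.Now(),
	})
	_ = tvc.Size() // entries currently held, whether or not they have expired
}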
@@ -37,7 +37,7 @@ func ProcessDeposits(
	beaconState state.BeaconState,
	deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
-	batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
+	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, err
	}
@@ -46,7 +46,7 @@ func ProcessDeposits(
		if deposit == nil || deposit.Data == nil {
			return nil, errors.New("got a nil deposit in block")
		}
-		beaconState, err = ProcessDeposit(beaconState, deposit, batchVerified)
+		beaconState, err = ProcessDeposit(beaconState, deposit, allSignaturesVerified)
		if err != nil {
			return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(deposit.Data.PublicKey))
		}
@@ -81,7 +81,7 @@ func ProcessDeposits(
// amount=deposit.data.amount,
// signature=deposit.data.signature,
// )
-func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
+func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
		if deposit == nil || deposit.Data == nil {
			return nil, err
@@ -92,7 +92,7 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
		return nil, err
	}

-	return ApplyDeposit(beaconState, deposit.Data, verifySignature)
+	return ApplyDeposit(beaconState, deposit.Data, allSignaturesVerified)
}

// ApplyDeposit
@@ -115,13 +115,13 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// increase_balance(state, index, amount)
-func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
+func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSignaturesVerified bool) (state.BeaconState, error) {
	pubKey := data.PublicKey
	amount := data.Amount
	withdrawalCredentials := data.WithdrawalCredentials
	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
	if !ok {
-		if verifySignature {
+		if !allSignaturesVerified {
			valid, err := blocks.IsValidDepositSignature(data)
			if err != nil {
				return nil, err
@@ -187,11 +187,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// return Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=withdrawal_credentials,
|
||||
// effective_balance=effective_balance,
|
||||
// slashed=False,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=effective_balance,
|
||||
// )
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
|
||||
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
|
||||
@@ -202,10 +203,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,7 +199,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], true)
|
||||
newState, err := altair.ProcessDeposit(beaconState, dep[0], false)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
|
||||
@@ -55,12 +55,26 @@ func BatchVerifyDepositsSignatures(ctx context.Context, deposits []*ethpb.Deposi
        return false, err
    }

    verified := false
    if err := verifyDepositDataWithDomain(ctx, deposits, domain); err != nil {
        log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
        verified = true
        return false, nil
    }
    return verified, nil
    return true, nil
}

// BatchVerifyPendingDepositsSignatures batch verifies pending deposit signatures.
func BatchVerifyPendingDepositsSignatures(ctx context.Context, deposits []*ethpb.PendingDeposit) (bool, error) {
    var err error
    domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
    if err != nil {
        return false, err
    }

    if err := verifyPendingDepositDataWithDomain(ctx, deposits, domain); err != nil {
        log.WithError(err).Debug("Failed to batch verify deposits signatures, will try individual verify")
        return false, nil
    }
    return true, nil
}

// IsValidDepositSignature returns whether deposit_data is valid
@@ -159,3 +173,44 @@ func verifyDepositDataWithDomain(ctx context.Context, deps []*ethpb.Deposit, dom
    }
    return nil
}

func verifyPendingDepositDataWithDomain(ctx context.Context, deps []*ethpb.PendingDeposit, domain []byte) error {
    if len(deps) == 0 {
        return nil
    }
    pks := make([]bls.PublicKey, len(deps))
    sigs := make([][]byte, len(deps))
    msgs := make([][32]byte, len(deps))
    for i, dep := range deps {
        if ctx.Err() != nil {
            return ctx.Err()
        }
        if dep == nil {
            return errors.New("nil deposit")
        }
        dpk, err := bls.PublicKeyFromBytes(dep.PublicKey)
        if err != nil {
            return err
        }
        pks[i] = dpk
        sigs[i] = dep.Signature
        depositMessage := &ethpb.DepositMessage{
            PublicKey: dep.PublicKey,
            WithdrawalCredentials: dep.WithdrawalCredentials,
            Amount: dep.Amount,
        }
        sr, err := signing.ComputeSigningRoot(depositMessage, domain)
        if err != nil {
            return err
        }
        msgs[i] = sr
    }
    verify, err := bls.VerifyMultipleSignatures(sigs, msgs, pks)
    if err != nil {
        return errors.Errorf("could not verify multiple signatures: %v", err)
    }
    if !verify {
        return errors.New("one or more deposit signatures did not verify")
    }
    return nil
}
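The pattern introduced here is batch-first verification with a per-item fallback: the caller tries to verify every deposit signature in one multi-signature check, and only when that cheap path fails does it fall back to verifying deposits one by one. A self-contained sketch of just that control flow (the item type and helpers below are invented for illustration, not the Prysm API) might look like:

```go
package main

import "fmt"

// signedItem stands in for anything carrying a signature to verify.
type signedItem struct {
	id    int
	valid bool // pretend verification result, for the sketch only
}

// batchVerify pretends to verify all signatures in one aggregate check.
// It reports true only if every item verifies.
func batchVerify(items []signedItem) bool {
	for _, it := range items {
		if !it.valid {
			return false
		}
	}
	return true
}

// verifyOne pretends to verify a single item.
func verifyOne(it signedItem) bool { return it.valid }

// process applies every item; per-item signature checks are skipped when the
// batch check already succeeded, mirroring the allSignaturesVerified flag.
func process(items []signedItem) []int {
	allSignaturesVerified := batchVerify(items)
	applied := make([]int, 0, len(items))
	for _, it := range items {
		if !allSignaturesVerified && !verifyOne(it) {
			continue // an invalid item is skipped, not fatal
		}
		applied = append(applied, it.id)
	}
	return applied
}

func main() {
	items := []signedItem{{id: 1, valid: true}, {id: 2, valid: false}, {id: 3, valid: true}}
	fmt.Println("applied:", process(items)) // batch fails, so item 2 is dropped individually
}
```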
@@ -17,6 +17,41 @@ import (
)

func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
    sk, err := bls.RandKey()
    require.NoError(t, err)
    domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
    require.NoError(t, err)
    deposit := &ethpb.Deposit{
        Data: &ethpb.Deposit_Data{
            PublicKey: sk.PublicKey().Marshal(),
            WithdrawalCredentials: make([]byte, 32),
            Amount: 3000,
        },
    }
    sr, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
        PublicKey: deposit.Data.PublicKey,
        WithdrawalCredentials: deposit.Data.WithdrawalCredentials,
        Amount: 3000,
    }, domain)
    require.NoError(t, err)
    sig := sk.Sign(sr[:])
    deposit.Data.Signature = sig.Marshal()
    leaf, err := deposit.Data.HashTreeRoot()
    require.NoError(t, err)
    // We then create a merkle branch for the test.
    depositTrie, err := trie.GenerateTrieFromItems([][]byte{leaf[:]}, params.BeaconConfig().DepositContractTreeDepth)
    require.NoError(t, err, "Could not generate trie")
    proof, err := depositTrie.MerkleProof(0)
    require.NoError(t, err, "Could not generate proof")

    deposit.Proof = proof
    require.NoError(t, err)
    verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
    require.NoError(t, err)
    require.Equal(t, true, verified)
}

func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
    deposit := &ethpb.Deposit{
        Data: &ethpb.Deposit_Data{
            PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
@@ -34,9 +69,9 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {

    deposit.Proof = proof
    require.NoError(t, err)
    ok, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
    verified, err := blocks.BatchVerifyDepositsSignatures(context.Background(), []*ethpb.Deposit{deposit})
    require.NoError(t, err)
    require.Equal(t, true, ok)
    require.Equal(t, false, verified)
}

func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
@@ -93,3 +128,54 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, true, valid)
}

func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
    sk, err := bls.RandKey()
    require.NoError(t, err)
    domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
    require.NoError(t, err)
    pendingDeposit := &ethpb.PendingDeposit{
        PublicKey: sk.PublicKey().Marshal(),
        WithdrawalCredentials: make([]byte, 32),
        Amount: 3000,
    }
    sr, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
        PublicKey: pendingDeposit.PublicKey,
        WithdrawalCredentials: pendingDeposit.WithdrawalCredentials,
        Amount: 3000,
    }, domain)
    require.NoError(t, err)
    sig := sk.Sign(sr[:])
    pendingDeposit.Signature = sig.Marshal()

    sk2, err := bls.RandKey()
    require.NoError(t, err)
    pendingDeposit2 := &ethpb.PendingDeposit{
        PublicKey: sk2.PublicKey().Marshal(),
        WithdrawalCredentials: make([]byte, 32),
        Amount: 4000,
    }
    sr2, err := signing.ComputeSigningRoot(&ethpb.DepositMessage{
        PublicKey: pendingDeposit2.PublicKey,
        WithdrawalCredentials: pendingDeposit2.WithdrawalCredentials,
        Amount: 4000,
    }, domain)
    require.NoError(t, err)
    sig2 := sk2.Sign(sr2[:])
    pendingDeposit2.Signature = sig2.Marshal()

    verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
    require.NoError(t, err)
    require.Equal(t, true, verified)
}

func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
    pendingDeposit := &ethpb.PendingDeposit{
        PublicKey: bytesutil.PadTo([]byte{1, 2, 3}, 48),
        WithdrawalCredentials: make([]byte, 32),
        Signature: make([]byte, 96),
    }
    verified, err := blocks.BatchVerifyPendingDepositsSignatures(context.Background(), []*ethpb.PendingDeposit{pendingDeposit})
    require.NoError(t, err)
    require.Equal(t, false, verified)
}
@@ -213,6 +213,11 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
                },
                BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
                BlobKzgCommitments: make([][]byte, 0),
                ExecutionRequests: &enginev1.ExecutionRequests{
                    Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
                    Deposits: make([]*enginev1.DepositRequest, 0),
                    Consolidations: make([]*enginev1.ConsolidationRequest, 0),
                },
            },
        },
        Signature: params.BeaconConfig().EmptySignature[:],
@@ -61,6 +61,9 @@ func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
    if body == nil {
        return false, errors.New("nil block body")
    }
    if body.Version() >= version.Capella {
        return true, nil
    }
    payload, err := body.Execution()
    switch {
    case errors.Is(err, consensus_types.ErrUnsupportedField):
@@ -202,24 +205,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
// block_hash=payload.block_hash,
// transactions_root=hash_tree_root(payload.transactions),
// )
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
    payload, err := body.Execution()
    if err != nil {
        return nil, err
        return err
    }
    if err := verifyBlobCommitmentCount(body); err != nil {
        return nil, err
        return err
    }
    if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
        return nil, err
        return err
    }
    if err := ValidatePayload(st, payload); err != nil {
        return nil, err
        return err
    }
    if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
        return nil, err
        return err
    }
    return st, nil
    return nil
}

func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
@@ -253,7 +253,8 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
    require.NoError(t, err)
    got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
    require.NoError(t, err)
    require.Equal(t, false, got)
    // #14614
    require.Equal(t, true, got)
}

func Test_IsExecutionEnabled(t *testing.T) {
@@ -587,8 +588,7 @@ func Test_ProcessPayload(t *testing.T) {
        ExecutionPayload: tt.payload,
    })
    require.NoError(t, err)
    st, err := blocks.ProcessPayload(st, body)
    if err != nil {
    if err := blocks.ProcessPayload(st, body); err != nil {
        require.Equal(t, tt.err.Error(), err.Error())
    } else {
        require.Equal(t, tt.err, err)
@@ -619,8 +619,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
        ExecutionPayload: payload,
    })
    require.NoError(t, err)
    _, err = blocks.ProcessPayload(st, body)
    require.NoError(t, err)
    require.NoError(t, blocks.ProcessPayload(st, body))
}

func Test_ProcessPayload_Blinded(t *testing.T) {
@@ -677,8 +676,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
        ExecutionPayloadHeader: p,
    })
    require.NoError(t, err)
    st, err := blocks.ProcessPayload(st, body)
    if err != nil {
    if err := blocks.ProcessPayload(st, body); err != nil {
        require.Equal(t, tt.err.Error(), err.Error())
    } else {
        require.Equal(t, tt.err, err)
@@ -120,35 +120,36 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
//
// Spec pseudocode definition:
//
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
//
// assert len(payload.withdrawals) == len(expected_withdrawals)
// expected_withdrawals, processed_partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
//
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
// assert len(payload.withdrawals) == len(expected_withdrawals)
//
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
// assert withdrawal == expected_withdrawal
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
//
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
// # Update pending partial withdrawals [New in Electra:EIP7251]
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// # Update the next withdrawal index if this block contained withdrawals
// if len(expected_withdrawals) != 0:
// latest_withdrawal = expected_withdrawals[-1]
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
//
// # Update the next validator index to start the next withdrawal sweep
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
// # Next sweep starts after the latest withdrawal's validator index
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
// else:
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
// state.next_withdrawal_validator_index = next_validator_index
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
    expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
    expectedWithdrawals, processedPartialWithdrawalsCount, err := st.ExpectedWithdrawals()
    if err != nil {
        return nil, errors.Wrap(err, "could not get expected withdrawals")
    }
@@ -192,7 +193,7 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
    }

    if st.Version() >= version.Electra {
        if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
        if err := st.DequeuePendingPartialWithdrawals(processedPartialWithdrawalsCount); err != nil {
            return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
        }
    }
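The spec comment above (the pre-Electra text and its Electra replacement appear interleaved by the diff) ultimately reduces to a small piece of index arithmetic for the withdrawal sweep. A toy Go sketch of just that update rule, using made-up constants rather than mainnet parameters, is:

```go
package main

import "fmt"

// nextSweepStart computes where the next withdrawal sweep begins, following the
// rule quoted in the spec comment: if the payload carried a full set of
// withdrawals, resume right after the last withdrawn validator; otherwise skip
// ahead by the maximum sweep length. Constants below are illustrative only.
func nextSweepStart(lastWithdrawnValidator, currentStart, numValidators uint64, fullPayload bool) uint64 {
	const maxValidatorsPerWithdrawalsSweep = 16 // illustrative, not a mainnet value
	if fullPayload {
		return (lastWithdrawnValidator + 1) % numValidators
	}
	return (currentStart + maxValidatorsPerWithdrawalsSweep) % numValidators
}

func main() {
	// Full payload: the sweep resumes right after validator 41.
	fmt.Println(nextSweepStart(41, 30, 100, true)) // 42
	// Partial payload: the sweep jumps ahead by the sweep bound from index 30.
	fmt.Println(nextSweepStart(41, 30, 100, false)) // 46
}
```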
@@ -32,11 +32,13 @@ go_library(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -52,24 +54,27 @@ go_test(
        "deposit_fuzz_test.go",
        "deposits_test.go",
        "effective_balance_updates_test.go",
        "export_test.go",
        "registry_updates_test.go",
        "transition_test.go",
        "upgrade_test.go",
        "validator_test.go",
        "withdrawals_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/testing:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -15,6 +15,7 @@ import (
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    log "github.com/sirupsen/logrus"
)

// ProcessPendingConsolidations implements the spec definition below. This method makes mutating
@@ -22,25 +23,28 @@ import (
//
// Spec definition:
//
// def process_pending_consolidations(state: BeaconState) -> None:
// next_pending_consolidation = 0
// for pending_consolidation in state.pending_consolidations:
// source_validator = state.validators[pending_consolidation.source_index]
// if source_validator.slashed:
// next_pending_consolidation += 1
// continue
// if source_validator.withdrawable_epoch > get_current_epoch(state):
// break
// def process_pending_consolidations(state: BeaconState) -> None:
//
// # Churn any target excess active balance of target and raise its max
// switch_to_compounding_validator(state, pending_consolidation.target_index)
// # Move active balance to target. Excess balance is withdrawable.
// active_balance = get_active_balance(state, pending_consolidation.source_index)
// decrease_balance(state, pending_consolidation.source_index, active_balance)
// increase_balance(state, pending_consolidation.target_index, active_balance)
// next_epoch = Epoch(get_current_epoch(state) + 1)
// next_pending_consolidation = 0
// for pending_consolidation in state.pending_consolidations:
// source_validator = state.validators[pending_consolidation.source_index]
// if source_validator.slashed:
// next_pending_consolidation += 1
// continue
// if source_validator.withdrawable_epoch > next_epoch:
// break
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
// # Calculate the consolidated balance
// max_effective_balance = get_max_effective_balance(source_validator)
// source_effective_balance = min(state.balances[pending_consolidation.source_index], max_effective_balance)
//
// # Move active balance to target. Excess balance is withdrawable.
// decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
// increase_balance(state, pending_consolidation.target_index, source_effective_balance)
// next_pending_consolidation += 1
//
// state.pending_consolidations = state.pending_consolidations[next_pending_consolidation:]
func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) error {
    _, span := trace.StartSpan(ctx, "electra.ProcessPendingConsolidations")
    defer span.End()
@@ -51,37 +55,34 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err

    nextEpoch := slots.ToEpoch(st.Slot()) + 1

    var nextPendingConsolidation uint64
    pendingConsolidations, err := st.PendingConsolidations()
    if err != nil {
        return err
    }

    var nextPendingConsolidation uint64
    for _, pc := range pendingConsolidations {
        sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
        sourceValidator, err := st.ValidatorAtIndexReadOnly(pc.SourceIndex)
        if err != nil {
            return err
        }
        if sourceValidator.Slashed {
        if sourceValidator.Slashed() {
            nextPendingConsolidation++
            continue
        }
        if sourceValidator.WithdrawableEpoch > nextEpoch {
        if sourceValidator.WithdrawableEpoch() > nextEpoch {
            break
        }

        if err := SwitchToCompoundingValidator(st, pc.TargetIndex); err != nil {
            return err
        }

        activeBalance, err := st.ActiveBalanceAtIndex(pc.SourceIndex)
        validatorBalance, err := st.BalanceAtIndex(pc.SourceIndex)
        if err != nil {
            return err
        }
        if err := helpers.DecreaseBalance(st, pc.SourceIndex, activeBalance); err != nil {
        b := min(validatorBalance, helpers.ValidatorMaxEffectiveBalance(sourceValidator))

        if err := helpers.DecreaseBalance(st, pc.SourceIndex, b); err != nil {
            return err
        }
        if err := helpers.IncreaseBalance(st, pc.TargetIndex, activeBalance); err != nil {
        if err := helpers.IncreaseBalance(st, pc.TargetIndex, b); err != nil {
            return err
        }
        nextPendingConsolidation++
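The balance move in the loop above changed from "transfer the full active balance" to "transfer min(current balance, max effective balance)", matching the updated spec text quoted in the comment. A tiny worked sketch of that calculation, with invented gwei values, follows:

```go
package main

import "fmt"

// consolidatedBalance returns how much balance a consolidation moves from the
// source to the target: the source's current balance, capped at the source's
// maximum effective balance. The constant below is illustrative, not a config value.
func consolidatedBalance(sourceBalance, maxEffectiveBalance uint64) uint64 {
	return min(sourceBalance, maxEffectiveBalance)
}

func main() {
	const maxEB = 32_000_000_000 // illustrative cap for a non-compounding validator
	// A source holding 33 ETH only transfers 32 ETH; the excess stays withdrawable.
	fmt.Println(consolidatedBalance(33_000_000_000, maxEB)) // 32000000000
	// A source holding 31 ETH transfers exactly its balance.
	fmt.Println(consolidatedBalance(31_000_000_000, maxEB)) // 31000000000
}
```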
@@ -101,6 +102,16 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// state: BeaconState,
// consolidation_request: ConsolidationRequest
// ) -> None:
// if is_valid_switch_to_compounding_request(state, consolidation_request):
// validator_pubkeys = [v.pubkey for v in state.validators]
// request_source_pubkey = consolidation_request.source_pubkey
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// switch_to_compounding_validator(state, source_index)
// return
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
// return
// # If the pending consolidations queue is full, consolidation requests are ignored
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
// return
@@ -121,10 +132,6 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// source_validator = state.validators[source_index]
// target_validator = state.validators[target_index]
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if source_index == target_index:
// return
//
// # Verify source withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
// is_correct_source_address = (
@@ -160,19 +167,14 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
// source_index=source_index,
// target_index=target_index
// ))
//
// # Churn any target excess active balance of target and raise its max
// if has_eth1_withdrawal_credential(target_validator):
// switch_to_compounding_validator(state, target_index)
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
    if len(reqs) == 0 || st == nil {
        return nil
    }

    activeBal, err := helpers.TotalActiveBalance(st)
    if err != nil {
        return err
    }
    churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
    if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
        return nil
    }
    curEpoch := slots.ToEpoch(st.Slot())
    ffe := params.BeaconConfig().FarFutureEpoch
    minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
@@ -182,22 +184,44 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
        if ctx.Err() != nil {
            return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
        }
        if IsValidSwitchToCompoundingRequest(st, cr) {
            srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
            if !ok {
                log.Error("failed to find source validator index")
                continue
            }
            if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
                log.WithError(err).Error("failed to switch to compounding validator")
            }
            continue
        }
        sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
        targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
        if sourcePubkey == targetPubkey {
            continue
        }

        if npc, err := st.NumPendingConsolidations(); err != nil {
            return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
        } else if npc >= pcLimit {
            return nil
        }

        srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
        if !ok {
            continue
        activeBal, err := helpers.TotalActiveBalance(st)
        if err != nil {
            return err
        }
        tgtIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.TargetPubkey))
        if !ok {
            continue
        churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
        if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
            return nil
        }

        if srcIdx == tgtIdx {
        srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
        if !ok {
            continue
        }
        tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
        if !ok {
            continue
        }
@@ -237,7 +261,8 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
        // Initiate the exit of the source validator.
        exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
        if err != nil {
            return fmt.Errorf("failed to compute consolidaiton epoch: %w", err)
            log.WithError(err).Error("failed to compute consolidation epoch")
            continue
        }
        srcV.ExitEpoch = exitEpoch
        srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
@@ -248,7 +273,95 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
        if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
            return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
        }

        if helpers.HasETH1WithdrawalCredential(tgtV) {
            if err := SwitchToCompoundingValidator(st, tgtIdx); err != nil {
                log.WithError(err).Error("failed to switch to compounding validator")
                continue
            }
        }
    }

    return nil
}

// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:
//
// def is_valid_switch_to_compounding_request(
//
// state: BeaconState,
// consolidation_request: ConsolidationRequest
//
// ) -> bool:
//
// # Switch to compounding requires source and target be equal
// if consolidation_request.source_pubkey != consolidation_request.target_pubkey:
// return False
//
// # Verify pubkey exists
// source_pubkey = consolidation_request.source_pubkey
// validator_pubkeys = [v.pubkey for v in state.validators]
// if source_pubkey not in validator_pubkeys:
// return False
//
// source_validator = state.validators[ValidatorIndex(validator_pubkeys.index(source_pubkey))]
//
// # Verify request has been authorized
// if source_validator.withdrawal_credentials[12:] != consolidation_request.source_address:
// return False
//
// # Verify source withdrawal credentials
// if not has_eth1_withdrawal_credential(source_validator):
// return False
//
// # Verify the source is active
// current_epoch = get_current_epoch(state)
// if not is_active_validator(source_validator, current_epoch):
// return False
//
// # Verify exit for source has not been initiated
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return False
//
// return True
func IsValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
    if req.SourcePubkey == nil || req.TargetPubkey == nil {
        return false
    }

    if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
        return false
    }

    srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
    if !ok {
        return false
    }
    // As per the consensus specification, this error is not considered an assertion.
    // Therefore, if the source_pubkey is not found in validator_pubkeys, we simply return false.
    srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
    if err != nil {
        return false
    }
    sourceAddress := req.SourceAddress
    withdrawalCreds := srcV.GetWithdrawalCredentials()
    if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
        return false
    }

    if !helpers.HasETH1WithdrawalCredential(srcV) {
        return false
    }

    curEpoch := slots.ToEpoch(st.Slot())
    if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
        return false
    }

    if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
        return false
    }
    return true
}
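Per the spec text quoted above, a request whose source and target pubkeys are equal is a switch-to-compounding request rather than a real consolidation; it must be authorized by the registered withdrawal address and come from an active 0x01-credentialed validator that has not initiated an exit. A compact, illustrative predicate over a simplified validator record (not Prysm's state API) is:

```go
package main

import (
	"bytes"
	"fmt"
)

const farFutureEpoch = ^uint64(0) // illustrative sentinel for "no exit scheduled"

// validator is a simplified stand-in for a beacon-state validator record.
type validator struct {
	withdrawalCredentials []byte // 32 bytes, prefix 0x01 for eth1 credentials
	activationEpoch       uint64
	exitEpoch             uint64
}

// validSwitchToCompounding mirrors the checks quoted in the spec comment:
// source == target, the request is authorized by the withdrawal address,
// the validator uses 0x01 credentials, is active, and has no pending exit.
func validSwitchToCompounding(v validator, sourcePubkey, targetPubkey, sourceAddress []byte, currentEpoch uint64) bool {
	if !bytes.Equal(sourcePubkey, targetPubkey) {
		return false
	}
	if len(v.withdrawalCredentials) != 32 || !bytes.HasSuffix(v.withdrawalCredentials, sourceAddress) {
		return false
	}
	if v.withdrawalCredentials[0] != 0x01 {
		return false
	}
	if !(v.activationEpoch <= currentEpoch && currentEpoch < v.exitEpoch) {
		return false
	}
	return v.exitEpoch == farFutureEpoch
}

func main() {
	wc := make([]byte, 32)
	wc[0] = 0x01
	addr := bytes.Repeat([]byte{0xaa}, 20)
	copy(wc[12:], addr)
	v := validator{withdrawalCredentials: wc, activationEpoch: 0, exitEpoch: farFutureEpoch}
	pk := []byte("pubkey")
	fmt.Println(validSwitchToCompounding(v, pk, pk, addr, 10)) // true
}
```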
@@ -13,6 +13,7 @@ import (
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util"
)

func TestProcessPendingConsolidations(t *testing.T) {
@@ -80,10 +81,10 @@ func TestProcessPendingConsolidations(t *testing.T) {
    require.NoError(t, err)
    require.Equal(t, uint64(0), num)

    // v1 is switched to compounding validator.
    // v1 withdrawal credentials should not be updated.
    v1, err := st.ValidatorAtIndex(1)
    require.NoError(t, err)
    require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
    require.Equal(t, params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, v1.WithdrawalCredentials[0])
    },
    wantErr: false,
},
@@ -201,38 +202,6 @@ func TestProcessPendingConsolidations(t *testing.T) {
    }
}

func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
    gwei := balETH * 1_000_000_000
    balPerVal := params.BeaconConfig().MinActivationBalance
    numVals := gwei / balPerVal

    vals := make([]*eth.Validator, numVals)
    bals := make([]uint64, numVals)
    for i := uint64(0); i < numVals; i++ {
        wc := make([]byte, 32)
        wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
        wc[31] = byte(i)
        vals[i] = &eth.Validator{
            ActivationEpoch: 0,
            ExitEpoch: params.BeaconConfig().FarFutureEpoch,
            EffectiveBalance: balPerVal,
            WithdrawalCredentials: wc,
        }
        bals[i] = balPerVal
    }
    st, err := state_native.InitializeFromProtoUnsafeElectra(&eth.BeaconStateElectra{
        Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
        Validators: vals,
        Balances: bals,
        Fork: &eth.Fork{
            CurrentVersion: params.BeaconConfig().ElectraForkVersion,
        },
    })
    require.NoError(t, err)

    return st
}

func TestProcessConsolidationRequests(t *testing.T) {
    tests := []struct {
        name string
@@ -428,3 +397,87 @@ func TestProcessConsolidationRequests(t *testing.T) {
        })
    }
}

func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, 1)
    t.Run("nil source pubkey", func(t *testing.T) {
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourcePubkey: nil,
            TargetPubkey: []byte{'a'},
        })
        require.Equal(t, false, ok)
    })
    t.Run("nil target pubkey", func(t *testing.T) {
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            TargetPubkey: nil,
            SourcePubkey: []byte{'a'},
        })
        require.Equal(t, false, ok)
    })
    t.Run("different source and target pubkey", func(t *testing.T) {
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            TargetPubkey: []byte{'a'},
            SourcePubkey: []byte{'b'},
        })
        require.Equal(t, false, ok)
    })
    t.Run("source validator not found in state", func(t *testing.T) {
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourceAddress: make([]byte, 20),
            TargetPubkey: []byte{'a'},
            SourcePubkey: []byte{'a'},
        })
        require.Equal(t, false, ok)
    })
    t.Run("incorrect source address", func(t *testing.T) {
        v, err := st.ValidatorAtIndex(0)
        require.NoError(t, err)
        pubkey := v.PublicKey
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourceAddress: make([]byte, 20),
            TargetPubkey: pubkey,
            SourcePubkey: pubkey,
        })
        require.Equal(t, false, ok)
    })
    t.Run("incorrect eth1 withdrawal credential", func(t *testing.T) {
        v, err := st.ValidatorAtIndex(0)
        require.NoError(t, err)
        pubkey := v.PublicKey
        wc := v.WithdrawalCredentials
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourceAddress: wc[12:],
            TargetPubkey: pubkey,
            SourcePubkey: pubkey,
        })
        require.Equal(t, false, ok)
    })
    t.Run("is valid compounding request", func(t *testing.T) {
        v, err := st.ValidatorAtIndex(0)
        require.NoError(t, err)
        pubkey := v.PublicKey
        wc := v.WithdrawalCredentials
        v.WithdrawalCredentials[0] = 1
        require.NoError(t, st.UpdateValidatorAtIndex(0, v))
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourceAddress: wc[12:],
            TargetPubkey: pubkey,
            SourcePubkey: pubkey,
        })
        require.Equal(t, true, ok)
    })
    t.Run("already has an exit epoch", func(t *testing.T) {
        v, err := st.ValidatorAtIndex(0)
        require.NoError(t, err)
        pubkey := v.PublicKey
        wc := v.WithdrawalCredentials
        v.ExitEpoch = 100
        require.NoError(t, st.UpdateValidatorAtIndex(0, v))
        ok := electra.IsValidSwitchToCompoundingRequest(st, &enginev1.ConsolidationRequest{
            SourceAddress: wc[12:],
            TargetPubkey: pubkey,
            SourcePubkey: pubkey,
        })
        require.Equal(t, false, ok)
    })
}
@@ -2,13 +2,13 @@ package electra

import (
    "context"
    "fmt"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
@@ -17,6 +17,7 @@ import (
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
    log "github.com/sirupsen/logrus"
)
@@ -38,7 +39,7 @@ func ProcessDeposits(
    defer span.End()
    // Attempt to verify all deposit signatures at once, if this fails then fall back to processing
    // individual deposits with signature verification enabled.
    batchVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
    allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
    if err != nil {
        return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
    }
@@ -47,7 +48,7 @@ func ProcessDeposits(
        if d == nil || d.Data == nil {
            return nil, errors.New("got a nil deposit in block")
        }
        beaconState, err = ProcessDeposit(beaconState, d, batchVerified)
        beaconState, err = ProcessDeposit(beaconState, d, allSignaturesVerified)
        if err != nil {
            return nil, errors.Wrapf(err, "could not process deposit from %#x", bytesutil.Trunc(d.Data.PublicKey))
        }
@@ -82,7 +83,7 @@ func ProcessDeposits(
// amount=deposit.data.amount,
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verifySignature bool) (state.BeaconState, error) {
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
    if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
    if deposit == nil || deposit.Data == nil {
        return nil, err
@@ -92,37 +93,49 @@ func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, verif
    if err := beaconState.SetEth1DepositIndex(beaconState.Eth1DepositIndex() + 1); err != nil {
        return nil, err
    }
    return ApplyDeposit(beaconState, deposit.Data, verifySignature)
    return ApplyDeposit(beaconState, deposit.Data, allSignaturesVerified)
}

// ApplyDeposit
// def apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, signature: BLSSignature) -> None:
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// ApplyDeposit adds the incoming deposit as a pending deposit on the state
//
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
// add_validator_to_registry(state, pubkey, withdrawal_credentials, amount)
// Spec pseudocode definition:
// def apply_deposit(state: BeaconState,
//
// else:
//
// # Increase balance by deposit amount
// index = ValidatorIndex(validator_pubkeys.index(pubkey))
// state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [Modified in Electra:EIP-7251]
// # Check if valid deposit switch to compounding credentials
//
// if ( is_compounding_withdrawal_credential(withdrawal_credentials) and has_eth1_withdrawal_credential(state.validators[index])
//
// and is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature)
// ):
// switch_to_compounding_validator(state, index)
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verifySignature bool) (state.BeaconState, error) {
// pubkey: BLSPubkey,
// withdrawal_credentials: Bytes32,
// amount: uint64,
// signature: BLSSignature) -> None:
// validator_pubkeys = [v.pubkey for v in state.validators]
// if pubkey not in validator_pubkeys:
// # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
// if is_valid_deposit_signature(pubkey, withdrawal_credentials, amount, signature):
// add_validator_to_registry(state, pubkey, withdrawal_credentials, Gwei(0)) # [Modified in Electra:EIP7251]
// # [New in Electra:EIP7251]
// state.pending_deposits.append(PendingDeposit(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// amount=amount,
// signature=signature,
// slot=GENESIS_SLOT, # Use GENESIS_SLOT to distinguish from a pending deposit request
// ))
// else:
// # Increase balance by deposit amount
// # [Modified in Electra:EIP7251]
// state.pending_deposits.append(PendingDeposit(
// pubkey=pubkey,
// withdrawal_credentials=withdrawal_credentials,
// amount=amount,
// signature=signature,
// slot=GENESIS_SLOT # Use GENESIS_SLOT to distinguish from a pending deposit request
// ))
func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSignaturesVerified bool) (state.BeaconState, error) {
    pubKey := data.PublicKey
    amount := data.Amount
    withdrawalCredentials := data.WithdrawalCredentials
    index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
    signature := data.Signature
    _, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
    if !ok {
        if verifySignature {
        if !allSignaturesVerified {
            valid, err := IsValidDepositSignature(data)
            if err != nil {
                return nil, errors.Wrap(err, "could not verify deposit signature")
@@ -131,32 +144,20 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, verif
                return beaconState, nil
            }
        }
        if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, amount); err != nil {

        if err := AddValidatorToRegistry(beaconState, pubKey, withdrawalCredentials, 0); err != nil { // # [Modified in Electra:EIP7251]
            return nil, errors.Wrap(err, "could not add validator to registry")
        }
    } else {
        // no validation on top-ups (phase0 feature). no validation before state change
        if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
            return nil, err
        }
        val, err := beaconState.ValidatorAtIndex(index)
        if err != nil {
            return nil, err
        }
        if helpers.IsCompoundingWithdrawalCredential(withdrawalCredentials) && helpers.HasETH1WithdrawalCredential(val) {
            if verifySignature {
                valid, err := IsValidDepositSignature(data)
                if err != nil {
                    return nil, errors.Wrap(err, "could not verify deposit signature")
                }
                if !valid {
                    return beaconState, nil
                }
            }
            if err := SwitchToCompoundingValidator(beaconState, index); err != nil {
                return nil, errors.Wrap(err, "could not switch to compound validator")
            }
        }
    }
    // no validation on top-ups (phase0 feature). no validation before state change
    if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
        PublicKey: pubKey,
        WithdrawalCredentials: withdrawalCredentials,
        Amount: amount,
        Signature: signature,
        Slot: params.BeaconConfig().GenesisSlot,
    }); err != nil {
        return nil, err
    }
    return beaconState, nil
}
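Under the Electra version of apply_deposit quoted above, every deposit ends up queued as a PendingDeposit on the state; a deposit for an unknown pubkey additionally registers the validator with zero balance, after a signature check whenever the batch verification did not already cover it. A hedged, self-contained sketch of that branching over toy types (not Prysm's state interface) is:

```go
package main

import "fmt"

// pendingDeposit is a simplified stand-in for the queued deposit record.
type pendingDeposit struct {
	pubkey string
	amount uint64
	slot   uint64 // genesis slot marks "came from the Eth1 bridge" in this sketch
}

// miniState is a toy state holding just what the sketch needs.
type miniState struct {
	registry        map[string]struct{} // known validator pubkeys
	pendingDeposits []pendingDeposit
}

const genesisSlot = 0

// applyDeposit mirrors the flow above: unknown pubkeys are only admitted when
// their signature checks out (or the whole batch already verified), and every
// accepted deposit is appended to the pending-deposit queue.
func (s *miniState) applyDeposit(pubkey string, amount uint64, sigOK, allSignaturesVerified bool) {
	if _, known := s.registry[pubkey]; !known {
		if !allSignaturesVerified && !sigOK {
			return // invalid proof of possession: drop silently, as the spec does
		}
		s.registry[pubkey] = struct{}{} // registered with zero balance in the real code
	}
	// Top-ups and fresh deposits alike just join the queue; balances move later,
	// when the pending-deposit queue is processed at epoch boundaries.
	s.pendingDeposits = append(s.pendingDeposits, pendingDeposit{pubkey: pubkey, amount: amount, slot: genesisSlot})
}

func main() {
	st := &miniState{registry: map[string]struct{}{"existing": {}}}
	st.applyDeposit("existing", 1_000_000_000, false, false)  // top-up, no signature needed
	st.applyDeposit("newcomer", 32_000_000_000, true, false)  // new validator, signature checked
	st.applyDeposit("invalid", 32_000_000_000, false, false)  // dropped
	fmt.Println(len(st.registry), len(st.pendingDeposits))    // 2 2
}
```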
@@ -185,152 +186,373 @@ func verifyDepositDataSigningRoot(obj *ethpb.Deposit_Data, domain []byte) error
|
||||
return deposit.VerifyDepositSignature(obj, domain)
|
||||
}
|
||||
|
||||
// ProcessPendingBalanceDeposits implements the spec definition below. This method mutates the state.
|
||||
// ProcessPendingDeposits implements the spec definition below. This method mutates the state.
|
||||
// Iterating over `pending_deposits` queue this function runs the following checks before applying pending deposit:
|
||||
// 1. All Eth1 bridge deposits are processed before the first deposit request gets processed.
|
||||
// 2. Deposit position in the queue is finalized.
|
||||
// 3. Deposit does not exceed the `MAX_PENDING_DEPOSITS_PER_EPOCH` limit.
|
||||
// 4. Deposit does not exceed the activation churn limit.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def process_pending_balance_deposits(state: BeaconState) -> None:
|
||||
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
// processed_amount = 0
|
||||
// next_deposit_index = 0
|
||||
// deposits_to_postpone = []
|
||||
// def process_pending_deposits(state: BeaconState) -> None:
|
||||
//
|
||||
// for deposit in state.pending_balance_deposits:
|
||||
// validator = state.validators[deposit.index]
|
||||
// next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
// available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||
// processed_amount = 0
|
||||
// next_deposit_index = 0
|
||||
// deposits_to_postpone = []
|
||||
// is_churn_limit_reached = False
|
||||
// finalized_slot = compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
|
||||
//
|
||||
// for deposit in state.pending_deposits:
|
||||
// # Do not process deposit requests if Eth1 bridge deposits are not yet applied.
|
||||
// if (
|
||||
// # Is deposit request
|
||||
// deposit.slot > GENESIS_SLOT and
|
||||
// # There are pending Eth1 bridge deposits
|
||||
// state.eth1_deposit_index < state.deposit_requests_start_index
|
||||
// ):
|
||||
// break
|
||||
//
|
||||
// # Check if deposit has been finalized, otherwise, stop processing.
|
||||
// if deposit.slot > finalized_slot:
|
||||
// break
|
||||
//
|
||||
// # Check if number of processed deposits has not reached the limit, otherwise, stop processing.
|
||||
// if next_deposit_index >= MAX_PENDING_DEPOSITS_PER_EPOCH:
|
||||
// break
|
||||
//
|
||||
// # Read validator state
|
||||
// is_validator_exited = False
|
||||
// is_validator_withdrawn = False
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// if deposit.pubkey in validator_pubkeys:
|
||||
// validator = state.validators[ValidatorIndex(validator_pubkeys.index(deposit.pubkey))]
|
||||
// is_validator_exited = validator.exit_epoch < FAR_FUTURE_EPOCH
|
||||
// is_validator_withdrawn = validator.withdrawable_epoch < next_epoch
|
||||
//
|
||||
// if is_validator_withdrawn:
|
||||
// # Deposited balance will never become active. Increase balance but do not consume churn
|
||||
// apply_pending_deposit(state, deposit)
|
||||
// elif is_validator_exited:
|
||||
// # Validator is exiting, postpone the deposit until after withdrawable epoch
|
||||
// if validator.exit_epoch < FAR_FUTURE_EPOCH:
|
||||
// if get_current_epoch(state) <= validator.withdrawable_epoch:
|
||||
// deposits_to_postpone.append(deposit)
|
||||
// # Deposited balance will never become active. Increase balance but do not consume churn
|
||||
// else:
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// # Validator is not exiting, attempt to process deposit
|
||||
// else:
|
||||
// # Deposit does not fit in the churn, no more deposit processing in this epoch.
|
||||
// if processed_amount + deposit.amount > available_for_processing:
|
||||
// break
|
||||
// # Deposit fits in the churn, process it. Increase balance and consume churn.
|
||||
// else:
|
||||
// increase_balance(state, deposit.index, deposit.amount)
|
||||
// processed_amount += deposit.amount
|
||||
// # Regardless of how the deposit was handled, we move on in the queue.
|
||||
// next_deposit_index += 1
|
||||
//
|
||||
// state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||
//
|
||||
// if len(state.pending_balance_deposits) == 0:
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
// deposits_to_postpone.append(deposit)
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
// # Check if deposit fits in the churn, otherwise, do no more deposit processing in this epoch.
|
||||
// is_churn_limit_reached = processed_amount + deposit.amount > available_for_processing
|
||||
// if is_churn_limit_reached:
|
||||
// break
|
||||
//
|
||||
// state.pending_balance_deposits += deposits_to_postpone
|
||||
func ProcessPendingBalanceDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessPendingBalanceDeposits")
|
||||
// # Consume churn and apply deposit.
|
||||
// processed_amount += deposit.amount
|
||||
// apply_pending_deposit(state, deposit)
|
||||
//
|
||||
// # Regardless of how the deposit was handled, we move on in the queue.
|
||||
// next_deposit_index += 1
|
||||
//
|
||||
// state.pending_deposits = state.pending_deposits[next_deposit_index:] + deposits_to_postpone
|
||||
//
|
||||
// # Accumulate churn only if the churn limit has been hit.
|
||||
// if is_churn_limit_reached:
|
||||
// state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||
// else:
|
||||
// state.deposit_balance_to_consume = Gwei(0)
|
||||
func ProcessPendingDeposits(ctx context.Context, st state.BeaconState, activeBalance primitives.Gwei) error {
    _, span := trace.StartSpan(ctx, "electra.ProcessPendingDeposits")
    defer span.End()

    if st == nil || st.IsNil() {
        return errors.New("nil state")
    }

    // constants & initializations
    nextEpoch := slots.ToEpoch(st.Slot()) + 1
    processedAmount := uint64(0)
    nextDepositIndex := uint64(0)
    isChurnLimitReached := false

    var pendingDepositsToBatchVerify []*ethpb.PendingDeposit
    var pendingDepositsToPostpone []*ethpb.PendingDeposit

    depBalToConsume, err := st.DepositBalanceToConsume()
    if err != nil {
        return errors.Wrap(err, "could not get deposit balance to consume")
    }
    availableForProcessing := depBalToConsume + helpers.ActivationExitChurnLimit(activeBalance)

    finalizedSlot, err := slots.EpochStart(st.FinalizedCheckpoint().Epoch)
    if err != nil {
        return errors.Wrap(err, "could not get finalized slot")
    }

    startIndex, err := st.DepositRequestsStartIndex()
    if err != nil {
        return errors.Wrap(err, "could not get starting pendingDeposit index")
    }

    pendingDeposits, err := st.PendingDeposits()
    if err != nil {
        return err
    }

    for _, pendingDeposit := range pendingDeposits {
        // Do not process pendingDeposit requests if Eth1 bridge deposits are not yet applied.
        if pendingDeposit.Slot > params.BeaconConfig().GenesisSlot && st.Eth1DepositIndex() < startIndex {
            break
        }

        // Check if pendingDeposit has been finalized, otherwise, stop processing.
        if pendingDeposit.Slot > finalizedSlot {
            break
        }

        // Check if number of processed deposits has not reached the limit, otherwise, stop processing.
        if nextDepositIndex >= params.BeaconConfig().MaxPendingDepositsPerEpoch {
            break
        }

        var isValidatorExited bool
        var isValidatorWithdrawn bool
        index, found := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
        if found {
            val, err := st.ValidatorAtIndexReadOnly(index)
            if err != nil {
                return errors.Wrap(err, "could not get validator")
            }
            isValidatorExited = val.ExitEpoch() < params.BeaconConfig().FarFutureEpoch
            isValidatorWithdrawn = val.WithdrawableEpoch() < nextEpoch
        }

        if isValidatorWithdrawn {
            // note: the validator will never be active, just increase the balance
            if err := helpers.IncreaseBalance(st, index, pendingDeposit.Amount); err != nil {
                return errors.Wrap(err, "could not increase balance")
            }
        } else if isValidatorExited {
            pendingDepositsToPostpone = append(pendingDepositsToPostpone, pendingDeposit)
        } else {
            // Validator is not exiting, attempt to process deposit.
            isChurnLimitReached = primitives.Gwei(processedAmount+pendingDeposit.Amount) > availableForProcessing
            if isChurnLimitReached {
                break
            }
            processedAmount += pendingDeposit.Amount

            // note: the following code deviates from the spec in order to perform batch signature verification
            if found {
                if err := helpers.IncreaseBalance(st, index, pendingDeposit.Amount); err != nil {
                    return errors.Wrap(err, "could not increase balance")
                }
            } else {
                // Collect deposit for batch signature verification
                pendingDepositsToBatchVerify = append(pendingDepositsToBatchVerify, pendingDeposit)
            }
        }

        // Regardless of how the pendingDeposit was handled, we move on in the queue.
        nextDepositIndex++
    }

    // Perform batch signature verification on pending deposits that require validator registration
    if err = batchProcessNewPendingDeposits(ctx, st, pendingDepositsToBatchVerify); err != nil {
        return errors.Wrap(err, "could not process pending deposits with new public keys")
    }

    // Combined operation:
    // - state.pending_deposits = state.pending_deposits[next_deposit_index:]
    // - state.pending_deposits += deposits_to_postpone
    // However, the number of remaining deposits must be maintained to properly update the pendingDeposit
    // balance to consume.
    pendingDeposits = append(pendingDeposits[nextDepositIndex:], pendingDepositsToPostpone...)
    if err := st.SetPendingDeposits(pendingDeposits); err != nil {
        return errors.Wrap(err, "could not set pending deposits")
    }

    // Accumulate churn only if the churn limit has been hit.
    if isChurnLimitReached {
        return st.SetDepositBalanceToConsume(availableForProcessing - primitives.Gwei(processedAmount))
    }
    return st.SetDepositBalanceToConsume(0)
}
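For orientation, a minimal sketch (not part of the diff) of how this routine is driven from epoch processing: the caller supplies the total active balance, from which the per-epoch churn is derived. The helper names mirror the surrounding code and tests; the wrapper function itself is hypothetical.

// Hypothetical driver, mirroring how the tests below invoke ProcessPendingDeposits.
func runPendingDepositsForEpoch(ctx context.Context, st state.BeaconState) error {
    tab, err := helpers.TotalActiveBalance(st)
    if err != nil {
        return errors.Wrap(err, "could not compute total active balance")
    }
    // Deposits are applied until DepositBalanceToConsume + ActivationExitChurnLimit(tab)
    // worth of Gwei has been consumed for this epoch.
    return ProcessPendingDeposits(ctx, st, primitives.Gwei(tab))
}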

// batchProcessNewPendingDeposits should only be used to process new deposits that require validator registration
func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState, pendingDeposits []*ethpb.PendingDeposit) error {
    // Return early if there are no deposits to process
    if len(pendingDeposits) == 0 {
        return nil
    }

    // Try batch verification of all deposit signatures
    allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
    if err != nil {
        return errors.Wrap(err, "batch signature verification failed")
    }

    // Process each deposit individually
    for _, pendingDeposit := range pendingDeposits {
        validSignature := allSignaturesVerified

        // If batch verification failed, check the individual deposit signature
        if !allSignaturesVerified {
            validSignature, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
                PublicKey:             bytesutil.SafeCopyBytes(pendingDeposit.PublicKey),
                WithdrawalCredentials: bytesutil.SafeCopyBytes(pendingDeposit.WithdrawalCredentials),
                Amount:                pendingDeposit.Amount,
                Signature:             bytesutil.SafeCopyBytes(pendingDeposit.Signature),
            })
            if err != nil {
                return errors.Wrap(err, "individual deposit signature verification failed")
            }
        }

        // Add validator to the registry if the signature is valid
        if validSignature {
            err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
            if err != nil {
                return errors.Wrap(err, "failed to add validator to registry")
            }
        }
    }

    return nil
}
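A short in-package usage sketch, assuming the mix of deposits exercised by the test further down (one valid, one invalid); it only illustrates the batch-then-fallback behaviour and is not part of the diff.

// Hypothetical usage of the batch path: batch verification fails because one signature
// is bad, so each deposit is re-checked individually and only the valid one is added
// to the registry.
func exampleBatchVerify(ctx context.Context, st state.BeaconState, valid, invalid *ethpb.PendingDeposit) error {
    return batchProcessNewPendingDeposits(ctx, st, []*ethpb.PendingDeposit{valid, invalid})
}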

// ApplyPendingDeposit implements the spec definition below.
// Note : This function is NOT used by ProcessPendingDeposits due to simplified logic for more readable batch processing
//
// Spec Definition:
//
// def apply_pending_deposit(state: BeaconState, deposit: PendingDeposit) -> None:
//
//	"""
//	Applies ``deposit`` to the ``state``.
//	"""
//	validator_pubkeys = [v.pubkey for v in state.validators]
//	if deposit.pubkey not in validator_pubkeys:
//	    # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
//	    if is_valid_deposit_signature(
//	        deposit.pubkey,
//	        deposit.withdrawal_credentials,
//	        deposit.amount,
//	        deposit.signature
//	    ):
//	        add_validator_to_registry(state, deposit.pubkey, deposit.withdrawal_credentials, deposit.amount)
//	else:
//	    validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey))
//	    # Increase balance
//	    increase_balance(state, validator_index, deposit.amount)
func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *ethpb.PendingDeposit) error {
    _, span := trace.StartSpan(ctx, "electra.ApplyPendingDeposit")
    defer span.End()
    index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
    if !ok {
        verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
            PublicKey:             bytesutil.SafeCopyBytes(deposit.PublicKey),
            WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
            Amount:                deposit.Amount,
            Signature:             bytesutil.SafeCopyBytes(deposit.Signature),
        })
        if err != nil {
            return errors.Wrap(err, "could not verify deposit signature")
        }

        if verified {
            if err := AddValidatorToRegistry(st, deposit.PublicKey, deposit.WithdrawalCredentials, deposit.Amount); err != nil {
                return errors.Wrap(err, "could not add validator to registry")
            }
        }
        return nil
    }
    return helpers.IncreaseBalance(st, index, deposit.Amount)
}
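A hedged sketch of the two branches above: a deposit for an unknown pubkey registers a new validator (subject to signature verification), while a deposit for a known pubkey is treated as a plain balance top-up without re-checking the signature. The wrapper and its arguments are assumptions for illustration.

// Hypothetical illustration of both ApplyPendingDeposit branches.
func applyDepositExamples(ctx context.Context, st state.BeaconState, newKeyDep, topUpDep *ethpb.PendingDeposit) error {
    // Unknown pubkey: the signature is checked and, if valid, a validator is appended.
    if err := ApplyPendingDeposit(ctx, st, newKeyDep); err != nil {
        return err
    }
    // Known pubkey: the signature is NOT re-checked; the balance is simply increased.
    return ApplyPendingDeposit(ctx, st, topUpDep)
}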

// AddValidatorToRegistry updates the beacon state with validator information
// def add_validator_to_registry(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> None:
//
//	index = get_index_for_new_validator(state)
//	validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) # [Modified in Electra:EIP7251]
//	set_or_append_list(state.validators, index, validator)
//	set_or_append_list(state.balances, index, amount)
//	set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
//	set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
//	set_or_append_list(state.inactivity_scores, index, uint64(0))
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
    val, err := GetValidatorFromDeposit(pubKey, withdrawalCredentials, amount)
    if err != nil {
        return errors.Wrap(err, "could not get validator from deposit")
    }
    if err := beaconState.AppendValidator(val); err != nil {
        return err
    }
    if err := beaconState.AppendBalance(amount); err != nil {
        return err
    }

    // only active in altair and only when it's a new validator (after append balance)
    if beaconState.Version() >= version.Altair {
        if err := beaconState.AppendInactivityScore(0); err != nil {
            return err
        }
        if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
            return err
        }
        if err := beaconState.AppendCurrentParticipationBits(0); err != nil {
            return err
        }
    }
    return nil
}
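A small sketch of the post-conditions of the registry append above; unlike the pre-Electra variant removed later in this diff, the full deposit amount is appended directly to state.balances and no separate pending balance deposit is queued. The checking helper is hypothetical.

// Hypothetical check of what AddValidatorToRegistry leaves behind.
func checkRegistryAppend(st state.BeaconState, pubKey, wc []byte, amount uint64) error {
    before := len(st.Validators())
    if err := AddValidatorToRegistry(st, pubKey, wc, amount); err != nil {
        return err
    }
    if len(st.Validators()) != before+1 {
        return errors.New("expected exactly one appended validator")
    }
    // The appended balance equals the deposit amount (not zero as in the old flow).
    if st.Balances()[before] != amount {
        return errors.New("expected balance to equal the deposit amount")
    }
    return nil
}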

// GetValidatorFromDeposit gets a new validator object with provided parameters
//
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator:
//
//	validator = Validator(
//	    pubkey=pubkey,
//	    withdrawal_credentials=withdrawal_credentials,
//	    effective_balance=Gwei(0),
//	    slashed=False,
//	    activation_eligibility_epoch=FAR_FUTURE_EPOCH,
//	    activation_epoch=FAR_FUTURE_EPOCH,
//	    exit_epoch=FAR_FUTURE_EPOCH,
//	    withdrawable_epoch=FAR_FUTURE_EPOCH,
//	)
//
//	# [Modified in Electra:EIP7251]
//	max_effective_balance = get_max_effective_balance(validator)
//	validator.effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance)
//
//	return validator
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) (*ethpb.Validator, error) {
    validator := &ethpb.Validator{
        PublicKey:                  pubKey,
        WithdrawalCredentials:      withdrawalCredentials,
        EffectiveBalance:           0,
        Slashed:                    false,
        ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
        ActivationEpoch:            params.BeaconConfig().FarFutureEpoch,
        ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
        WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
    }
    v, err := state_native.NewValidator(validator)
    if err != nil {
        return nil, err
    }
    maxEffectiveBalance := helpers.ValidatorMaxEffectiveBalance(v)
    validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
    return validator, nil
}
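A worked example (assumed numbers, not from the diff) of the clamp at the end of GetValidatorFromDeposit: the amount is rounded down to a multiple of EFFECTIVE_BALANCE_INCREMENT and capped at the validator's max effective balance, which for non-compounding withdrawal credentials is MIN_ACTIVATION_BALANCE.

// Hypothetical illustration: a 32.5 ETH deposit with 0x01 credentials ends up with a
// 32 ETH effective balance (rounded down to the increment, then capped at the max).
func effectiveBalanceClampExample(pubKey []byte) (uint64, error) {
    cfg := params.BeaconConfig()
    wc := make([]byte, 32)
    wc[0] = cfg.ETH1AddressWithdrawalPrefixByte
    amount := cfg.MinActivationBalance + cfg.EffectiveBalanceIncrement/2 // 32.5 ETH in Gwei
    val, err := GetValidatorFromDeposit(pubKey, wc, amount)
    if err != nil {
        return 0, err
    }
    return val.EffectiveBalance, nil // expected: cfg.MinActivationBalance
}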

// ProcessDepositRequests is a function as part of electra to process execution layer deposits
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
    _, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
    defer span.End()

    if len(requests) == 0 {
        return beaconState, nil
    }

    var err error
    for _, receipt := range requests {
        beaconState, err = processDepositRequest(beaconState, receipt)
        if err != nil {
            return nil, errors.Wrap(err, "could not apply deposit request")
        }

@@ -338,34 +560,42 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
    return beaconState, nil
}

// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
//	# Set deposit request start index
//	if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
//	    state.deposit_requests_start_index = deposit_request.index
//
//	# Create pending deposit
//	state.pending_deposits.append(PendingDeposit(
//	    pubkey=deposit_request.pubkey,
//	    withdrawal_credentials=deposit_request.withdrawal_credentials,
//	    amount=deposit_request.amount,
//	    signature=deposit_request.signature,
//	    slot=state.slot,
//	))
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
    requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
    if err != nil {
        return nil, errors.Wrap(err, "could not get deposit requests start index")
    }
    if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
        if request == nil {
            return nil, errors.New("nil deposit request")
        }
        if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
            return nil, errors.Wrap(err, "could not set deposit requests start index")
        }
    }
    if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
        PublicKey:             bytesutil.SafeCopyBytes(request.Pubkey),
        WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
        Amount:                request.Amount,
        Signature:             bytesutil.SafeCopyBytes(request.Signature),
        Slot:                  beaconState.Slot(),
    }); err != nil {
        return nil, errors.Wrap(err, "could not append deposit request")
    }
    return beaconState, nil
}
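A minimal sketch (assumed wiring, not from the diff) of the EIP-6110 flow the two functions above implement: an execution-layer DepositRequest is no longer verified and applied immediately; it is converted into a PendingDeposit stamped with the current slot and queued for the epoch-processing path shown earlier.

// Hypothetical end-to-end use: queue the block's deposit requests, then let epoch
// processing (ProcessPendingDeposits) consume them once they are finalized.
func queueDepositRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.DepositRequest) (state.BeaconState, error) {
    st, err := ProcessDepositRequests(ctx, st, reqs)
    if err != nil {
        return nil, errors.Wrap(err, "could not queue deposit requests")
    }
    // Each request is now a PendingDeposit with Slot == st.Slot(); signatures are
    // checked later, in batch, during ProcessPendingDeposits.
    return st, nil
}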
|
||||
|
||||
@@ -9,10 +9,12 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
stateTesting "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/testing"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -20,7 +22,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
func TestProcessPendingDeposits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
state state.BeaconState
|
||||
@@ -48,17 +50,10 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
{
|
||||
name: "more deposits than balance to consume processes partial deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 20)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 10,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
depositAmount := uint64(amountAvailForProcessing) / 10
|
||||
st := stateWithPendingDeposits(t, 1_000, 20, depositAmount)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
@@ -74,25 +69,45 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
}
|
||||
|
||||
// Half of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "withdrawn validators should not consume churn",
|
||||
state: func() state.BeaconState {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
depositAmount := uint64(amountAvailForProcessing)
|
||||
// set the pending deposits to the maximum churn limit
|
||||
st := stateWithPendingDeposits(t, 1_000, 2, depositAmount)
|
||||
vals := st.Validators()
|
||||
vals[1].WithdrawableEpoch = 0
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
// Validators 0..9 should have their balance increased
|
||||
for i := primitives.ValidatorIndex(0); i < 2; i++ {
|
||||
b, err := st.BalanceAtIndex(i)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(amountAvailForProcessing), b)
|
||||
}
|
||||
|
||||
// All pending deposits should have been processed
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "less deposits than balance to consume processes all deposits",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 5)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 5,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
depositAmount := uint64(amountAvailForProcessing) / 5
|
||||
st := stateWithPendingDeposits(t, 1_000, 5, depositAmount)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
@@ -108,7 +123,73 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
}
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "process pending deposit for unknown key, activates new key",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 0)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
dep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
|
||||
require.NoError(t, st.SetPendingDeposits([]*eth.PendingDeposit{dep}))
|
||||
require.Equal(t, 0, len(st.Validators()))
|
||||
require.Equal(t, 0, len(st.Balances()))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
b, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b)
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
|
||||
// validator becomes active
|
||||
require.Equal(t, 1, len(st.Validators()))
|
||||
require.Equal(t, 1, len(st.Balances()))
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "process excess balance that uses a point to infinity signature, processed as a topup",
|
||||
state: func() state.BeaconState {
|
||||
excessBalance := uint64(100)
|
||||
st := stateWithActiveBalanceETH(t, 32)
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
validators[0].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[0].WithdrawalCredentials = wc
|
||||
dep := stateTesting.GeneratePendingDeposit(t, sk, excessBalance, bytesutil.ToBytes32(wc), 0)
|
||||
dep.Signature = common.InfiniteSignature[:]
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetPendingDeposits([]*eth.PendingDeposit{dep}))
|
||||
return st
|
||||
}(),
|
||||
check: func(t *testing.T, st state.BeaconState) {
|
||||
res, err := st.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), res)
|
||||
b, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(100), b)
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
@@ -116,17 +197,10 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
{
|
||||
name: "exiting validator deposit postponed",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 5)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 5,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
depositAmount := uint64(amountAvailForProcessing) / 5
|
||||
st := stateWithPendingDeposits(t, 1_000, 5, depositAmount)
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(0))
|
||||
v, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.ExitEpoch = 10
|
||||
@@ -148,7 +222,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
|
||||
// All of the balance deposits should have been processed, except validator index 0 was
|
||||
// added back to the pending deposits queue.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(remaining))
|
||||
},
|
||||
@@ -156,15 +230,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
{
|
||||
name: "exited validator balance increased",
|
||||
state: func() state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, 1_000)
|
||||
deps := make([]*eth.PendingBalanceDeposit, 1)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ð.PendingBalanceDeposit{
|
||||
Amount: 1_000_000,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
st := stateWithPendingDeposits(t, 1_000, 1, 1_000_000)
|
||||
v, err := st.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
v.ExitEpoch = 2
|
||||
@@ -182,7 +248,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
require.Equal(t, uint64(1_100_000), b)
|
||||
|
||||
// All of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(remaining))
|
||||
},
|
||||
@@ -199,7 +265,7 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
tab, err = helpers.TotalActiveBalance(tt.state)
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = electra.ProcessPendingBalanceDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
|
||||
err = electra.ProcessPendingDeposits(context.TODO(), tt.state, primitives.Gwei(tab))
|
||||
require.Equal(t, tt.wantErr, err != nil, "wantErr=%v, got err=%s", tt.wantErr, err)
|
||||
if tt.check != nil {
|
||||
tt.check(t, tt.state)
|
||||
@@ -208,6 +274,27 @@ func TestProcessPendingBalanceDeposits(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBatchProcessNewPendingDeposits(t *testing.T) {
|
||||
t.Run("invalid batch initiates correct individual validation", func(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 0)
|
||||
require.Equal(t, 0, len(st.Validators()))
|
||||
require.Equal(t, 0, len(st.Balances()))
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
|
||||
invalidDep := ð.PendingDeposit{}
|
||||
// have a combination of valid and invalid deposits
|
||||
deps := []*eth.PendingDeposit{validDep, invalidDep}
|
||||
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
|
||||
// successfully added to register
|
||||
require.Equal(t, 1, len(st.Validators()))
|
||||
require.Equal(t, 1, len(st.Balances()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessDepositRequests(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
sk, err := bls.RandKey()
|
||||
@@ -220,7 +307,7 @@ func TestProcessDepositRequests(t *testing.T) {
|
||||
})
|
||||
t.Run("nil request errors", func(t *testing.T) {
|
||||
_, err = electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{nil})
|
||||
require.ErrorContains(t, "got a nil DepositRequest", err)
|
||||
require.ErrorContains(t, "nil deposit request", err)
|
||||
})
|
||||
|
||||
vals := st.Validators()
|
||||
@@ -230,7 +317,7 @@ func TestProcessDepositRequests(t *testing.T) {
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(make([]*eth.PendingBalanceDeposit, 0))) // reset pbd as the determinitstic state populates this already
|
||||
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the deterministic state populates this already
|
||||
withdrawalCred := make([]byte, 32)
|
||||
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
depositMessage := ð.DepositMessage{
|
||||
@@ -255,11 +342,10 @@ func TestProcessDepositRequests(t *testing.T) {
|
||||
st, err = electra.ProcessDepositRequests(context.Background(), st, requests)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
pbd, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pbd))
|
||||
require.Equal(t, 1, len(pbd))
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
require.Equal(t, uint64(2000), pbd[1].Amount)
|
||||
}
|
||||
|
||||
func TestProcessDeposit_Electra_Simple(t *testing.T) {
|
||||
@@ -286,7 +372,7 @@ func TestProcessDeposit_Electra_Simple(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
pdSt, err := electra.ProcessDeposits(context.Background(), st, deps)
|
||||
require.NoError(t, err)
|
||||
pbd, err := pdSt.PendingBalanceDeposits()
|
||||
pbd, err := pdSt.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, pbd[2].Amount)
|
||||
require.Equal(t, 3, len(pbd))
|
||||
@@ -322,7 +408,7 @@ func TestProcessDeposit_SkipsInvalidDeposit(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
newState, err := electra.ProcessDeposit(beaconState, dep[0], true)
|
||||
newState, err := electra.ProcessDeposit(beaconState, dep[0], false)
|
||||
require.NoError(t, err, "Expected invalid block deposit to be ignored without error")
|
||||
|
||||
if newState.Eth1DepositIndex() != 1 {
|
||||
@@ -359,42 +445,128 @@ func TestApplyDeposit_TopUps_WithBadSignature(t *testing.T) {
|
||||
vals[0].PublicKey = sk.PublicKey().Marshal()
|
||||
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
adSt, err := electra.ApplyDeposit(st, depositData, true)
|
||||
adSt, err := electra.ApplyDeposit(st, depositData, false)
|
||||
require.NoError(t, err)
|
||||
pbd, err := adSt.PendingBalanceDeposits()
|
||||
pbd, err := adSt.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
require.Equal(t, topUpAmount, pbd[0].Amount)
|
||||
}
|
||||
|
||||
func TestApplyDeposit_Electra_SwitchToCompoundingValidator(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 3)
|
||||
// stateWithActiveBalanceETH generates a mock beacon state given a balance in eth
|
||||
func stateWithActiveBalanceETH(t *testing.T, balETH uint64) state.BeaconState {
|
||||
gwei := balETH * 1_000_000_000
|
||||
balPerVal := params.BeaconConfig().MinActivationBalance
|
||||
numVals := gwei / balPerVal
|
||||
|
||||
vals := make([]*eth.Validator, numVals)
|
||||
bals := make([]uint64, numVals)
|
||||
for i := uint64(0); i < numVals; i++ {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
vals[i] = ð.Validator{
|
||||
ActivationEpoch: 0,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: balPerVal,
|
||||
WithdrawalCredentials: wc,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
bals[i] = balPerVal
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: 10 * params.BeaconConfig().SlotsPerEpoch,
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
Fork: ð.Fork{
|
||||
CurrentVersion: params.BeaconConfig().ElectraForkVersion,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// set some fake finalized checkpoint
|
||||
require.NoError(t, st.SetFinalizedCheckpoint(ð.Checkpoint{
|
||||
Epoch: 0,
|
||||
Root: make([]byte, 32),
|
||||
}))
|
||||
return st
|
||||
}
|
||||
|
||||
// stateWithPendingDeposits with pending deposits and existing ethbalance
|
||||
func stateWithPendingDeposits(t *testing.T, balETH uint64, numDeposits, amount uint64) state.BeaconState {
|
||||
st := stateWithActiveBalanceETH(t, balETH)
|
||||
deps := make([]*eth.PendingDeposit, numDeposits)
|
||||
validators := st.Validators()
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
validators[i].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[i].WithdrawalCredentials = wc
|
||||
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, amount, bytesutil.ToBytes32(wc), 0)
|
||||
}
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
return st
|
||||
}
|
||||
|
||||
func TestApplyPendingDeposit_TopUp(t *testing.T) {
|
||||
excessBalance := uint64(100)
|
||||
st := stateWithActiveBalanceETH(t, 32)
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
withdrawalCred := make([]byte, 32)
|
||||
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
depositData := ð.Deposit_Data{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
Amount: 1000,
|
||||
WithdrawalCredentials: withdrawalCred,
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
}
|
||||
vals := st.Validators()
|
||||
vals[0].PublicKey = sk.PublicKey().Marshal()
|
||||
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
sr, err := signing.ComputeSigningRoot(depositData, bytesutil.ToBytes(3, 32))
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
validators[0].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[0].WithdrawalCredentials = wc
|
||||
dep := stateTesting.GeneratePendingDeposit(t, sk, excessBalance, bytesutil.ToBytes32(wc), 0)
|
||||
dep.Signature = common.InfiniteSignature[:]
|
||||
require.NoError(t, st.SetValidators(validators))
|
||||
|
||||
require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))
|
||||
|
||||
b, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
depositData.Signature = sig.Marshal()
|
||||
adSt, err := electra.ApplyDeposit(st, depositData, false)
|
||||
require.NoError(t, err)
|
||||
pbd, err := adSt.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pbd))
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount)
|
||||
require.Equal(t, uint64(2000), pbd[1].Amount)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+uint64(excessBalance), b)
|
||||
}
|
||||
|
||||
func TestApplyPendingDeposit_UnknownKey(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 0)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
dep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
|
||||
require.Equal(t, 0, len(st.Validators()))
|
||||
require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))
|
||||
// activates new validator
|
||||
require.Equal(t, 1, len(st.Validators()))
|
||||
b, err := st.BalanceAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b)
|
||||
}
|
||||
|
||||
func TestApplyPendingDeposit_InvalidSignature(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 0)
|
||||
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
dep := ð.PendingDeposit{
|
||||
PublicKey: sk.PublicKey().Marshal(),
|
||||
WithdrawalCredentials: wc,
|
||||
Amount: 100,
|
||||
}
|
||||
require.Equal(t, 0, len(st.Validators()))
|
||||
require.NoError(t, electra.ApplyPendingDeposit(context.Background(), st, dep))
|
||||
// no validator added
|
||||
require.Equal(t, 0, len(st.Validators()))
|
||||
// no topup either
|
||||
require.Equal(t, 0, len(st.Balances()))
|
||||
}
|
||||
|
||||
3	beacon-chain/core/electra/export_test.go	Normal file
@@ -0,0 +1,3 @@
|
||||
package electra
|
||||
|
||||
var BatchProcessNewPendingDeposits = batchProcessNewPendingDeposits
|
||||
@@ -29,7 +29,6 @@ var (
|
||||
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
|
||||
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
|
||||
AttestationsDelta = altair.AttestationsDelta
|
||||
ProcessSyncAggregate = altair.ProcessSyncAggregate
|
||||
)
|
||||
|
||||
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
|
||||
@@ -44,7 +43,7 @@ var (
|
||||
// process_registry_updates(state)
|
||||
// process_slashings(state)
|
||||
// process_eth1_data_reset(state)
|
||||
// process_pending_balance_deposits(state) # New in EIP7251
|
||||
// process_pending_deposits(state) # New in EIP7251
|
||||
// process_pending_consolidations(state) # New in EIP7251
|
||||
// process_effective_balance_updates(state)
|
||||
// process_slashings_reset(state)
|
||||
@@ -94,7 +93,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ProcessPendingBalanceDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
|
||||
if err = ProcessPendingDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = ProcessPendingConsolidations(ctx, state); err != nil {
|
||||
|
||||
@@ -78,23 +78,19 @@ func ProcessOperations(
|
||||
return nil, errors.Wrap(err, "could not process bls-to-execution changes")
|
||||
}
|
||||
// new in electra
|
||||
e, err := bb.Execution()
|
||||
requests, err := bb.ExecutionRequests()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution data from block")
|
||||
return nil, errors.Wrap(err, "could not get execution requests")
|
||||
}
|
||||
exe, ok := e.(interfaces.ExecutionDataElectra)
|
||||
if !ok {
|
||||
return nil, errors.New("could not cast execution data to electra execution data")
|
||||
}
|
||||
st, err = ProcessDepositRequests(ctx, st, exe.DepositRequests())
|
||||
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
return nil, errors.Wrap(err, "could not process deposit requests")
|
||||
}
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, exe.WithdrawalRequests())
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
|
||||
return nil, errors.Wrap(err, "could not process withdrawal requests")
|
||||
}
|
||||
if err := ProcessConsolidationRequests(ctx, st, exe.ConsolidationRequests()); err != nil {
|
||||
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
|
||||
}
|
||||
return st, nil
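For context, a rough sketch of the request-processing order that ProcessOperations now follows: requests are read from the block body's execution requests rather than cast out of the execution payload. The *enginev1.ExecutionRequests container type and its field names mirror the accesses in the hunk above and are assumptions, not a definitive signature.

// Hypothetical helper showing the new Electra request-processing order.
func processExecutionRequests(ctx context.Context, st state.BeaconState, requests *enginev1.ExecutionRequests) (state.BeaconState, error) {
    var err error
    if st, err = ProcessDepositRequests(ctx, st, requests.Deposits); err != nil {
        return nil, errors.Wrap(err, "could not process deposit requests")
    }
    if st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals); err != nil {
        return nil, errors.Wrap(err, "could not process withdrawal requests")
    }
    if err = ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, errors.Wrap(err, "could not process consolidation requests")
    }
    return st, nil
}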
|
||||
|
||||
@@ -57,14 +57,17 @@ func TestProcessEpoch_CanProcessElectra(t *testing.T) {
|
||||
require.NoError(t, st.SetSlot(10*params.BeaconConfig().SlotsPerEpoch))
|
||||
require.NoError(t, st.SetDepositBalanceToConsume(100))
|
||||
amountAvailForProcessing := helpers.ActivationExitChurnLimit(1_000 * 1e9)
|
||||
deps := make([]*ethpb.PendingBalanceDeposit, 20)
|
||||
validators := st.Validators()
|
||||
deps := make([]*ethpb.PendingDeposit, 20)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
deps[i] = ðpb.PendingBalanceDeposit{
|
||||
Amount: uint64(amountAvailForProcessing) / 10,
|
||||
Index: primitives.ValidatorIndex(i),
|
||||
deps[i] = ðpb.PendingDeposit{
|
||||
PublicKey: validators[i].PublicKey,
|
||||
WithdrawalCredentials: validators[i].WithdrawalCredentials,
|
||||
Amount: uint64(amountAvailForProcessing) / 10,
|
||||
Slot: 0,
|
||||
}
|
||||
}
|
||||
require.NoError(t, st.SetPendingBalanceDeposits(deps))
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
require.NoError(t, st.SetPendingConsolidations([]*ethpb.PendingConsolidation{
|
||||
{
|
||||
SourceIndex: 2,
|
||||
@@ -108,7 +111,7 @@ func TestProcessEpoch_CanProcessElectra(t *testing.T) {
|
||||
require.Equal(t, primitives.Gwei(100), res)
|
||||
|
||||
// Half of the balance deposits should have been processed.
|
||||
remaining, err := st.PendingBalanceDeposits()
|
||||
remaining, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 10, len(remaining))
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -102,7 +101,7 @@ import (
|
||||
// earliest_exit_epoch=earliest_exit_epoch,
|
||||
// consolidation_balance_to_consume=0,
|
||||
// earliest_consolidation_epoch=compute_activation_exit_epoch(get_current_epoch(pre)),
|
||||
// pending_balance_deposits=[],
|
||||
// pending_deposits=[],
|
||||
// pending_partial_withdrawals=[],
|
||||
// pending_consolidations=[],
|
||||
// )
|
||||
@@ -245,26 +244,23 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP6110]
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7002]
|
||||
ConsolidationRequestsRoot: bytesutil.Bytes32(0), // [New in Electra:EIP7251]
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
@@ -276,7 +272,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
|
||||
PendingBalanceDeposits: make([]*ethpb.PendingBalanceDeposit, 0),
|
||||
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
|
||||
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
|
||||
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
@@ -113,24 +112,21 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderElectra{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
DepositRequestsRoot: bytesutil.Bytes32(0),
|
||||
WithdrawalRequestsRoot: bytesutil.Bytes32(0),
|
||||
ConsolidationRequestsRoot: bytesutil.Bytes32(0),
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
|
||||
@@ -173,10 +169,10 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)
|
||||
|
||||
pendingBalanceDeposits, err := mSt.PendingBalanceDeposits()
|
||||
pendingDeposits, err := mSt.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, len(pendingBalanceDeposits))
|
||||
require.Equal(t, uint64(1000), pendingBalanceDeposits[1].Amount)
|
||||
require.Equal(t, 2, len(pendingDeposits))
|
||||
require.Equal(t, uint64(1000), pendingDeposits[1].Amount)
|
||||
|
||||
numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -3,87 +3,22 @@ package electra
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/common"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// AddValidatorToRegistry updates the beacon state with validator information
|
||||
// def add_validator_to_registry(state: BeaconState,
|
||||
//
|
||||
// pubkey: BLSPubkey,
|
||||
// withdrawal_credentials: Bytes32,
|
||||
// amount: uint64) -> None:
|
||||
// index = get_index_for_new_validator(state)
|
||||
// validator = get_validator_from_deposit(pubkey, withdrawal_credentials)
|
||||
// set_or_append_list(state.validators, index, validator)
|
||||
// set_or_append_list(state.balances, index, 0) # [Modified in Electra:EIP7251]
|
||||
// set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
|
||||
// set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
|
||||
// set_or_append_list(state.inactivity_scores, index, uint64(0))
|
||||
// state.pending_balance_deposits.append(PendingBalanceDeposit(index=index, amount=amount)) # [New in Electra:EIP7251]
|
||||
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
|
||||
val := ValidatorFromDeposit(pubKey, withdrawalCredentials)
|
||||
if err := beaconState.AppendValidator(val); err != nil {
|
||||
return err
|
||||
}
|
||||
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
if !ok {
|
||||
return errors.New("could not find validator in registry")
|
||||
}
|
||||
if err := beaconState.AppendBalance(0); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := beaconState.AppendPendingBalanceDeposit(index, amount); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := beaconState.AppendInactivityScore(0); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := beaconState.AppendPreviousParticipationBits(0); err != nil {
|
||||
return err
|
||||
}
|
||||
return beaconState.AppendCurrentParticipationBits(0)
|
||||
}
|
||||
|
||||
// ValidatorFromDeposit gets a new validator object with provided parameters
|
||||
//
|
||||
// def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32) -> Validator:
|
||||
//
|
||||
// return Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=withdrawal_credentials,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=0, # [Modified in Electra:EIP7251]
|
||||
//
|
||||
// )
|
||||
func ValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte) *ethpb.Validator {
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: 0, // [Modified in Electra:EIP7251]
|
||||
}
|
||||
}
|
||||
|
||||
// SwitchToCompoundingValidator
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// validator = state.validators[index]
|
||||
// if has_eth1_withdrawal_credential(validator):
|
||||
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
|
||||
// queue_excess_active_balance(state, index)
|
||||
// def switch_to_compounding_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
//
|
||||
// validator = state.validators[index]
|
||||
// validator.withdrawal_credentials = COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
|
||||
// queue_excess_active_balance(state, index)
|
||||
func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
v, err := s.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
@@ -92,28 +27,32 @@ func SwitchToCompoundingValidator(s state.BeaconState, idx primitives.ValidatorI
|
||||
if len(v.WithdrawalCredentials) == 0 {
|
||||
return errors.New("validator has no withdrawal credentials")
|
||||
}
|
||||
if helpers.HasETH1WithdrawalCredential(v) {
|
||||
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return QueueExcessActiveBalance(s, idx)
|
||||
|
||||
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
if err := s.UpdateValidatorAtIndex(idx, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return QueueExcessActiveBalance(s, idx)
|
||||
}
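A brief sketch of what the simplified SwitchToCompoundingValidator now does when the validator's balance exceeds MIN_ACTIVATION_BALANCE, mirroring the numbers in the test at the end of this diff (MinActivationBalance + 100_000 Gwei); the wrapper and the precondition are assumptions.

// Hypothetical walk-through: after the switch, MinActivationBalance stays on the
// balance sheet and the 100_000 Gwei excess sits at the tail of state.pending_deposits.
func switchToCompoundingExample(s state.BeaconState, idx primitives.ValidatorIndex) (uint64, error) {
    if err := SwitchToCompoundingValidator(s, idx); err != nil {
        return 0, err
    }
    pds, err := s.PendingDeposits()
    if err != nil {
        return 0, err
    }
    // The queued amount is exactly the excess over MinActivationBalance.
    return pds[len(pds)-1].Amount, nil
}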
|
||||
|
||||
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending balance deposit.
|
||||
// QueueExcessActiveBalance queues validators with balances above the min activation balance and adds to pending deposit.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// if balance > MIN_ACTIVATION_BALANCE:
|
||||
// excess_balance = balance - MIN_ACTIVATION_BALANCE
|
||||
// state.balances[index] = MIN_ACTIVATION_BALANCE
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=excess_balance)
|
||||
// )
|
||||
// def queue_excess_active_balance(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
//
|
||||
// balance = state.balances[index]
|
||||
// if balance > MIN_ACTIVATION_BALANCE:
|
||||
// excess_balance = balance - MIN_ACTIVATION_BALANCE
|
||||
// state.balances[index] = MIN_ACTIVATION_BALANCE
|
||||
// validator = state.validators[index]
|
||||
// state.pending_deposits.append(PendingDeposit(
|
||||
// pubkey=validator.pubkey,
|
||||
// withdrawal_credentials=validator.withdrawal_credentials,
|
||||
// amount=excess_balance,
|
||||
// signature=bls.G2_POINT_AT_INFINITY,
|
||||
// slot=GENESIS_SLOT,
|
||||
// ))
|
||||
func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
bal, err := s.BalanceAtIndex(idx)
|
||||
if err != nil {
|
||||
@@ -121,11 +60,22 @@ func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex
|
||||
}
|
||||
|
||||
if bal > params.BeaconConfig().MinActivationBalance {
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
if err := s.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.AppendPendingBalanceDeposit(idx, excessBalance)
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
val, err := s.ValidatorAtIndexReadOnly(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
return s.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: pk[:],
|
||||
WithdrawalCredentials: val.GetWithdrawalCredentials(),
|
||||
Amount: excessBalance,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
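A small worked example (numbers assumed) of the arithmetic in QueueExcessActiveBalance: a 40 ETH balance is trimmed to MIN_ACTIVATION_BALANCE and the 8 ETH excess is queued with the infinite signature so that it skips signature verification later.

// Hypothetical numbers: 40 ETH balance -> 32 ETH kept, 8 ETH queued as a PendingDeposit.
func queueExcessExample(s state.BeaconState, idx primitives.ValidatorIndex) error {
    if err := s.UpdateBalancesAtIndex(idx, 40_000_000_000); err != nil { // 40 ETH in Gwei
        return err
    }
    if err := QueueExcessActiveBalance(s, idx); err != nil {
        return err
    }
    // balances[idx] is now params.BeaconConfig().MinActivationBalance (32 ETH) and the
    // queued deposit amount is 8_000_000_000 Gwei, signed with common.InfiniteSignature.
    return nil
}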
|
||||
@@ -134,15 +84,21 @@ func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
// balance = state.balances[index]
|
||||
// state.balances[index] = 0
|
||||
// validator = state.validators[index]
|
||||
// validator.effective_balance = 0
|
||||
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
|
||||
// state.pending_balance_deposits.append(
|
||||
// PendingBalanceDeposit(index=index, amount=balance)
|
||||
// )
|
||||
// def queue_entire_balance_and_reset_validator(state: BeaconState, index: ValidatorIndex) -> None:
|
||||
//
|
||||
// balance = state.balances[index]
|
||||
// state.balances[index] = 0
|
||||
// validator = state.validators[index]
|
||||
// validator.effective_balance = 0
|
||||
// validator.activation_eligibility_epoch = FAR_FUTURE_EPOCH
|
||||
// state.pending_deposits.append(PendingDeposit(
|
||||
// pubkey=validator.pubkey,
|
||||
// withdrawal_credentials=validator.withdrawal_credentials,
|
||||
// amount=balance,
|
||||
// signature=bls.G2_POINT_AT_INFINITY,
|
||||
// slot=GENESIS_SLOT,
|
||||
//
|
||||
// ))
|
||||
//
|
||||
//nolint:dupword
|
||||
func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.ValidatorIndex) error {
|
||||
@@ -166,5 +122,11 @@ func QueueEntireBalanceAndResetValidator(s state.BeaconState, idx primitives.Val
|
||||
return err
|
||||
}
|
||||
|
||||
return s.AppendPendingBalanceDeposit(idx, bal)
|
||||
return s.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: v.PublicKey,
|
||||
WithdrawalCredentials: v.WithdrawalCredentials,
|
||||
Amount: bal,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -14,20 +13,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestAddValidatorToRegistry(t *testing.T) {
|
||||
st, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, electra.AddValidatorToRegistry(st, make([]byte, fieldparams.BLSPubkeyLength), make([]byte, fieldparams.RootLength), 100))
|
||||
balances := st.Balances()
|
||||
require.Equal(t, 1, len(balances))
|
||||
require.Equal(t, uint64(0), balances[0])
|
||||
pbds, err := st.PendingBalanceDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbds))
|
||||
require.Equal(t, uint64(100), pbds[0].Amount)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), pbds[0].Index)
|
||||
}
|
||||
|
||||
func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(ð.BeaconStateElectra{
|
||||
Validators: []*eth.Validator{
|
||||
@@ -60,7 +45,7 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
b, err := s.BalanceAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was changed")
|
||||
pbd, err := s.PendingBalanceDeposits()
|
||||
pbd, err := s.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(pbd), "pending balance deposits should be empty")
|
||||
|
||||
@@ -69,11 +54,10 @@ func TestSwitchToCompoundingValidator(t *testing.T) {
|
||||
b, err = s.BalanceAtIndex(2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, b, "balance was not changed")
|
||||
pbd, err = s.PendingBalanceDeposits()
|
||||
pbd, err = s.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
|
||||
require.Equal(t, uint64(100_000), pbd[0].Amount, "pending balance deposit amount is incorrect")
|
||||
require.Equal(t, primitives.ValidatorIndex(2), pbd[0].Index, "pending balance deposit index is incorrect")
|
||||
}
|
||||
|
||||
func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
|
||||
@@ -97,11 +81,10 @@ func TestQueueEntireBalanceAndResetValidator(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), v.EffectiveBalance, "effective balance was not reset")
|
||||
require.Equal(t, params.BeaconConfig().FarFutureEpoch, v.ActivationEligibilityEpoch, "activation eligibility epoch was not reset")
|
||||
pbd, err := s.PendingBalanceDeposits()
|
||||
pbd, err := s.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd), "pending balance deposits should have one element")
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance+100_000, pbd[0].Amount, "pending balance deposit amount is incorrect")
|
||||
require.Equal(t, primitives.ValidatorIndex(0), pbd[0].Index, "pending balance deposit index is incorrect")
|
||||
}
|
||||
|
||||
func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
|
||||
@@ -114,7 +97,7 @@ func TestSwitchToCompoundingValidator_Ok(t *testing.T) {
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
require.NoError(t, electra.SwitchToCompoundingValidator(st, 0))
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
pbd, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1010), pbd[0].Amount) // appends it at the end
|
||||
val, err := st.ValidatorAtIndex(0)
|
||||
@@ -132,7 +115,7 @@ func TestQueueExcessActiveBalance_Ok(t *testing.T) {
|
||||
err := electra.QueueExcessActiveBalance(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
pbd, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(1000), pbd[0].Amount) // appends it at the end
|
||||
|
||||
@@ -149,7 +132,7 @@ func TestQueueEntireBalanceAndResetValidator_Ok(t *testing.T) {
|
||||
err := electra.QueueEntireBalanceAndResetValidator(st, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
pbd, err := st.PendingBalanceDeposits()
|
||||
pbd, err := st.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(pbd))
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance-1000, pbd[0].Amount)
|
||||
|
||||
@@ -111,30 +111,31 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndex(vIdx)
|
||||
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
isCorrectSourceAddress := bytes.Equal(validator.WithdrawalCredentials[12:], wr.SourceAddress)
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidator(validator, currentEpoch) {
|
||||
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch.AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
@@ -156,7 +157,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
continue
|
||||
}
|
||||
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance >= params.BeaconConfig().MinActivationBalance
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
vBal, err := st.BalanceAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
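The credential check in the withdrawal-request processing above relies on the fixed layout of withdrawal credentials: one prefix byte, eleven zero bytes, then the 20-byte execution address, which is why the source-address comparison slices at offset 12. A standalone sketch of that comparison follows; `matchesSourceAddress` is a hypothetical helper for illustration, not something added by this diff.

```go
// matchesSourceAddress reports whether a withdrawal request's source address
// matches the execution address embedded in 32-byte withdrawal credentials
// (prefix byte at index 0, execution address in bytes 12..31).
func matchesSourceAddress(withdrawalCredentials, sourceAddress []byte) bool {
	if len(withdrawalCredentials) != 32 || len(sourceAddress) != 20 {
		return false
	}
	return bytes.Equal(withdrawalCredentials[12:], sourceAddress)
}
```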
@@ -147,11 +147,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
|
||||
// if state.version == electra:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
|
||||
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// if state.version == electra:
|
||||
// effective_balance_increments = validator.effective_balance // increment
|
||||
// penalty = penalty_per_effective_balance_increment * effective_balance_increments
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) {
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
@@ -177,13 +183,26 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
|
||||
// below equally.
|
||||
increment := params.BeaconConfig().EffectiveBalanceIncrement
|
||||
minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
|
||||
|
||||
// Modified in Electra:EIP7251
|
||||
var penaltyPerEffectiveBalanceIncrement uint64
|
||||
if st.Version() >= version.Electra {
|
||||
penaltyPerEffectiveBalanceIncrement = minSlashing / (totalBalance / increment)
|
||||
}
|
||||
|
||||
bals := st.Balances()
|
||||
changed := false
|
||||
err = st.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
correctEpoch := (currentEpoch + exitLength/2) == val.WithdrawableEpoch()
|
||||
if val.Slashed() && correctEpoch {
|
||||
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
|
||||
penalty := penaltyNumerator / totalBalance * increment
|
||||
var penalty uint64
|
||||
if st.Version() >= version.Electra {
|
||||
effectiveBalanceIncrements := val.EffectiveBalance() / increment
|
||||
penalty = penaltyPerEffectiveBalanceIncrement * effectiveBalanceIncrements
|
||||
} else {
|
||||
penaltyNumerator := val.EffectiveBalance() / increment * minSlashing
|
||||
penalty = penaltyNumerator / totalBalance * increment
|
||||
}
|
||||
bals[idx] = helpers.DecreaseBalanceWithVal(bals[idx], penalty)
|
||||
changed = true
|
||||
}
|
||||
|
||||
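The Electra branch above replaces the per-validator numerator/denominator computation with a single penalty-per-increment factor computed once for the epoch. Below is a hypothetical standalone helper that mirrors both branches; it is not part of the diff, and it assumes totalBalance is at least one increment, as the spec's total-balance floor guarantees.

```go
// slashingPenalty mirrors the two branches above: pre-Electra factors the
// increment out of the penalty numerator per validator, while Electra factors
// it out of the total balance once and scales by the validator's increments.
func slashingPenalty(effectiveBalance, adjustedTotalSlashing, totalBalance, increment uint64, electraFork bool) uint64 {
	if electraFork {
		penaltyPerIncrement := adjustedTotalSlashing / (totalBalance / increment)
		return penaltyPerIncrement * (effectiveBalance / increment)
	}
	penaltyNumerator := effectiveBalance / increment * adjustedTotalSlashing
	return penaltyNumerator / totalBalance * increment
}
```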
@@ -448,3 +448,75 @@ func TestProcessHistoricalDataUpdate(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessSlashings_SlashedElectra(t *testing.T) {
|
||||
tests := []struct {
|
||||
state *ethpb.BeaconStateElectra
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
state: ðpb.BeaconStateElectra{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(29000000000),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateElectra{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance},
|
||||
},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance, params.BeaconConfig().MaxEffectiveBalance},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(30500000000),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateElectra{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra},
|
||||
},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalance * 10, params.BeaconConfig().MaxEffectiveBalance * 20},
|
||||
Slashings: []uint64{0, 2 * 1e9},
|
||||
},
|
||||
want: uint64(317000001536),
|
||||
},
|
||||
{
|
||||
state: ðpb.BeaconStateElectra{
|
||||
Validators: []*ethpb.Validator{
|
||||
{Slashed: true,
|
||||
WithdrawableEpoch: params.BeaconConfig().EpochsPerSlashingsVector / 2,
|
||||
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
{ExitEpoch: params.BeaconConfig().FarFutureEpoch, EffectiveBalance: params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement}},
|
||||
Balances: []uint64{params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalanceElectra - params.BeaconConfig().EffectiveBalanceIncrement},
|
||||
Slashings: []uint64{0, 1e9},
|
||||
},
|
||||
want: uint64(2044000000727),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
t.Run(fmt.Sprint(i), func(t *testing.T) {
|
||||
original := proto.Clone(tt.state)
|
||||
s, err := state_native.InitializeFromProtoElectra(tt.state)
|
||||
require.NoError(t, err)
|
||||
helpers.ClearCache()
|
||||
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierBellatrix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
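As a sanity check on the first case's expected value (a reading of the test, not part of the change, and assuming only the unslashed validator counts toward total active balance at epoch 0): total active balance is 32e9 Gwei, the adjusted slashing balance is min(1e9 * 3, 32e9) = 3e9, so the Electra penalty per increment is 3e9 / (32e9 / 1e9) = 93,750,000 Gwei. The slashed validator holds 32 increments, giving a penalty of 3e9 and a remaining balance of 32e9 - 3e9 = 29,000,000,000.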

@@ -4,5 +4,5 @@ import "github.com/prysmaticlabs/prysm/v5/async/event"

// Notifier interface defines the methods of the service that provides beacon block operation updates to consumers.
type Notifier interface {
	OperationFeed() *event.Feed
	OperationFeed() event.SubscriberSender
}

@@ -4,5 +4,5 @@ import "github.com/prysmaticlabs/prysm/v5/async/event"

// Notifier interface defines the methods of the service that provides state updates to consumers.
type Notifier interface {
	StateFeed() *event.Feed
	StateFeed() event.SubscriberSender
}

@@ -63,6 +63,7 @@ go_test(
|
||||
"validators_test.go",
|
||||
"weak_subjectivity_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
shard_count = 2,
|
||||
tags = ["CI_race_detection"],
|
||||
|
||||
@@ -69,15 +69,16 @@ func IsNextPeriodSyncCommittee(
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
|
||||
return len(findSubCommitteeIndices(pk[:], committee.Pubkeys)) > 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -96,10 +97,11 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -112,7 +114,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
}()
|
||||
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -130,15 +132,16 @@ func NextPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
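These cache-miss fallbacks now go through ValidatorAtIndexReadOnly, whose PublicKey() accessor returns a fixed-size byte array rather than a slice, hence the pk[:] conversion before calling findSubCommitteeIndices. A short sketch of the pattern, with the surrounding variables (st, valIdx, committee) assumed from the enclosing function:

```go
// Read-only access avoids copying the full validator; the public key comes back
// as a fixed-size array (48 bytes for BLS keys), so it is sliced before use.
val, err := st.ValidatorAtIndexReadOnly(valIdx)
if err != nil {
	return nil, err
}
pk := val.PublicKey()
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
```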
@@ -584,23 +584,23 @@ func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
|
||||
// and validator.withdrawable_epoch <= epoch
|
||||
// and balance > 0
|
||||
// )
|
||||
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil || balance <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if fork >= version.Electra {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch <= epoch
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch <= epoch
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
// partial withdrawal. This function assumes that the caller has a lock on the state.
|
||||
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
@@ -630,9 +630,9 @@ func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoc
|
||||
// and has_max_effective_balance
|
||||
// and has_excess_balance
|
||||
// )
|
||||
func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
maxEB := ValidatorMaxEffectiveBalance(val)
|
||||
hasMaxBalance := val.EffectiveBalance == maxEB
|
||||
hasMaxBalance := val.EffectiveBalance() == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
@@ -652,8 +652,8 @@ func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint6
|
||||
// has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
// has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
|
||||
// return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
|
||||
func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance == params.BeaconConfig().MaxEffectiveBalance
|
||||
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
@@ -670,7 +670,7 @@ func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint6
|
||||
// return MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
|
||||
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
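ValidatorMaxEffectiveBalance now takes a read-only validator and keys the ceiling purely off the withdrawal-credential prefix. A test-style sketch of that behavior, built only from helpers visible in this diff; the test name and credential bytes are illustrative:

```go
func TestValidatorMaxEffectiveBalance_CompoundingSketch(t *testing.T) {
	// Illustrative credentials: the compounding prefix byte marks an EIP-7251 validator.
	creds := make([]byte, 32)
	creds[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
	v, err := state_native.NewValidator(&ethpb.Validator{WithdrawalCredentials: creds})
	require.NoError(t, err)
	// Compounding credentials raise the ceiling to MaxEffectiveBalanceElectra (2048 ETH);
	// any other prefix keeps the MinActivationBalance (32 ETH) ceiling.
	assert.Equal(t, params.BeaconConfig().MaxEffectiveBalanceElectra, helpers.ValidatorMaxEffectiveBalance(v))
}
```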
@@ -974,13 +974,6 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: ðpb.Validator{
|
||||
@@ -1036,7 +1029,9 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1050,13 +1045,6 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: ðpb.Validator{
|
||||
@@ -1113,7 +1101,9 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1167,15 +1157,12 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
validator: ðpb.Validator{WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}},
|
||||
want: params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
{
|
||||
"Handles nil case",
|
||||
nil,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(tt.validator))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(v))
|
||||
})
|
||||
}
|
||||
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
|
||||
|
||||
@@ -12,12 +12,10 @@ go_library(
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/eth/v1:go_default_library",
|
||||
"//proto/eth/v2:go_default_library",
|
||||
"//proto/migration:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -29,11 +27,13 @@ go_test(
|
||||
srcs = ["lightclient_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -13,15 +13,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
v11 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpbv1 "github.com/prysmaticlabs/prysm/v5/proto/eth/v1"
|
||||
ethpbv2 "github.com/prysmaticlabs/prysm/v5/proto/eth/v2"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/migration"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -29,16 +25,6 @@ const (
|
||||
executionBranchNumOfLeaves = 4
|
||||
)
|
||||
|
||||
// createLightClientFinalityUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
// def create_light_client_finality_update(update: LightClientUpdate) -> LightClientFinalityUpdate:
|
||||
//
|
||||
// return LightClientFinalityUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// finalized_header=update.finalized_header,
|
||||
// finality_branch=update.finality_branch,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func createLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientFinalityUpdate {
|
||||
finalityUpdate := ðpbv2.LightClientFinalityUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
@@ -51,14 +37,6 @@ func createLightClientFinalityUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2
|
||||
return finalityUpdate
|
||||
}
|
||||
|
||||
// createLightClientOptimisticUpdate - implements https://github.com/ethereum/consensus-specs/blob/3d235740e5f1e641d3b160c8688f26e7dc5a1894/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
|
||||
// def create_light_client_optimistic_update(update: LightClientUpdate) -> LightClientOptimisticUpdate:
|
||||
//
|
||||
// return LightClientOptimisticUpdate(
|
||||
// attested_header=update.attested_header,
|
||||
// sync_aggregate=update.sync_aggregate,
|
||||
// signature_slot=update.signature_slot,
|
||||
// )
|
||||
func createLightClientOptimisticUpdate(update *ethpbv2.LightClientUpdate) *ethpbv2.LightClientOptimisticUpdate {
|
||||
optimisticUpdate := ðpbv2.LightClientOptimisticUpdate{
|
||||
AttestedHeader: update.AttestedHeader,
|
||||
@@ -74,9 +52,10 @@ func NewLightClientFinalityUpdateFromBeaconState(
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (*ethpbv2.LightClientFinalityUpdate, error) {
|
||||
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, finalizedBlock)
|
||||
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, finalizedBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -89,8 +68,9 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (*ethpbv2.LightClientOptimisticUpdate, error) {
|
||||
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, nil)
|
||||
update, err := NewLightClientUpdateFromBeaconState(ctx, state, block, attestedState, attestedBlock, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -98,68 +78,12 @@ func NewLightClientOptimisticUpdateFromBeaconState(
|
||||
return createLightClientOptimisticUpdate(update), nil
|
||||
}
|
||||
|
||||
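Both wrappers now thread the attested block through to NewLightClientUpdateFromBeaconState, so callers must supply it alongside the attested state. A hedged sketch of the updated call sites; the variable names and the `lightclient` import alias are assumptions:

```go
// Finality update: needs the attested block plus the finalized block backing the finality proof.
finalityUpdate, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
	ctx, st, blk, attestedSt, attestedBlk, finalizedBlk)
if err != nil {
	return err
}

// Optimistic update: no finalized block is required.
optimisticUpdate, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
	ctx, st, blk, attestedSt, attestedBlk)
if err != nil {
	return err
}
_, _ = finalityUpdate, optimisticUpdate
```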
// NewLightClientUpdateFromBeaconState implements https://github.com/ethereum/consensus-specs/blob/d70dcd9926a4bbe987f1b4e65c3e05bd029fcfb8/specs/altair/light-client/full-node.md#create_light_client_update
|
||||
// def create_light_client_update(state: BeaconState,
|
||||
//
|
||||
// block: SignedBeaconBlock,
|
||||
// attested_state: BeaconState,
|
||||
// finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
|
||||
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
|
||||
// assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
|
||||
//
|
||||
// assert state.slot == state.latest_block_header.slot
|
||||
// header = state.latest_block_header.copy()
|
||||
// header.state_root = hash_tree_root(state)
|
||||
// assert hash_tree_root(header) == hash_tree_root(block.message)
|
||||
// update_signature_period = compute_sync_committee_period(compute_epoch_at_slot(block.message.slot))
|
||||
//
|
||||
// assert attested_state.slot == attested_state.latest_block_header.slot
|
||||
// attested_header = attested_state.latest_block_header.copy()
|
||||
// attested_header.state_root = hash_tree_root(attested_state)
|
||||
// assert hash_tree_root(attested_header) == block.message.parent_root
|
||||
// update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
|
||||
//
|
||||
// # `next_sync_committee` is only useful if the message is signed by the current sync committee
|
||||
// if update_attested_period == update_signature_period:
|
||||
// next_sync_committee = attested_state.next_sync_committee
|
||||
// next_sync_committee_branch = compute_merkle_proof_for_state(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
|
||||
// else:
|
||||
// next_sync_committee = SyncCommittee()
|
||||
// next_sync_committee_branch = [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
//
|
||||
// # Indicate finality whenever possible
|
||||
// if finalized_block is not None:
|
||||
// if finalized_block.message.slot != GENESIS_SLOT:
|
||||
// finalized_header = BeaconBlockHeader(
|
||||
// slot=finalized_block.message.slot,
|
||||
// proposer_index=finalized_block.message.proposer_index,
|
||||
// parent_root=finalized_block.message.parent_root,
|
||||
// state_root=finalized_block.message.state_root,
|
||||
// body_root=hash_tree_root(finalized_block.message.body),
|
||||
// )
|
||||
// assert hash_tree_root(finalized_header) == attested_state.finalized_checkpoint.root
|
||||
// else:
|
||||
// assert attested_state.finalized_checkpoint.root == Bytes32()
|
||||
// finalized_header = BeaconBlockHeader()
|
||||
// finality_branch = compute_merkle_proof_for_state(attested_state, FINALIZED_ROOT_INDEX)
|
||||
// else:
|
||||
// finalized_header = BeaconBlockHeader()
|
||||
// finality_branch = [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
|
||||
//
|
||||
// return LightClientUpdate(
|
||||
// attested_header=attested_header,
|
||||
// next_sync_committee=next_sync_committee,
|
||||
// next_sync_committee_branch=next_sync_committee_branch,
|
||||
// finalized_header=finalized_header,
|
||||
// finality_branch=finality_branch,
|
||||
// sync_aggregate=block.message.body.sync_aggregate,
|
||||
// signature_slot=block.message.slot,
|
||||
// )
|
||||
func NewLightClientUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
state state.BeaconState,
|
||||
block interfaces.ReadOnlySignedBeaconBlock,
|
||||
attestedState state.BeaconState,
|
||||
attestedBlock interfaces.ReadOnlySignedBeaconBlock,
|
||||
finalizedBlock interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientUpdate, error) {
|
||||
// assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
|
||||
attestedEpoch := slots.ToEpoch(attestedState.Slot())
|
||||
@@ -223,73 +147,30 @@ func NewLightClientUpdateFromBeaconState(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attested header root")
|
||||
}
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x", attestedHeaderRoot, block.Block().ParentRoot())
|
||||
attestedBlockRoot, err := attestedBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attested block root")
|
||||
}
|
||||
// assert hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root
|
||||
if attestedHeaderRoot != block.Block().ParentRoot() || attestedHeaderRoot != attestedBlockRoot {
|
||||
return nil, fmt.Errorf("attested header root %#x not equal to block parent root %#x or attested block root %#x", attestedHeaderRoot, block.Block().ParentRoot(), attestedBlockRoot)
|
||||
}
|
||||
|
||||
// update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
|
||||
updateAttestedPeriod := slots.SyncCommitteePeriod(slots.ToEpoch(attestedHeader.Slot))
|
||||
// update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot)
|
||||
updateAttestedPeriod := slots.SyncCommitteePeriod(slots.ToEpoch(attestedBlock.Block().Slot()))
|
||||
|
||||
// update = LightClientUpdate()
|
||||
result, err := createDefaultLightClientUpdate(block.Block().Version())
|
||||
result, err := createDefaultLightClientUpdate()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create default light client update")
|
||||
}
|
||||
|
||||
// update.attested_header = block_to_light_client_header(attested_block)
|
||||
blockHeader := ðpbv1.BeaconBlockHeader{
|
||||
Slot: attestedHeader.Slot,
|
||||
ProposerIndex: attestedHeader.ProposerIndex,
|
||||
ParentRoot: attestedHeader.ParentRoot,
|
||||
StateRoot: attestedHeader.StateRoot,
|
||||
BodyRoot: attestedHeader.BodyRoot,
|
||||
}
|
||||
switch block.Block().Version() {
|
||||
case version.Altair, version.Bellatrix:
|
||||
result.AttestedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderAltair{
|
||||
HeaderAltair: ðpbv2.LightClientHeader{Beacon: blockHeader},
|
||||
},
|
||||
}
|
||||
case version.Capella:
|
||||
executionPayloadHeader, err := getExecutionPayloadHeaderCapella(block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload header")
|
||||
}
|
||||
executionPayloadProof, err := blocks.PayloadProof(ctx, block.Block())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload proof")
|
||||
}
|
||||
result.AttestedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderCapella{
|
||||
HeaderCapella: ðpbv2.LightClientHeaderCapella{
|
||||
Beacon: blockHeader,
|
||||
Execution: executionPayloadHeader,
|
||||
ExecutionBranch: executionPayloadProof,
|
||||
},
|
||||
},
|
||||
}
|
||||
case version.Deneb:
|
||||
executionPayloadHeader, err := getExecutionPayloadHeaderDeneb(block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload header")
|
||||
}
|
||||
executionPayloadProof, err := blocks.PayloadProof(ctx, block.Block())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload proof")
|
||||
}
|
||||
result.AttestedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderDeneb{
|
||||
HeaderDeneb: ðpbv2.LightClientHeaderDeneb{
|
||||
Beacon: blockHeader,
|
||||
Execution: executionPayloadHeader,
|
||||
ExecutionBranch: executionPayloadProof,
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported block version %s", version.String(block.Block().Version()))
|
||||
attestedLightClientHeader, err := BlockToLightClientHeader(attestedBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get attested light client header")
|
||||
}
|
||||
result.AttestedHeader = attestedLightClientHeader
|
||||
|
||||
// if update_attested_period == update_signature_period
|
||||
if updateAttestedPeriod == updateSignaturePeriod {
|
||||
@@ -319,70 +200,11 @@ func NewLightClientUpdateFromBeaconState(
|
||||
// if finalized_block.message.slot != GENESIS_SLOT
|
||||
if finalizedBlock.Block().Slot() != 0 {
|
||||
// update.finalized_header = block_to_light_client_header(finalized_block)
|
||||
v1alpha1FinalizedHeader, err := finalizedBlock.Header()
|
||||
finalizedLightClientHeader, err := BlockToLightClientHeader(finalizedBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized header")
|
||||
}
|
||||
finalizedHeader := migration.V1Alpha1SignedHeaderToV1(v1alpha1FinalizedHeader).GetMessage()
|
||||
finalizedHeaderRoot, err := finalizedHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get finalized header root")
|
||||
}
|
||||
switch block.Block().Version() {
|
||||
case version.Altair, version.Bellatrix:
|
||||
result.FinalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderAltair{
|
||||
HeaderAltair: ðpbv2.LightClientHeader{Beacon: finalizedHeader},
|
||||
},
|
||||
}
|
||||
case version.Capella:
|
||||
executionPayloadHeader, err := getExecutionPayloadHeaderCapella(finalizedBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload header")
|
||||
}
|
||||
executionPayloadProof, err := blocks.PayloadProof(ctx, finalizedBlock.Block())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload proof")
|
||||
}
|
||||
result.FinalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderCapella{
|
||||
HeaderCapella: ðpbv2.LightClientHeaderCapella{
|
||||
Beacon: finalizedHeader,
|
||||
Execution: executionPayloadHeader,
|
||||
ExecutionBranch: executionPayloadProof,
|
||||
},
|
||||
},
|
||||
}
|
||||
case version.Deneb:
|
||||
executionPayloadHeader, err := getExecutionPayloadHeaderDeneb(finalizedBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload header")
|
||||
}
|
||||
executionPayloadProof, err := blocks.PayloadProof(ctx, finalizedBlock.Block())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution payload proof")
|
||||
}
|
||||
result.FinalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderDeneb{
|
||||
HeaderDeneb: ðpbv2.LightClientHeaderDeneb{
|
||||
Beacon: finalizedHeader,
|
||||
Execution: executionPayloadHeader,
|
||||
ExecutionBranch: executionPayloadProof,
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported block version %s", version.String(block.Block().Version()))
|
||||
}
|
||||
|
||||
// assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
|
||||
if finalizedHeaderRoot != bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root) {
|
||||
return nil, fmt.Errorf(
|
||||
"finalized header root %#x not equal to attested finalized checkpoint root %#x",
|
||||
finalizedHeaderRoot,
|
||||
bytesutil.ToBytes32(attestedState.FinalizedCheckpoint().Root),
|
||||
)
|
||||
return nil, errors.Wrap(err, "could not get finalized light client header")
|
||||
}
|
||||
result.FinalizedHeader = finalizedLightClientHeader
|
||||
} else {
|
||||
// assert attested_state.finalized_checkpoint.root == Bytes32()
|
||||
if !bytes.Equal(attestedState.FinalizedCheckpoint().Root, make([]byte, 32)) {
|
||||
@@ -411,7 +233,7 @@ func NewLightClientUpdateFromBeaconState(
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func createDefaultLightClientUpdate(v int) (*ethpbv2.LightClientUpdate, error) {
|
||||
func createDefaultLightClientUpdate() (*ethpbv2.LightClientUpdate, error) {
|
||||
syncCommitteeSize := params.BeaconConfig().SyncCommitteeSize
|
||||
pubKeys := make([][]byte, syncCommitteeSize)
|
||||
for i := uint64(0); i < syncCommitteeSize; i++ {
|
||||
@@ -421,218 +243,26 @@ func createDefaultLightClientUpdate(v int) (*ethpbv2.LightClientUpdate, error) {
|
||||
Pubkeys: pubKeys,
|
||||
AggregatePubkey: make([]byte, fieldparams.BLSPubkeyLength),
|
||||
}
|
||||
nextSyncCommitteeBranch := make([][]byte, fieldparams.NextSyncCommitteeBranchDepth)
|
||||
for i := 0; i < fieldparams.NextSyncCommitteeBranchDepth; i++ {
|
||||
nextSyncCommitteeBranch := make([][]byte, fieldparams.SyncCommitteeBranchDepth)
|
||||
for i := 0; i < fieldparams.SyncCommitteeBranchDepth; i++ {
|
||||
nextSyncCommitteeBranch[i] = make([]byte, fieldparams.RootLength)
|
||||
}
|
||||
executionBranch := make([][]byte, executionBranchNumOfLeaves)
|
||||
for i := 0; i < executionBranchNumOfLeaves; i++ {
|
||||
executionBranch[i] = make([]byte, 32)
|
||||
}
|
||||
finalizedBlockHeader := ðpbv1.BeaconBlockHeader{
|
||||
Slot: 0,
|
||||
ProposerIndex: 0,
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
finalityBranch := make([][]byte, FinalityBranchNumOfLeaves)
|
||||
for i := 0; i < FinalityBranchNumOfLeaves; i++ {
|
||||
finalityBranch[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
var finalizedHeader *ethpbv2.LightClientHeaderContainer
|
||||
switch v {
|
||||
case version.Altair, version.Bellatrix:
|
||||
finalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderAltair{
|
||||
HeaderAltair: ðpbv2.LightClientHeader{
|
||||
Beacon: finalizedBlockHeader,
|
||||
},
|
||||
},
|
||||
}
|
||||
case version.Capella:
|
||||
finalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderCapella{
|
||||
HeaderCapella: ðpbv2.LightClientHeaderCapella{
|
||||
Beacon: finalizedBlockHeader,
|
||||
Execution: createEmptyExecutionPayloadHeaderCapella(),
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
},
|
||||
}
|
||||
case version.Deneb:
|
||||
finalizedHeader = ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderDeneb{
|
||||
HeaderDeneb: ðpbv2.LightClientHeaderDeneb{
|
||||
Beacon: finalizedBlockHeader,
|
||||
Execution: createEmptyExecutionPayloadHeaderDeneb(),
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
},
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported block version %s", version.String(v))
|
||||
}
|
||||
|
||||
return ðpbv2.LightClientUpdate{
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalizedHeader: finalizedHeader,
|
||||
FinalityBranch: finalityBranch,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func createEmptyExecutionPayloadHeaderCapella() *enginev1.ExecutionPayloadHeaderCapella {
|
||||
return &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BlockNumber: 0,
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
Timestamp: 0,
|
||||
ExtraData: make([]byte, 32),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func createEmptyExecutionPayloadHeaderDeneb() *enginev1.ExecutionPayloadHeaderDeneb {
|
||||
return &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
ReceiptsRoot: make([]byte, 32),
|
||||
LogsBloom: make([]byte, 256),
|
||||
PrevRandao: make([]byte, 32),
|
||||
BlockNumber: 0,
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
Timestamp: 0,
|
||||
ExtraData: make([]byte, 32),
|
||||
BaseFeePerGas: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
TransactionsRoot: make([]byte, 32),
|
||||
WithdrawalsRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
|
||||
func getExecutionPayloadHeaderCapella(block interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderCapella, error) {
|
||||
payloadInterface, err := block.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution data")
|
||||
}
|
||||
transactionsRoot, err := payloadInterface.TransactionsRoot()
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
transactions, err := payloadInterface.Transactions()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions")
|
||||
}
|
||||
transactionsRootArray, err := ssz.TransactionsRoot(transactions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions root")
|
||||
}
|
||||
transactionsRoot = transactionsRootArray[:]
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions root")
|
||||
}
|
||||
withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
withdrawals, err := payloadInterface.Withdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
withdrawalsRoot = withdrawalsRootArray[:]
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
|
||||
execution := &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: payloadInterface.ParentHash(),
|
||||
FeeRecipient: payloadInterface.FeeRecipient(),
|
||||
StateRoot: payloadInterface.StateRoot(),
|
||||
ReceiptsRoot: payloadInterface.ReceiptsRoot(),
|
||||
LogsBloom: payloadInterface.LogsBloom(),
|
||||
PrevRandao: payloadInterface.PrevRandao(),
|
||||
BlockNumber: payloadInterface.BlockNumber(),
|
||||
GasLimit: payloadInterface.GasLimit(),
|
||||
GasUsed: payloadInterface.GasUsed(),
|
||||
Timestamp: payloadInterface.Timestamp(),
|
||||
ExtraData: payloadInterface.ExtraData(),
|
||||
BaseFeePerGas: payloadInterface.BaseFeePerGas(),
|
||||
BlockHash: payloadInterface.BlockHash(),
|
||||
TransactionsRoot: transactionsRoot,
|
||||
WithdrawalsRoot: withdrawalsRoot,
|
||||
}
|
||||
|
||||
return execution, nil
|
||||
}
|
||||
|
||||
func getExecutionPayloadHeaderDeneb(block interfaces.ReadOnlySignedBeaconBlock) (*enginev1.ExecutionPayloadHeaderDeneb, error) {
|
||||
payloadInterface, err := block.Block().Body().Execution()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution data")
|
||||
}
|
||||
transactionsRoot, err := payloadInterface.TransactionsRoot()
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
transactions, err := payloadInterface.Transactions()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions")
|
||||
}
|
||||
transactionsRootArray, err := ssz.TransactionsRoot(transactions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions root")
|
||||
}
|
||||
transactionsRoot = transactionsRootArray[:]
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get transactions root")
|
||||
}
|
||||
withdrawalsRoot, err := payloadInterface.WithdrawalsRoot()
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
withdrawals, err := payloadInterface.Withdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
withdrawalsRootArray, err := ssz.WithdrawalSliceRoot(withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
withdrawalsRoot = withdrawalsRootArray[:]
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
|
||||
execution := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadInterface.ParentHash(),
|
||||
FeeRecipient: payloadInterface.FeeRecipient(),
|
||||
StateRoot: payloadInterface.StateRoot(),
|
||||
ReceiptsRoot: payloadInterface.ReceiptsRoot(),
|
||||
LogsBloom: payloadInterface.LogsBloom(),
|
||||
PrevRandao: payloadInterface.PrevRandao(),
|
||||
BlockNumber: payloadInterface.BlockNumber(),
|
||||
GasLimit: payloadInterface.GasLimit(),
|
||||
GasUsed: payloadInterface.GasUsed(),
|
||||
Timestamp: payloadInterface.Timestamp(),
|
||||
ExtraData: payloadInterface.ExtraData(),
|
||||
BaseFeePerGas: payloadInterface.BaseFeePerGas(),
|
||||
BlockHash: payloadInterface.BlockHash(),
|
||||
TransactionsRoot: transactionsRoot,
|
||||
WithdrawalsRoot: withdrawalsRoot,
|
||||
}
|
||||
|
||||
return execution, nil
|
||||
}
|
||||
|
||||
func ComputeTransactionsRoot(payload interfaces.ExecutionData) ([]byte, error) {
|
||||
transactionsRoot, err := payload.TransactionsRoot()
|
||||
if errors.Is(err, consensus_types.ErrUnsupportedField) {
|
||||
@@ -669,8 +299,45 @@ func ComputeWithdrawalsRoot(payload interfaces.ExecutionData) ([]byte, error) {
|
||||
return withdrawalsRoot, nil
|
||||
}
|
||||
|
||||
func BlockToLightClientHeaderAltair(block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeader, error) {
|
||||
if block.Version() != version.Altair {
|
||||
func BlockToLightClientHeader(block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderContainer, error) {
|
||||
switch block.Version() {
|
||||
case version.Altair, version.Bellatrix:
|
||||
altairHeader, err := blockToLightClientHeaderAltair(block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get header")
|
||||
}
|
||||
return ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderAltair{
|
||||
HeaderAltair: altairHeader,
|
||||
},
|
||||
}, nil
|
||||
case version.Capella:
|
||||
capellaHeader, err := blockToLightClientHeaderCapella(context.Background(), block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get capella header")
|
||||
}
|
||||
return ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderCapella{
|
||||
HeaderCapella: capellaHeader,
|
||||
},
|
||||
}, nil
|
||||
case version.Deneb, version.Electra:
|
||||
denebHeader, err := blockToLightClientHeaderDeneb(context.Background(), block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get header")
|
||||
}
|
||||
return ðpbv2.LightClientHeaderContainer{
|
||||
Header: ðpbv2.LightClientHeaderContainer_HeaderDeneb{
|
||||
HeaderDeneb: denebHeader,
|
||||
},
|
||||
}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported block version %s", version.String(block.Version()))
|
||||
}
|
||||
}
|
||||
|
||||
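BlockToLightClientHeader replaces the exported per-fork constructors and returns a LightClientHeaderContainer, so callers switch on the oneof to reach the concrete header. A sketch, assuming `blk` is a ReadOnlySignedBeaconBlock and the package is imported under a hypothetical `lightclient` alias:

```go
container, err := lightclient.BlockToLightClientHeader(blk)
if err != nil {
	return err
}
switch h := container.Header.(type) {
case *ethpbv2.LightClientHeaderContainer_HeaderAltair:
	_ = h.HeaderAltair.Beacon // Altair/Bellatrix: beacon header only
case *ethpbv2.LightClientHeaderContainer_HeaderCapella:
	_ = h.HeaderCapella.Execution // Capella: adds the execution header and branch
case *ethpbv2.LightClientHeaderContainer_HeaderDeneb:
	_ = h.HeaderDeneb.Execution // Deneb and Electra share the Deneb header shape
}
```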
func blockToLightClientHeaderAltair(block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeader, error) {
|
||||
if block.Version() < version.Altair {
|
||||
return nil, fmt.Errorf("block version is %s instead of Altair", version.String(block.Version()))
|
||||
}
|
||||
|
||||
@@ -692,8 +359,8 @@ func BlockToLightClientHeaderAltair(block interfaces.ReadOnlySignedBeaconBlock)
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BlockToLightClientHeaderCapella(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderCapella, error) {
|
||||
if block.Version() != version.Capella {
|
||||
func blockToLightClientHeaderCapella(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderCapella, error) {
|
||||
if block.Version() < version.Capella {
|
||||
return nil, fmt.Errorf("block version is %s instead of Capella", version.String(block.Version()))
|
||||
}
|
||||
|
||||
@@ -754,9 +421,9 @@ func BlockToLightClientHeaderCapella(ctx context.Context, block interfaces.ReadO
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BlockToLightClientHeaderDeneb(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderDeneb, error) {
|
||||
if block.Version() != version.Deneb {
|
||||
return nil, fmt.Errorf("block version is %s instead of Deneb", version.String(block.Version()))
|
||||
func blockToLightClientHeaderDeneb(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock) (*ethpbv2.LightClientHeaderDeneb, error) {
|
||||
if block.Version() < version.Deneb {
|
||||
return nil, fmt.Errorf("block version is %s instead of Deneb/Electra", version.String(block.Version()))
|
||||
}
|
||||
|
||||
payload, err := block.Block().Body().Execution()
|
||||
|
||||
File diff suppressed because it is too large
@@ -333,8 +333,7 @@ func ProcessBlockForStateRoot(
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
}
|
||||
state, err = b.ProcessPayload(state, blk.Body())
|
||||
if err != nil {
|
||||
if err = b.ProcessPayload(state, blk.Body()); err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution data")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -698,3 +698,45 @@ func TestProcessSlotsConditionally(t *testing.T) {
|
||||
assert.Equal(t, primitives.Slot(6), s.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkProcessSlots_Capella(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkProcessSlots_Deneb(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkProcessSlots_Electra(b *testing.B) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
|
||||
var err error
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
|
||||
if err != nil {
|
||||
b.Fatalf("Failed to process slot %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -241,76 +241,35 @@ func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
|
||||
return slashed
|
||||
}
|
||||
|
||||
// ExitedValidatorIndices determines the indices exited during the current epoch.
|
||||
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
|
||||
// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
|
||||
//
|
||||
// A validator is considered to have exited during an epoch if their ExitEpoch equals the epoch;
// validators that have been ejected are excluded.
|
||||
// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
|
||||
// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
|
||||
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
|
||||
exited := make([]primitives.ValidatorIndex, 0)
|
||||
exitEpochs := make([]primitives.Epoch, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
val := validators[i]
|
||||
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
exitEpochs = append(exitEpochs, val.ExitEpoch)
|
||||
}
|
||||
}
|
||||
exitQueueEpoch := primitives.Epoch(0)
|
||||
for _, i := range exitEpochs {
|
||||
if exitQueueEpoch < i {
|
||||
exitQueueEpoch = i
|
||||
}
|
||||
}
|
||||
|
||||
// We use the exit queue churn to determine if we have passed a churn limit.
|
||||
exitQueueChurn := uint64(0)
|
||||
for _, val := range validators {
|
||||
if val.ExitEpoch == exitQueueEpoch {
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
for i, val := range validators {
|
||||
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
|
||||
val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
|
||||
if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
|
||||
exited = append(exited, primitives.ValidatorIndex(i))
|
||||
}
|
||||
}
|
||||
return exited, nil
|
||||
}
|
||||
|
||||
// EjectedValidatorIndices determines the indices ejected during the given epoch.
|
||||
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
|
||||
// EjectedValidatorIndices returns the indices of validators who were ejected during the specified epoch.
|
||||
//
|
||||
// A validator is considered ejected during an epoch if:
|
||||
// - Their ExitEpoch equals the epoch.
|
||||
// - Their EffectiveBalance is less than or equal to the EjectionBalance threshold.
|
||||
//
|
||||
// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
|
||||
// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
|
||||
// withdrawable epochs.
|
||||
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
|
||||
ejected := make([]primitives.ValidatorIndex, 0)
|
||||
exitEpochs := make([]primitives.Epoch, 0)
|
||||
for i := 0; i < len(validators); i++ {
|
||||
val := validators[i]
|
||||
if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
exitEpochs = append(exitEpochs, val.ExitEpoch)
|
||||
}
|
||||
}
|
||||
exitQueueEpoch := primitives.Epoch(0)
|
||||
for _, i := range exitEpochs {
|
||||
if exitQueueEpoch < i {
|
||||
exitQueueEpoch = i
|
||||
}
|
||||
}
|
||||
|
||||
// We use the exit queue churn to determine if we have passed a churn limit.
|
||||
exitQueueChurn := uint64(0)
|
||||
for _, val := range validators {
|
||||
if val.ExitEpoch == exitQueueEpoch {
|
||||
exitQueueChurn++
|
||||
}
|
||||
}
|
||||
churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
|
||||
if churn < exitQueueChurn {
|
||||
exitQueueEpoch++
|
||||
}
|
||||
withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
for i, val := range validators {
|
||||
if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
|
||||
val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
|
||||
if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
|
||||
ejected = append(ejected, primitives.ValidatorIndex(i))
|
||||
}
|
||||
}
|
||||
|
||||
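For illustration only (not part of the diff): a minimal sketch of how the simplified selection behaves with the new signatures shown above. A validator counts as exited for an epoch when its ExitEpoch matches the epoch and its effective balance is above the ejection threshold, and as ejected when the balance is at or below it. The import paths are assumed from the Prysm v5 module layout.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func main() {
	cfg := params.BeaconConfig()
	vals := []*ethpb.Validator{
		// Exits at epoch 0 with a full balance: reported by ExitedValidatorIndices.
		{ExitEpoch: 0, EffectiveBalance: cfg.MaxEffectiveBalance},
		// Exits at epoch 0 at the ejection balance: reported by EjectedValidatorIndices.
		{ExitEpoch: 0, EffectiveBalance: cfg.EjectionBalance},
		// Exits at a later epoch: reported by neither for epoch 0.
		{ExitEpoch: 10, EffectiveBalance: cfg.MaxEffectiveBalance},
	}

	exited, err := validators.ExitedValidatorIndices(0, vals)
	if err != nil {
		panic(err)
	}
	ejected, err := validators.EjectedValidatorIndices(0, vals)
	if err != nil {
		panic(err)
	}
	fmt.Println(exited, ejected) // expected output: [0] [1]
}
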
@@ -389,19 +389,16 @@ func TestExitedValidatorIndices(t *testing.T) {
state: &ethpb.BeaconState{
Validators: []*ethpb.Validator{
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: 10,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 10,
},
{
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
ExitEpoch: 0,
},
},
},
@@ -433,11 +430,7 @@ func TestExitedValidatorIndices(t *testing.T) {
},
}
for _, tt := range tests {
s, err := state_native.InitializeFromProtoPhase0(tt.state)
require.NoError(t, err)
activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
require.NoError(t, err)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
require.NoError(t, err)
assert.DeepEqual(t, tt.wanted, exitedIndices)
}

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time"
"github.com/prysmaticlabs/prysm/v5/time/slots"
bolt "go.etcd.io/bbolt"
@@ -603,14 +604,14 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [

// marshal versioned state from struct type down to bytes.
func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, error) {
switch st.ToProtoUnsafe().(type) {
case *ethpb.BeaconState:
switch st.Version() {
case version.Phase0:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconState)
if !ok {
return nil, errors.New("non valid inner state")
}
return encode(ctx, rState)
case *ethpb.BeaconStateAltair:
case version.Altair:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateAltair)
if !ok {
return nil, errors.New("non valid inner state")
@@ -623,7 +624,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(altairKey, rawObj...)), nil
case *ethpb.BeaconStateBellatrix:
case version.Bellatrix:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateBellatrix)
if !ok {
return nil, errors.New("non valid inner state")
@@ -636,7 +637,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(bellatrixKey, rawObj...)), nil
case *ethpb.BeaconStateCapella:
case version.Capella:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateCapella)
if !ok {
return nil, errors.New("non valid inner state")
@@ -649,7 +650,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(capellaKey, rawObj...)), nil
case *ethpb.BeaconStateDeneb:
case version.Deneb:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateDeneb)
if !ok {
return nil, errors.New("non valid inner state")
@@ -662,7 +663,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
return nil, err
}
return snappy.Encode(nil, append(denebKey, rawObj...)), nil
case *ethpb.BeaconStateElectra:
case version.Electra:
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateElectra)
if !ok {
return nil, errors.New("non valid inner state")

@@ -138,20 +138,17 @@ func TestState_CanSaveRetrieve(t *testing.T) {
require.NoError(t, err)
require.NoError(t, st.SetSlot(100))
p, err := blocks.WrappedExecutionPayloadHeaderElectra(&enginev1.ExecutionPayloadHeaderElectra{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: []byte("foo"),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
DepositRequestsRoot: make([]byte, 32),
WithdrawalRequestsRoot: make([]byte, 32),
ConsolidationRequestsRoot: make([]byte, 32),
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: []byte("foo"),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
TransactionsRoot: make([]byte, 32),
WithdrawalsRoot: make([]byte, 32),
})
require.NoError(t, err)
require.NoError(t, st.SetLatestExecutionPayloadHeader(p))

@@ -37,6 +37,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -105,8 +106,11 @@ go_test(
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/execution/testing:go_default_library",
"//beacon-chain/execution/types:go_default_library",
"//beacon-chain/forkchoice:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -14,6 +14,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -23,6 +24,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
@@ -44,8 +46,6 @@ var (
GetPayloadMethodV4,
GetPayloadBodiesByHashV1,
GetPayloadBodiesByRangeV1,
GetPayloadBodiesByHashV2,
GetPayloadBodiesByRangeV2,
}
)

@@ -77,14 +77,12 @@ const (
BlockByNumberMethod = "eth_getBlockByNumber"
// GetPayloadBodiesByHashV1 is the engine_getPayloadBodiesByHashX JSON-RPC method for pre-Electra payloads.
GetPayloadBodiesByHashV1 = "engine_getPayloadBodiesByHashV1"
// GetPayloadBodiesByHashV2 is the engine_getPayloadBodiesByHashX JSON-RPC method introduced by Electra.
GetPayloadBodiesByHashV2 = "engine_getPayloadBodiesByHashV2"
// GetPayloadBodiesByRangeV1 is the engine_getPayloadBodiesByRangeX JSON-RPC method for pre-Electra payloads.
GetPayloadBodiesByRangeV1 = "engine_getPayloadBodiesByRangeV1"
// GetPayloadBodiesByRangeV2 is the engine_getPayloadBodiesByRangeX JSON-RPC method introduced by Electra.
GetPayloadBodiesByRangeV2 = "engine_getPayloadBodiesByRangeV2"
// ExchangeCapabilities request string for JSON-RPC.
ExchangeCapabilities = "engine_exchangeCapabilities"
// GetBlobsV1 request string for JSON-RPC.
GetBlobsV1 = "engine_getBlobsV1"
// Defines the seconds before timing out engine endpoints with non-block execution semantics.
defaultEngineTimeout = time.Second
)
@@ -99,22 +97,21 @@ type ForkchoiceUpdatedResponse struct {
ValidationError string `json:"validationError"`
}

// PayloadReconstructor defines a service that can reconstruct a full beacon
// block with an execution payload from a signed beacon block and a connection
// to an execution client's engine API.
type PayloadReconstructor interface {
// Reconstructor defines a service responsible for reconstructing full beacon chain objects by utilizing the execution API and making requests through the execution client.
type Reconstructor interface {
ReconstructFullBlock(
ctx context.Context, blindedBlock interfaces.ReadOnlySignedBeaconBlock,
) (interfaces.SignedBeaconBlock, error)
ReconstructFullBellatrixBlockBatch(
ctx context.Context, blindedBlocks []interfaces.ReadOnlySignedBeaconBlock,
) ([]interfaces.SignedBeaconBlock, error)
ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, indices []bool) ([]blocks.VerifiedROBlob, error)
}

// EngineCaller defines a client that can interact with an Ethereum
// execution node's engine service via JSON-RPC.
type EngineCaller interface {
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error)
NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error)
ForkchoiceUpdated(
ctx context.Context, state *pb.ForkchoiceState, attrs payloadattribute.Attributer,
) (*pb.PayloadIDBytes, []byte, error)
@@ -125,8 +122,8 @@ type EngineCaller interface {

var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")

// NewPayload calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash) ([]byte, error) {
// NewPayload request calls the engine_newPayloadVX method via JSON-RPC.
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
@@ -163,18 +160,20 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
if !ok {
return nil, errors.New("execution data must be a Deneb execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
return nil, handleRPCError(err)
}
case *pb.ExecutionPayloadElectra:
payloadPb, ok := payload.Proto().(*pb.ExecutionPayloadElectra)
if !ok {
return nil, errors.New("execution data must be a Electra execution payload")
}
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
return nil, handleRPCError(err)
if executionRequests == nil {
err := s.rpcClient.CallContext(ctx, result, NewPayloadMethodV3, payloadPb, versionedHashes, parentBlockRoot)
if err != nil {
return nil, handleRPCError(err)
}
} else {
flattenedRequests, err := pb.EncodeExecutionRequests(executionRequests)
if err != nil {
return nil, errors.Wrap(err, "failed to encode execution requests")
}
err = s.rpcClient.CallContext(ctx, result, NewPayloadMethodV4, payloadPb, versionedHashes, parentBlockRoot, flattenedRequests)
if err != nil {
return nil, handleRPCError(err)
}
}
default:
return nil, errors.New("unknown execution data type")
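For illustration only (not part of the diff): a sketch of how a caller threads Electra execution requests into the updated NewPayload signature. Passing nil keeps the pre-Electra engine_newPayloadV3 path; a non-nil *pb.ExecutionRequests is flattened with pb.EncodeExecutionRequests and sent via engine_newPayloadV4, as in the hunk above. The consensus-types/interfaces import path and the local interface/function names are assumptions made for the sketch.

package enginesketch

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// payloadSubmitter mirrors the NewPayload method of the EngineCaller interface
// after this change; the real interface lives in the execution package.
type payloadSubmitter interface {
	NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error)
}

// submitPayload forwards a payload together with its (possibly nil) execution requests.
func submitPayload(ctx context.Context, engine payloadSubmitter, payload interfaces.ExecutionData, parentRoot common.Hash, requests *pb.ExecutionRequests) ([]byte, error) {
	// requests == nil -> engine_newPayloadV3; requests != nil -> engine_newPayloadV4.
	return engine.NewPayload(ctx, payload, []common.Hash{}, &parentRoot, requests)
}
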
@@ -269,7 +268,7 @@ func (s *Service) ForkchoiceUpdated(
func getPayloadMethodAndMessage(slot primitives.Slot) (string, proto.Message) {
pe := slots.ToEpoch(slot)
if pe >= params.BeaconConfig().ElectraForkEpoch {
return GetPayloadMethodV4, &pb.ExecutionPayloadElectraWithValueAndBlobsBundle{}
return GetPayloadMethodV4, &pb.ExecutionBundleElectra{}
}
if pe >= params.BeaconConfig().DenebForkEpoch {
return GetPayloadMethodV3, &pb.ExecutionPayloadDenebWithValueAndBlobsBundle{}
@@ -309,13 +308,16 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.ExchangeCapabilities")
defer span.End()

result := &pb.ExchangeCapabilities{}
var result []string
err := s.rpcClient.CallContext(ctx, &result, ExchangeCapabilities, supportedEngineEndpoints)
if err != nil {
return nil, handleRPCError(err)
}

var unsupported []string
for _, s1 := range supportedEngineEndpoints {
supported := false
for _, s2 := range result.SupportedMethods {
for _, s2 := range result {
if s1 == s2 {
supported = true
break
@@ -328,7 +330,7 @@ func (s *Service) ExchangeCapabilities(ctx context.Context) ([]string, error) {
if len(unsupported) != 0 {
log.Warnf("Please update client, detected the following unsupported engine methods: %s", unsupported)
}
return result.SupportedMethods, handleRPCError(err)
return result, handleRPCError(err)
}

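For illustration only (not part of the diff): the unsupported-method check that ExchangeCapabilities performs now that the JSON-RPC result decodes straight into a []string; this is a map-based restatement of the nested loop above, not Prysm code.

package enginesketch

// unsupportedMethods returns the entries of wanted that are absent from the
// capabilities reported by the execution client.
func unsupportedMethods(wanted, reported []string) []string {
	have := make(map[string]bool, len(reported))
	for _, m := range reported {
		have[m] = true
	}
	var missing []string
	for _, m := range wanted {
		if !have[m] {
			missing = append(missing, m)
		}
	}
	return missing
}
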
// GetTerminalBlockHash returns the valid terminal block hash based on total difficulty.
@@ -495,6 +497,20 @@ func (s *Service) HeaderByNumber(ctx context.Context, number *big.Int) (*types.H
return hdr, err
}

// GetBlobs returns the blob and proof from the execution engine for the given versioned hashes.
func (s *Service) GetBlobs(ctx context.Context, versionedHashes []common.Hash) ([]*pb.BlobAndProof, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobs")
defer span.End()
// If the execution engine does not support `GetBlobsV1`, return early to prevent encountering an error later.
if !s.capabilityCache.has(GetBlobsV1) {
return nil, nil
}

result := make([]*pb.BlobAndProof, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV1, versionedHashes)
return result, handleRPCError(err)
}

// ReconstructFullBlock takes in a blinded beacon block and reconstructs
// a beacon block with a full execution payload via the engine API.
func (s *Service) ReconstructFullBlock(
@@ -523,6 +539,109 @@ func (s *Service) ReconstructFullBellatrixBlockBatch(
return unb, nil
}

// ReconstructBlobSidecars reconstructs the verified blob sidecars for a given beacon block.
// It retrieves the KZG commitments from the block body, fetches the associated blobs and proofs,
// and constructs the corresponding verified read-only blob sidecars.
//
// The 'exists' argument is a boolean list (must be the same length as body.BlobKzgCommitments), where each element corresponds to whether a
// particular blob sidecar already exists. If exists[i] is true, the blob for the i-th KZG commitment
// has already been retrieved and does not need to be fetched again from the execution layer (EL).
//
// For example:
// - len(block.Body().BlobKzgCommitments()) == 6
// - If exists = [true, false, true, false, true, false], the function will fetch the blobs
// associated with indices 1, 3, and 5 (since those are marked as non-existent).
// - If exists = [false ... x 6], the function will attempt to fetch all blobs.
//
// Only the blobs that do not already exist (where exists[i] is false) are fetched using the KZG commitments from block body.
func (s *Service) ReconstructBlobSidecars(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, exists []bool) ([]blocks.VerifiedROBlob, error) {
blockBody := block.Block().Body()
kzgCommitments, err := blockBody.BlobKzgCommitments()
if err != nil {
return nil, errors.Wrap(err, "could not get blob KZG commitments")
}
if len(kzgCommitments) > len(exists) {
return nil, fmt.Errorf("length of KZG commitments (%d) is greater than length of exists (%d)", len(kzgCommitments), len(exists))
}

// Collect KZG hashes for non-existing blobs
var kzgHashes []common.Hash
for i, commitment := range kzgCommitments {
if !exists[i] {
kzgHashes = append(kzgHashes, primitives.ConvertKzgCommitmentToVersionedHash(commitment))
}
}
if len(kzgHashes) == 0 {
return nil, nil
}

// Fetch blobs from EL
blobs, err := s.GetBlobs(ctx, kzgHashes)
if err != nil {
return nil, errors.Wrap(err, "could not get blobs")
}
if len(blobs) == 0 {
return nil, nil
}

header, err := block.Header()
if err != nil {
return nil, errors.Wrap(err, "could not get header")
}

// Reconstruct verified blob sidecars
var verifiedBlobs []blocks.VerifiedROBlob
for i, blobIndex := 0, 0; i < len(kzgCommitments); i++ {
if exists[i] {
continue
}

if blobIndex >= len(blobs) || blobs[blobIndex] == nil {
blobIndex++
continue
}
blob := blobs[blobIndex]
blobIndex++

proof, err := blocks.MerkleProofKZGCommitment(blockBody, i)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to get Merkle proof for KZG commitment")
continue
}
sidecar := &ethpb.BlobSidecar{
Index: uint64(i),
Blob: blob.Blob,
KzgCommitment: kzgCommitments[i],
KzgProof: blob.KzgProof,
SignedBlockHeader: header,
CommitmentInclusionProof: proof,
}

roBlob, err := blocks.NewROBlobWithRoot(sidecar, blockRoot)
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to create RO blob with root")
continue
}

// Verify the sidecar KZG proof
v := s.blobVerifier(roBlob, verification.ELMemPoolRequirements)
if err := v.SidecarKzgProofVerified(); err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify KZG proof for sidecar")
continue
}

verifiedBlob, err := v.VerifiedROBlob()
if err != nil {
log.WithError(err).WithField("index", i).Error("failed to verify RO blob")
continue
}

verifiedBlobs = append(verifiedBlobs, verifiedBlob)
}

return verifiedBlobs, nil
}

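For illustration only (not part of the diff): a sketch of driving ReconstructBlobSidecars with the `exists` mask described in the comment above, so only the sidecars missing locally are requested from the execution layer. It is written as if it lived inside the execution package; haveBlob is a hypothetical local-storage lookup, and the consensus-types/interfaces import path is assumed.

package execution

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// backfillBlobSidecars marks the commitments whose sidecars are already stored
// and lets ReconstructBlobSidecars fetch and verify only the missing ones.
func backfillBlobSidecars(
	ctx context.Context,
	r Reconstructor,
	block interfaces.ReadOnlySignedBeaconBlock,
	blockRoot [32]byte,
	haveBlob func(index uint64) bool, // hypothetical lookup against local blob storage
) ([]blocks.VerifiedROBlob, error) {
	commitments, err := block.Block().Body().BlobKzgCommitments()
	if err != nil {
		return nil, err
	}
	exists := make([]bool, len(commitments))
	for i := range exists {
		exists[i] = haveBlob(uint64(i)) // true entries are skipped by ReconstructBlobSidecars
	}
	return r.ReconstructBlobSidecars(ctx, block, blockRoot, exists)
}
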
func fullPayloadFromPayloadBody(
header interfaces.ExecutionData, body *pb.ExecutionPayloadBody, bVersion int,
) (interfaces.ExecutionData, error) {
@@ -566,7 +685,7 @@ func fullPayloadFromPayloadBody(
Transactions: pb.RecastHexutilByteSlice(body.Transactions),
Withdrawals: body.Withdrawals,
}) // We can't get the block value and don't care about the block value for this instance
case version.Deneb:
case version.Deneb, version.Electra:
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
@@ -595,50 +714,6 @@ func fullPayloadFromPayloadBody(
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
}) // We can't get the block value and don't care about the block value for this instance
case version.Electra:
ebg, err := header.ExcessBlobGas()
if err != nil {
return nil, errors.Wrap(err, "unable to extract ExcessBlobGas attribute from execution payload header")
}
bgu, err := header.BlobGasUsed()
if err != nil {
return nil, errors.Wrap(err, "unable to extract BlobGasUsed attribute from execution payload header")
}
wr, err := pb.JsonWithdrawalRequestsToProto(body.WithdrawalRequests)
if err != nil {
return nil, err
}
dr, err := pb.JsonDepositRequestsToProto(body.DepositRequests)
if err != nil {
return nil, err
}
cr, err := pb.JsonConsolidationRequestsToProto(body.ConsolidationRequests)
if err != nil {
return nil, err
}
return blocks.WrappedExecutionPayloadElectra(
&pb.ExecutionPayloadElectra{
ParentHash: header.ParentHash(),
FeeRecipient: header.FeeRecipient(),
StateRoot: header.StateRoot(),
ReceiptsRoot: header.ReceiptsRoot(),
LogsBloom: header.LogsBloom(),
PrevRandao: header.PrevRandao(),
BlockNumber: header.BlockNumber(),
GasLimit: header.GasLimit(),
GasUsed: header.GasUsed(),
Timestamp: header.Timestamp(),
ExtraData: header.ExtraData(),
BaseFeePerGas: header.BaseFeePerGas(),
BlockHash: header.BlockHash(),
Transactions: pb.RecastHexutilByteSlice(body.Transactions),
Withdrawals: body.Withdrawals,
ExcessBlobGas: ebg,
BlobGasUsed: bgu,
DepositRequests: dr,
WithdrawalRequests: wr,
ConsolidationRequests: cr,
}) // We can't get the block value and don't care about the block value for this instance
default:
return nil, fmt.Errorf("unknown execution block version for payload %d", bVersion)
}
@@ -761,7 +836,7 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
Transactions: make([][]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
}, nil
case version.Deneb:
case version.Deneb, version.Electra:
return &pb.ExecutionPayloadDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
@@ -775,22 +850,6 @@ func buildEmptyExecutionPayload(v int) (proto.Message, error) {
Transactions: make([][]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
}, nil
case version.Electra:
return &pb.ExecutionPayloadElectra{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
Withdrawals: make([]*pb.Withdrawal, 0),
WithdrawalRequests: make([]*pb.WithdrawalRequest, 0),
DepositRequests: make([]*pb.DepositRequest, 0),
}, nil
default:
return nil, errors.Wrapf(ErrUnsupportedVersion, "version=%s", version.String(v))
}

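For illustration only (not part of the diff): the case-collapsing pattern used above. With execution requests carried separately (as in the NewPayload change earlier in this diff), Electra reuses the Deneb payload shape, so the Deneb and Electra switch cases merge. The helper below is a local sketch, not a Prysm API; only the version constants and version.String come from the codebase.

package enginesketch

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// emptyPayloadKind names the payload message built for a given fork version.
func emptyPayloadKind(v int) (string, error) {
	switch v {
	case version.Bellatrix:
		return "ExecutionPayload", nil
	case version.Capella:
		return "ExecutionPayloadCapella", nil
	case version.Deneb, version.Electra:
		// Electra shares the Deneb payload shape; requests travel separately.
		return "ExecutionPayloadDeneb", nil
	default:
		return "", fmt.Errorf("unsupported version %s", version.String(v))
	}
}
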
@@ -2,6 +2,7 @@ package execution

import (
"context"
"crypto/rand"
"encoding/json"
"fmt"
"io"
@@ -20,6 +21,7 @@ import (
"github.com/holiman/uint256"
"github.com/pkg/errors"
mocks "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -37,9 +39,9 @@ import (
)

var (
_ = PayloadReconstructor(&Service{})
_ = Reconstructor(&Service{})
_ = EngineCaller(&Service{})
_ = PayloadReconstructor(&Service{})
_ = Reconstructor(&Service{})
_ = EngineCaller(&mocks.EngineClient{})
)

@@ -123,7 +125,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayload(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -134,7 +136,7 @@ func TestClient_IPC(t *testing.T) {
require.Equal(t, true, ok)
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(req)
require.NoError(t, err)
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
latestValidHash, err := srv.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, bytesutil.ToBytes32(want.LatestValidHash), bytesutil.ToBytes32(latestValidHash))
})
@@ -163,7 +165,7 @@ func TestClient_HTTP(t *testing.T) {
cfg := params.BeaconConfig().Copy()
cfg.CapellaForkEpoch = 1
cfg.DenebForkEpoch = 2
cfg.ElectraForkEpoch = 2
cfg.ElectraForkEpoch = 3
params.OverrideBeaconConfig(cfg)

t.Run(GetPayloadMethod, func(t *testing.T) {
@@ -322,7 +324,7 @@ func TestClient_HTTP(t *testing.T) {
})
t.Run(GetPayloadMethodV4, func(t *testing.T) {
payloadId := [8]byte{1}
want, ok := fix["ExecutionPayloadElectraWithValue"].(*pb.GetPayloadV4ResponseJson)
want, ok := fix["ExecutionBundleElectra"].(*pb.GetPayloadV4ResponseJson)
require.Equal(t, true, ok)
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
@@ -358,7 +360,7 @@ func TestClient_HTTP(t *testing.T) {
client.rpcClient = rpcClient

// We call the RPC method via HTTP and expect a proper result.
resp, err := client.GetPayload(ctx, payloadId, 2*params.BeaconConfig().SlotsPerEpoch)
resp, err := client.GetPayload(ctx, payloadId, 3*params.BeaconConfig().SlotsPerEpoch)
require.NoError(t, err)
require.Equal(t, true, resp.OverrideBuilder)
g, err := resp.ExecutionData.ExcessBlobGas()
@@ -374,18 +376,35 @@ func TestClient_HTTP(t *testing.T) {
require.DeepEqual(t, proofs, resp.BlobsBundle.Proofs)
blobs := [][]byte{bytesutil.PadTo([]byte("a"), fieldparams.BlobLength), bytesutil.PadTo([]byte("b"), fieldparams.BlobLength)}
require.DeepEqual(t, blobs, resp.BlobsBundle.Blobs)
ede, ok := resp.ExecutionData.(interfaces.ExecutionDataElectra)
require.Equal(t, true, ok)
require.NotNil(t, ede.WithdrawalRequests())
wrequestsNotOverMax := len(ede.WithdrawalRequests()) <= int(params.BeaconConfig().MaxWithdrawalRequestsPerPayload)
require.Equal(t, true, wrequestsNotOverMax)
require.NotNil(t, ede.DepositRequests())
drequestsNotOverMax := len(ede.DepositRequests()) <= int(params.BeaconConfig().MaxDepositRequestsPerPayload)
require.Equal(t, true, drequestsNotOverMax)
require.NotNil(t, ede.ConsolidationRequests())
consolidationsNotOverMax := len(ede.ConsolidationRequests()) <= int(params.BeaconConfig().MaxConsolidationsRequestsPerPayload)
require.Equal(t, true, consolidationsNotOverMax)
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
Amount: params.BeaconConfig().MinActivationBalance,
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
Index: 0,
},
},
Withdrawals: []*pb.WithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
Amount: params.BeaconConfig().MinActivationBalance,
},
},
Consolidations: []*pb.ConsolidationRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
},
},
}

require.DeepEqual(t, requests, resp.ExecutionRequests)
})

t.Run(ForkchoiceUpdatedMethod+" VALID status", func(t *testing.T) {
forkChoiceState := &pb.ForkchoiceState{
HeadBlockHash: []byte("head"),
@@ -536,7 +555,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -550,7 +569,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -564,21 +583,46 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV4+" VALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["ValidPayloadStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV4Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
Amount: params.BeaconConfig().MinActivationBalance,
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
Index: 0,
},
},
Withdrawals: []*pb.WithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
Amount: params.BeaconConfig().MinActivationBalance,
},
},
Consolidations: []*pb.ConsolidationRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
},
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.NoError(t, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -592,7 +636,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -606,7 +650,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -620,21 +664,46 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV4+" SYNCING status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["SyncingStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV4Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
Amount: params.BeaconConfig().MinActivationBalance,
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
Index: 0,
},
},
Withdrawals: []*pb.WithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
Amount: params.BeaconConfig().MinActivationBalance,
},
},
Consolidations: []*pb.ConsolidationRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
},
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrAcceptedSyncingPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -648,7 +717,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -662,7 +731,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -676,21 +745,45 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(NewPayloadMethodV4+" INVALID_BLOCK_HASH status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidBlockHashStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV4Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
Amount: params.BeaconConfig().MinActivationBalance,
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
Index: 0,
},
},
Withdrawals: []*pb.WithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
Amount: params.BeaconConfig().MinActivationBalance,
},
},
Consolidations: []*pb.ConsolidationRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
},
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrInvalidBlockHashPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -704,7 +797,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -718,7 +811,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadCapella(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -732,21 +825,46 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, nil)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
t.Run(NewPayloadMethodV4+" INVALID status", func(t *testing.T) {
execPayload, ok := fix["ExecutionPayloadElectra"].(*pb.ExecutionPayloadElectra)
execPayload, ok := fix["ExecutionPayloadDeneb"].(*pb.ExecutionPayloadDeneb)
require.Equal(t, true, ok)
want, ok := fix["InvalidStatus"].(*pb.PayloadStatus)
require.Equal(t, true, ok)
client := newPayloadV4Setup(t, want, execPayload)

// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayloadElectra(execPayload)
wrappedPayload, err := blocks.WrappedExecutionPayloadDeneb(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'})
requests := &pb.ExecutionRequests{
Deposits: []*pb.DepositRequest{
{
Pubkey: bytesutil.PadTo([]byte{byte('a')}, fieldparams.BLSPubkeyLength),
WithdrawalCredentials: bytesutil.PadTo([]byte{byte('b')}, fieldparams.RootLength),
Amount: params.BeaconConfig().MinActivationBalance,
Signature: bytesutil.PadTo([]byte{byte('c')}, fieldparams.BLSSignatureLength),
Index: 0,
},
},
Withdrawals: []*pb.WithdrawalRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('d')}, common.AddressLength),
ValidatorPubkey: bytesutil.PadTo([]byte{byte('e')}, fieldparams.BLSPubkeyLength),
Amount: params.BeaconConfig().MinActivationBalance,
},
},
Consolidations: []*pb.ConsolidationRequest{
{
SourceAddress: bytesutil.PadTo([]byte{byte('f')}, common.AddressLength),
SourcePubkey: bytesutil.PadTo([]byte{byte('g')}, fieldparams.BLSPubkeyLength),
TargetPubkey: bytesutil.PadTo([]byte{byte('h')}, fieldparams.BLSPubkeyLength),
},
},
}
client := newPayloadV4Setup(t, want, execPayload, requests)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{'a'}, requests)
require.ErrorIs(t, ErrInvalidPayloadStatus, err)
require.DeepEqual(t, want.LatestValidHash, resp)
})
@@ -760,7 +878,7 @@ func TestClient_HTTP(t *testing.T) {
// We call the RPC method via HTTP and expect a proper result.
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{})
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.DeepEqual(t, []uint8(nil), resp)
})
@@ -1417,10 +1535,9 @@ func fixtures() map[string]interface{} {
"ExecutionPayload": s.ExecutionPayload,
"ExecutionPayloadCapella": s.ExecutionPayloadCapella,
"ExecutionPayloadDeneb": s.ExecutionPayloadDeneb,
"ExecutionPayloadElectra": s.ExecutionPayloadElectra,
"ExecutionPayloadCapellaWithValue": s.ExecutionPayloadWithValueCapella,
"ExecutionPayloadDenebWithValue": s.ExecutionPayloadWithValueDeneb,
"ExecutionPayloadElectraWithValue": s.ExecutionPayloadWithValueElectra,
"ExecutionBundleElectra": s.ExecutionBundleElectra,
"ValidPayloadStatus": s.ValidPayloadStatus,
"InvalidBlockHashStatus": s.InvalidBlockHashStatus,
"AcceptedStatus": s.AcceptedStatus,
@@ -1558,40 +1675,6 @@ func fixturesStruct() *payloadFixtures {
TargetPubkey: &tPubkey,
}
}
dr, err := pb.JsonDepositRequestsToProto(depositRequests)
if err != nil {
panic(err)
}
wr, err := pb.JsonWithdrawalRequestsToProto(withdrawalRequests)
if err != nil {
panic(err)
}
cr, err := pb.JsonConsolidationRequestsToProto(consolidationRequests)
if err != nil {
panic(err)
}
executionPayloadFixtureElectra := &pb.ExecutionPayloadElectra{
ParentHash: foo[:],
FeeRecipient: bar,
StateRoot: foo[:],
ReceiptsRoot: foo[:],
LogsBloom: baz,
PrevRandao: foo[:],
BlockNumber: 1,
GasLimit: 1,
GasUsed: 1,
Timestamp: 1,
ExtraData: foo[:],
BaseFeePerGas: bytesutil.PadTo(baseFeePerGas.Bytes(), fieldparams.RootLength),
BlockHash: foo[:],
Transactions: [][]byte{foo[:]},
Withdrawals: []*pb.Withdrawal{},
BlobGasUsed: 2,
ExcessBlobGas: 3,
DepositRequests: dr,
WithdrawalRequests: wr,
ConsolidationRequests: cr,
}
hexUint := hexutil.Uint64(1)
executionPayloadWithValueFixtureCapella := &pb.GetPayloadV2ResponseJson{
ExecutionPayload: &pb.ExecutionPayloadCapellaJSON{
@@ -1641,28 +1724,44 @@ func fixturesStruct() *payloadFixtures {
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
}
executionPayloadWithValueFixtureElectra := &pb.GetPayloadV4ResponseJson{

depositRequestBytes, err := hexutil.Decode("0x610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
"620000000000000000000000000000000000000000000000000000000000000000" +
"4059730700000063000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
"00000000000000000000000000000000000000000000000000000000000000000000000000000000")
if err != nil {
panic("failed to decode deposit request")
}
withdrawalRequestBytes, err := hexutil.Decode("0x6400000000000000000000000000000000000000" +
"6500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040597307000000")
if err != nil {
panic("failed to decode withdrawal request")
}
consolidationRequestBytes, err := hexutil.Decode("0x6600000000000000000000000000000000000000" +
"670000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
"680000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
if err != nil {
panic("failed to decode consolidation request")
}
executionBundleFixtureElectra := &pb.GetPayloadV4ResponseJson{
ShouldOverrideBuilder: true,
ExecutionPayload: &pb.ExecutionPayloadElectraJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
BlobGasUsed: &bgu,
ExcessBlobGas: &ebg,
DepositRequests: depositRequests,
WithdrawalRequests: withdrawalRequests,
ConsolidationRequests: consolidationRequests,
ExecutionPayload: &pb.ExecutionPayloadDenebJSON{
ParentHash: &common.Hash{'a'},
FeeRecipient: &common.Address{'b'},
StateRoot: &common.Hash{'c'},
ReceiptsRoot: &common.Hash{'d'},
LogsBloom: &hexutil.Bytes{'e'},
PrevRandao: &common.Hash{'f'},
BaseFeePerGas: "0x123",
BlockHash: &common.Hash{'g'},
Transactions: []hexutil.Bytes{{'h'}},
Withdrawals: []*pb.Withdrawal{},
BlockNumber: &hexUint,
GasLimit: &hexUint,
GasUsed: &hexUint,
Timestamp: &hexUint,
BlobGasUsed: &bgu,
ExcessBlobGas: &ebg,
},
BlockValue: "0x11fffffffff",
BlobsBundle: &pb.BlobBundleJSON{
@@ -1670,6 +1769,9 @@ func fixturesStruct() *payloadFixtures {
Proofs: []hexutil.Bytes{[]byte("proof1"), []byte("proof2")},
Blobs: []hexutil.Bytes{{'a'}, {'b'}},
},
ExecutionRequests: []hexutil.Bytes{append([]byte{pb.DepositRequestType}, depositRequestBytes...),
append([]byte{pb.WithdrawalRequestType}, withdrawalRequestBytes...),
append([]byte{pb.ConsolidationRequestType}, consolidationRequestBytes...)},
}
parent := bytesutil.PadTo([]byte("parentHash"), fieldparams.RootLength)
sha3Uncles := bytesutil.PadTo([]byte("sha3Uncles"), fieldparams.RootLength)
@@ -1762,10 +1864,9 @@ func fixturesStruct() *payloadFixtures {
ExecutionPayloadCapella: executionPayloadFixtureCapella,
ExecutionPayloadDeneb: executionPayloadFixtureDeneb,
EmptyExecutionPayloadDeneb: emptyExecutionPayloadDeneb,
ExecutionPayloadElectra: executionPayloadFixtureElectra,
ExecutionPayloadWithValueCapella: executionPayloadWithValueFixtureCapella,
ExecutionPayloadWithValueDeneb: executionPayloadWithValueFixtureDeneb,
ExecutionPayloadWithValueElectra: executionPayloadWithValueFixtureElectra,
ExecutionBundleElectra: executionBundleFixtureElectra,
ValidPayloadStatus: validStatus,
InvalidBlockHashStatus: inValidBlockHashStatus,
AcceptedStatus: acceptedStatus,
@@ -1787,10 +1888,9 @@ type payloadFixtures struct {
ExecutionPayloadCapella *pb.ExecutionPayloadCapella
EmptyExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionPayloadDeneb *pb.ExecutionPayloadDeneb
ExecutionPayloadElectra *pb.ExecutionPayloadElectra
ExecutionPayloadWithValueCapella *pb.GetPayloadV2ResponseJson
ExecutionPayloadWithValueDeneb *pb.GetPayloadV3ResponseJson
ExecutionPayloadWithValueElectra *pb.GetPayloadV4ResponseJson
ExecutionBundleElectra *pb.GetPayloadV4ResponseJson
ValidPayloadStatus *pb.PayloadStatus
InvalidBlockHashStatus *pb.PayloadStatus
AcceptedStatus *pb.PayloadStatus
@@ -2149,7 +2249,7 @@ func newPayloadV3Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
return service
}

func newPayloadV4Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadElectra) *Service {
func newPayloadV4Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.ExecutionPayloadDeneb, requests *pb.ExecutionRequests) *Service {
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
defer func() {
@@ -2158,14 +2258,28 @@ func newPayloadV4Setup(t *testing.T, status *pb.PayloadStatus, payload *pb.Execu
enc, err := io.ReadAll(r.Body)
require.NoError(t, err)
jsonRequestString := string(enc)
require.Equal(t, true, strings.Contains(
jsonRequestString, string("engine_newPayloadV4"),
))

reqArg, err := json.Marshal(payload)
reqPayload, err := json.Marshal(payload)
require.NoError(t, err)

// We expect the JSON string RPC request contains the right arguments.
require.Equal(t, true, strings.Contains(
jsonRequestString, string(reqArg),
jsonRequestString, string(reqPayload),
))

reqRequests, err := pb.EncodeExecutionRequests(requests)
require.NoError(t, err)

jsonRequests, err := json.Marshal(reqRequests)
require.NoError(t, err)

require.Equal(t, true, strings.Contains(
jsonRequestString, string(jsonRequests),
))

resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
@@ -2222,11 +2336,10 @@ func Test_ExchangeCapabilities(t *testing.T) {
defer func() {
require.NoError(t, r.Body.Close())
}()
exchangeCapabilities := &pb.ExchangeCapabilities{}
resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": exchangeCapabilities,
"result": []string{},
}
err := json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
@@ -2255,14 +2368,11 @@ func Test_ExchangeCapabilities(t *testing.T) {
defer func() {
require.NoError(t, r.Body.Close())
}()
exchangeCapabilities := &pb.ExchangeCapabilities{
SupportedMethods: []string{"A", "B", "C"},
}

resp := map[string]interface{}{
"jsonrpc": "2.0",
"id": 1,
"result": exchangeCapabilities,
"result": []string{"A", "B", "C"},
}
err := json.NewEncoder(w).Encode(resp)
require.NoError(t, err)
@@ -2284,3 +2394,122 @@ func Test_ExchangeCapabilities(t *testing.T) {
}
})
}

func TestReconstructBlobSidecars(t *testing.T) {
client := &Service{capabilityCache: &capabilityCache{}}
b := util.NewBeaconBlockDeneb()
kzgCommitments := createRandomKzgCommitments(t, 6)
|
||||
|
||||
b.Block.Body.BlobKzgCommitments = kzgCommitments
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
sb, err := blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
|
||||
ctx := context.Background()
|
||||
t.Run("all seen", func(t *testing.T) {
|
||||
exists := []bool{true, true, true, true, true, true}
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
t.Run("get-blobs end point is not supported", func(t *testing.T) {
|
||||
exists := []bool{true, true, true, true, true, false}
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}
|
||||
|
||||
t.Run("recovered 6 missing blobs", func(t *testing.T) {
|
||||
srv := createBlobServer(t, 6)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClient(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
exists := [6]bool{}
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists[:])
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
t.Run("recovered 3 missing blobs", func(t *testing.T) {
|
||||
srv := createBlobServer(t, 3)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClient(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
exists := []bool{true, false, true, false, true, false}
|
||||
verifiedBlobs, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, len(verifiedBlobs))
|
||||
})
|
||||
|
||||
t.Run("kzg is longer than exist", func(t *testing.T) {
|
||||
srv := createBlobServer(t, 3)
|
||||
defer srv.Close()
|
||||
|
||||
rpcClient, client := setupRpcClient(t, srv.URL, client)
|
||||
defer rpcClient.Close()
|
||||
|
||||
exists := []bool{true, false, true, false, true}
|
||||
_, err := client.ReconstructBlobSidecars(ctx, sb, r, exists)
|
||||
require.ErrorContains(t, "length of KZG commitments (6) is greater than length of exists (5)", err)
|
||||
})
|
||||
}
|
||||
|
||||
func createRandomKzgCommitments(t *testing.T, num int) [][]byte {
|
||||
kzgCommitments := make([][]byte, num)
|
||||
for i := range kzgCommitments {
|
||||
kzgCommitments[i] = make([]byte, 48)
|
||||
_, err := rand.Read(kzgCommitments[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
return kzgCommitments
|
||||
}
|
||||
|
||||
func createBlobServer(t *testing.T, numBlobs int) *httptest.Server {
|
||||
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
|
||||
blobs := make([]pb.BlobAndProofJson, numBlobs)
|
||||
for i := range blobs {
|
||||
blobs[i] = pb.BlobAndProofJson{Blob: []byte(fmt.Sprintf("blob%d", i+1)), KzgProof: []byte(fmt.Sprintf("proof%d", i+1))}
|
||||
}
|
||||
|
||||
respJSON := map[string]interface{}{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": blobs,
|
||||
}
|
||||
require.NoError(t, json.NewEncoder(w).Encode(respJSON))
|
||||
}))
|
||||
}
|
||||
|
||||
func setupRpcClient(t *testing.T, url string, client *Service) (*rpc.Client, *Service) {
|
||||
rpcClient, err := rpc.DialHTTP(url)
|
||||
require.NoError(t, err)
|
||||
|
||||
client.rpcClient = rpcClient
|
||||
client.capabilityCache = &capabilityCache{capabilities: map[string]interface{}{GetBlobsV1: nil}}
|
||||
client.blobVerifier = testNewBlobVerifier()
|
||||
|
||||
return rpcClient, client
|
||||
}
|
||||
|
||||
func testNewBlobVerifier() verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return &verification.MockBlobVerifier{
|
||||
CbVerifiedROBlob: func() (blocks.VerifiedROBlob, error) {
|
||||
return blocks.VerifiedROBlob{}, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package execution
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
@@ -131,21 +130,10 @@ func TestParseRequest(t *testing.T) {
|
||||
strToHexBytes(t, "0x66756c6c00000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByHashV2,
|
||||
byteArgs: []hexutil.Bytes{
|
||||
strToHexBytes(t, "0x656d707479000000000000000000000000000000000000000000000000000000"),
|
||||
strToHexBytes(t, "0x66756c6c00000000000000000000000000000000000000000000000000000000"),
|
||||
},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByRangeV1,
|
||||
hexArgs: []string{hexutil.EncodeUint64(0), hexutil.EncodeUint64(1)},
|
||||
},
|
||||
{
|
||||
method: GetPayloadBodiesByRangeV2,
|
||||
hexArgs: []string{hexutil.EncodeUint64(math.MaxUint64), hexutil.EncodeUint64(1)},
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.method, func(t *testing.T) {
|
||||
@@ -191,9 +179,7 @@ func TestParseRequest(t *testing.T) {
|
||||
func TestCallCount(t *testing.T) {
|
||||
methods := []string{
|
||||
GetPayloadBodiesByHashV1,
|
||||
GetPayloadBodiesByHashV2,
|
||||
GetPayloadBodiesByRangeV1,
|
||||
GetPayloadBodiesByRangeV2,
|
||||
}
|
||||
cases := []struct {
|
||||
method string
|
||||
@@ -201,10 +187,8 @@ func TestCallCount(t *testing.T) {
|
||||
}{
|
||||
{method: GetPayloadBodiesByHashV1, count: 1},
|
||||
{method: GetPayloadBodiesByHashV1, count: 2},
|
||||
{method: GetPayloadBodiesByHashV2, count: 1},
|
||||
{method: GetPayloadBodiesByRangeV1, count: 1},
|
||||
{method: GetPayloadBodiesByRangeV1, count: 2},
|
||||
{method: GetPayloadBodiesByRangeV2, count: 1},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.method, func(t *testing.T) {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/network"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/authorization"
|
||||
)
|
||||
@@ -115,3 +116,11 @@ func WithJwtId(jwtId string) Option {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithVerifierWaiter gives the sync package direct access to the verifier waiter.
|
||||
func WithVerifierWaiter(v *verification.InitializerWaiter) Option {
|
||||
return func(s *Service) error {
|
||||
s.verifierWaiter = v
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
pb "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -87,10 +86,7 @@ func (r *blindedBlockReconstructor) addToBatch(b interfaces.ReadOnlySignedBeacon
|
||||
return nil
|
||||
}
|
||||
|
||||
func payloadBodyMethodForBlock(b interface{ Version() int }) string {
|
||||
if b.Version() > version.Deneb {
|
||||
return GetPayloadBodiesByHashV2
|
||||
}
|
||||
func payloadBodyMethodForBlock(_ interface{ Version() int }) string {
|
||||
return GetPayloadBodiesByHashV1
|
||||
}
|
||||
|
||||
@@ -243,9 +239,6 @@ func (r *blindedBlockReconstructor) unblinded() ([]interfaces.SignedBeaconBlock,
|
||||
return unblinded, nil
|
||||
}
|
||||
|
||||
func rangeMethodForHashMethod(method string) string {
|
||||
if method == GetPayloadBodiesByHashV2 {
|
||||
return GetPayloadBodiesByRangeV2
|
||||
}
|
||||
func rangeMethodForHashMethod(_ string) string {
|
||||
return GetPayloadBodiesByRangeV1
|
||||
}
|
||||
|
||||
@@ -25,33 +25,6 @@ func (v versioner) Version() int {
|
||||
return v.version
|
||||
}
|
||||
|
||||
func TestPayloadBodyMethodForBlock(t *testing.T) {
|
||||
cases := []struct {
|
||||
versions []int
|
||||
want string
|
||||
}{
|
||||
{
|
||||
versions: []int{version.Phase0, version.Altair, version.Bellatrix, version.Capella, version.Deneb},
|
||||
want: GetPayloadBodiesByHashV1,
|
||||
},
|
||||
{
|
||||
versions: []int{version.Electra},
|
||||
want: GetPayloadBodiesByHashV2,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
for _, v := range c.versions {
|
||||
t.Run(version.String(v), func(t *testing.T) {
|
||||
v := versioner{version: v}
|
||||
require.Equal(t, c.want, payloadBodyMethodForBlock(v))
|
||||
})
|
||||
}
|
||||
}
|
||||
t.Run("post-electra", func(t *testing.T) {
|
||||
require.Equal(t, GetPayloadBodiesByHashV2, payloadBodyMethodForBlock(versioner{version: version.Electra + 1}))
|
||||
})
|
||||
}
|
||||
|
||||
func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPayloadBody {
|
||||
body := &pb.ExecutionPayloadBody{}
|
||||
txs, err := ed.Transactions()
|
||||
@@ -64,12 +37,6 @@ func payloadToBody(t *testing.T, ed interfaces.ExecutionData) *pb.ExecutionPaylo
|
||||
for i := range txs {
|
||||
body.Transactions = append(body.Transactions, txs[i])
|
||||
}
|
||||
eed, isElectra := ed.(interfaces.ExecutionDataElectra)
|
||||
if isElectra {
|
||||
body.DepositRequests = pb.ProtoDepositRequestsToJson(eed.DepositRequests())
|
||||
body.WithdrawalRequests = pb.ProtoWithdrawalRequestsToJson(eed.WithdrawalRequests())
|
||||
body.ConsolidationRequests = pb.ProtoConsolidationRequestsToJson(eed.ConsolidationRequests())
|
||||
}
|
||||
return body
|
||||
}
|
||||
|
||||
@@ -132,7 +99,7 @@ func testBlindedBlockFixtures(t *testing.T) *blindedBlockFixtures {
|
||||
afterSkipBlock, _ := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, denebSlot(t)+3, 0, util.WithPayloadSetter(afterSkip))
|
||||
fx.afterSkipDeneb = blindedBlockWithHeader(t, afterSkipBlock)
|
||||
|
||||
electra := fixturesStruct().ExecutionPayloadElectra
|
||||
electra := fixturesStruct().ExecutionPayloadDeneb
|
||||
electra.BlockHash = bytesutil.PadTo([]byte("electra"), 32)
|
||||
electra.BlockNumber = 5
|
||||
electraBlock, _ := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, electraSlot(t), 0, util.WithElectraPayload(electra))
|
||||
@@ -164,6 +131,7 @@ func TestPayloadBodiesViaUnblinder(t *testing.T) {
|
||||
|
||||
payload, err := bbr.payloadForHeader(fx.denebBlock.blinded.header, fx.denebBlock.blinded.block.Version())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version.Deneb, fx.denebBlock.blinded.block.Version())
|
||||
unblindFull, err := blocks.BuildSignedBeaconBlockFromExecutionPayload(fx.denebBlock.blinded.block, payload)
|
||||
require.NoError(t, err)
|
||||
testAssertReconstructedEquivalent(t, fx.denebBlock.full, unblindFull)
|
||||
@@ -352,22 +320,6 @@ func TestReconstructBlindedBlockBatchFallbackToRange(t *testing.T) {
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
// separate methods for the electra block
|
||||
srv.register(GetPayloadBodiesByHashV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{nil}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByRangeV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
p := mockParseUintList(t, msg.Params)
|
||||
require.Equal(t, 2, len(p))
|
||||
start, count := p[0], p[1]
|
||||
require.Equal(t, fx.electra.blinded.header.BlockNumber(), start)
|
||||
require.Equal(t, uint64(1), count)
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{
|
||||
payloadToBody(t, fx.electra.blinded.header),
|
||||
}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
blind := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
fx.denebBlock.blinded.block,
|
||||
fx.emptyDenebBlock.blinded.block,
|
||||
@@ -386,13 +338,8 @@ func TestReconstructBlindedBlockBatchDenebAndElectra(t *testing.T) {
|
||||
t.Run("deneb and electra", func(t *testing.T) {
|
||||
cli, srv := newMockEngine(t)
|
||||
fx := testBlindedBlockFixtures(t)
|
||||
// The reconstructed should make separate calls for the deneb (v1) and electra (v2) blocks.
|
||||
srv.register(GetPayloadBodiesByHashV1, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{payloadToBody(t, fx.denebBlock.blinded.header)}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
srv.register(GetPayloadBodiesByHashV2, func(msg *jsonrpcMessage, w http.ResponseWriter, r *http.Request) {
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{payloadToBody(t, fx.electra.blinded.header)}
|
||||
executionPayloadBodies := []*pb.ExecutionPayloadBody{payloadToBody(t, fx.denebBlock.blinded.header), payloadToBody(t, fx.electra.blinded.header)}
|
||||
mockWriteResult(t, w, msg, executionPayloadBodies)
|
||||
})
|
||||
blinded := []interfaces.ReadOnlySignedBeaconBlock{
|
||||
|
||||
@@ -78,6 +78,13 @@ func (s *Service) pollConnectionStatus(ctx context.Context) {
|
||||
currClient.Close()
|
||||
}
|
||||
log.WithField("endpoint", logs.MaskCredentialsLogging(s.cfg.currHttpEndpoint.Url)).Info("Connected to new endpoint")
|
||||
|
||||
c, err := s.ExchangeCapabilities(ctx)
|
||||
if err != nil {
|
||||
errorLogger(err, "Could not exchange capabilities with execution client")
|
||||
}
|
||||
s.capabilityCache.save(c)
|
||||
|
||||
return
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Received cancelled context,closing existing powchain service")
|
||||
|
||||
@@ -29,7 +29,9 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/trie"
|
||||
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -155,6 +157,9 @@ type Service struct {
|
||||
lastReceivedMerkleIndex int64 // Keeps track of the last received index to prevent log spam.
|
||||
runError error
|
||||
preGenesisState state.BeaconState
|
||||
verifierWaiter *verification.InitializerWaiter
|
||||
blobVerifier verification.NewBlobVerifier
|
||||
capabilityCache *capabilityCache
|
||||
}
|
||||
|
||||
// NewService sets up a new instance with an ethclient when given a web3 endpoint as a string in the config.
|
||||
@@ -192,6 +197,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
lastReceivedMerkleIndex: -1,
|
||||
preGenesisState: genState,
|
||||
eth1HeadTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
|
||||
capabilityCache: &capabilityCache{},
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
@@ -229,6 +235,13 @@ func (s *Service) Start() {
|
||||
}
|
||||
}
|
||||
|
||||
v, err := s.verifierWaiter.WaitForInitializer(s.ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get verification initializer")
|
||||
return
|
||||
}
|
||||
s.blobVerifier = newBlobVerifierFromInitializer(v)
|
||||
|
||||
s.isRunning = true
|
||||
|
||||
// Poll the execution client connection and fallback if errors occur.
|
||||
@@ -886,3 +899,39 @@ func (s *Service) migrateOldDepositTree(eth1DataInDB *ethpb.ETH1ChainData) error
|
||||
func (s *Service) removeStartupState() {
|
||||
s.cfg.finalizedStateAtStartup = nil
|
||||
}
|
||||
|
||||
func newBlobVerifierFromInitializer(ini *verification.Initializer) verification.NewBlobVerifier {
|
||||
return func(b blocks.ROBlob, reqs []verification.Requirement) verification.BlobVerifier {
|
||||
return ini.NewBlobVerifier(b, reqs)
|
||||
}
|
||||
}
|
||||
|
||||
type capabilityCache struct {
|
||||
capabilities map[string]interface{}
|
||||
capabilitiesLock sync.RWMutex
|
||||
}
|
||||
|
||||
func (c *capabilityCache) save(cs []string) {
|
||||
c.capabilitiesLock.Lock()
|
||||
defer c.capabilitiesLock.Unlock()
|
||||
|
||||
if c.capabilities == nil {
|
||||
c.capabilities = make(map[string]interface{})
|
||||
}
|
||||
|
||||
for _, capability := range cs {
|
||||
c.capabilities[capability] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *capabilityCache) has(capability string) bool {
|
||||
c.capabilitiesLock.RLock()
|
||||
defer c.capabilitiesLock.RUnlock()
|
||||
|
||||
if c.capabilities == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := c.capabilities[capability]
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -19,8 +19,11 @@ import (
|
||||
dbutil "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/verification"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/trie"
|
||||
contracts "github.com/prysmaticlabs/prysm/v5/contracts/deposit"
|
||||
@@ -73,7 +76,7 @@ type goodNotifier struct {
|
||||
MockStateFeed *event.Feed
|
||||
}
|
||||
|
||||
func (g *goodNotifier) StateFeed() *event.Feed {
|
||||
func (g *goodNotifier) StateFeed() event.SubscriberSender {
|
||||
if g.MockStateFeed == nil {
|
||||
g.MockStateFeed = new(event.Feed)
|
||||
}
|
||||
@@ -92,10 +95,16 @@ func TestStart_OK(t *testing.T) {
|
||||
t.Cleanup(func() {
|
||||
server.Stop()
|
||||
})
|
||||
c := startup.NewClockSynchronizer()
|
||||
require.NoError(t, c.SetClock(startup.NewClock(time.Unix(0, 0), [32]byte{})))
|
||||
waiter := verification.NewInitializerWaiter(
|
||||
c, forkchoice.NewROForkChoice(nil), nil)
|
||||
|
||||
web3Service, err := NewService(context.Background(),
|
||||
WithHttpEndpoint(endpoint),
|
||||
WithDepositContractAddress(testAcc.ContractAddr),
|
||||
WithDatabase(beaconDB),
|
||||
WithVerifierWaiter(waiter),
|
||||
)
|
||||
require.NoError(t, err, "unable to setup execution service")
|
||||
web3Service = setDefaultMocks(web3Service)
|
||||
|
||||
@@ -36,10 +36,12 @@ type EngineClient struct {
|
||||
OverrideValidHash [32]byte
|
||||
GetPayloadResponse *blocks.GetPayloadResponse
|
||||
ErrGetPayload error
|
||||
BlobSidecars []blocks.VerifiedROBlob
|
||||
ErrorBlobSidecars error
|
||||
}
|
||||
|
||||
// NewPayload --
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash) ([]byte, error) {
|
||||
func (e *EngineClient) NewPayload(_ context.Context, _ interfaces.ExecutionData, _ []common.Hash, _ *common.Hash, _ *pb.ExecutionRequests) ([]byte, error) {
|
||||
return e.NewPayloadResp, e.ErrNewPayload
|
||||
}
|
||||
|
||||
@@ -54,7 +56,7 @@ func (e *EngineClient) ForkchoiceUpdated(
|
||||
}
|
||||
|
||||
// GetPayload --
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, s primitives.Slot) (*blocks.GetPayloadResponse, error) {
|
||||
func (e *EngineClient) GetPayload(_ context.Context, _ [8]byte, _ primitives.Slot) (*blocks.GetPayloadResponse, error) {
|
||||
return e.GetPayloadResponse, e.ErrGetPayload
|
||||
}
|
||||
|
||||
@@ -106,6 +108,11 @@ func (e *EngineClient) ReconstructFullBellatrixBlockBatch(
|
||||
return fullBlocks, nil
|
||||
}
|
||||
|
||||
// ReconstructBlobSidecars is a mock implementation of the ReconstructBlobSidecars method.
|
||||
func (e *EngineClient) ReconstructBlobSidecars(context.Context, interfaces.ReadOnlySignedBeaconBlock, [32]byte, []bool) ([]blocks.VerifiedROBlob, error) {
|
||||
return e.BlobSidecars, e.ErrorBlobSidecars
|
||||
}
|
||||
|
||||
// GetTerminalBlockHash --
|
||||
func (e *EngineClient) GetTerminalBlockHash(ctx context.Context, transitionTime uint64) ([]byte, bool, error) {
|
||||
ttd := new(big.Int)
|
||||
|
||||
@@ -18,6 +18,7 @@ go_library(
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -23,13 +23,13 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/forkchoice:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -11,4 +11,3 @@ var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
|
||||
var errInvalidNilCheckpoint = errors.New("invalid nil checkpoint")
|
||||
var errInvalidUnrealizedJustifiedEpoch = errors.New("invalid unrealized justified epoch")
|
||||
var errInvalidUnrealizedFinalizedEpoch = errors.New("invalid unrealized finalized epoch")
|
||||
var errNilBlockHeader = errors.New("invalid nil block header")
|
||||
|
||||
@@ -6,18 +6,17 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
forkchoice2 "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -105,26 +104,10 @@ func (f *ForkChoice) ProcessAttestation(ctx context.Context, validatorIndices []
|
||||
}
|
||||
|
||||
// InsertNode processes a new block by inserting it to the fork choice store.
|
||||
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, root [32]byte) error {
|
||||
func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, roblock consensus_blocks.ROBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.InsertNode")
|
||||
defer span.End()
|
||||
|
||||
slot := state.Slot()
|
||||
bh := state.LatestBlockHeader()
|
||||
if bh == nil {
|
||||
return errNilBlockHeader
|
||||
}
|
||||
parentRoot := bytesutil.ToBytes32(bh.ParentRoot)
|
||||
var payloadHash [32]byte
|
||||
if state.Version() >= version.Bellatrix {
|
||||
ph, err := state.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ph != nil {
|
||||
copy(payloadHash[:], ph.BlockHash())
|
||||
}
|
||||
}
|
||||
jc := state.CurrentJustifiedCheckpoint()
|
||||
if jc == nil {
|
||||
return errInvalidNilCheckpoint
|
||||
@@ -135,13 +118,20 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.BeaconState, ro
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
node, err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
node, err := f.store.insert(ctx, roblock, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
jc, fc = f.store.pullTips(state, node, jc, fc)
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
if err := f.updateCheckpoints(ctx, jc, fc); err != nil {
|
||||
_, remErr := f.store.removeNode(ctx, node)
|
||||
if remErr != nil {
|
||||
log.WithError(remErr).Error("could not remove node")
|
||||
}
|
||||
return errors.Wrap(err, "could not update checkpoints")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateCheckpoints update the checkpoints when inserting a new node.
|
||||
@@ -483,15 +473,8 @@ func (f *ForkChoice) InsertChain(ctx context.Context, chain []*forkchoicetypes.B
|
||||
return nil
|
||||
}
|
||||
for i := len(chain) - 1; i > 0; i-- {
|
||||
b := chain[i].Block
|
||||
r := chain[i-1].Block.ParentRoot()
|
||||
parentRoot := b.ParentRoot()
|
||||
payloadHash, err := blocks.GetBlockPayloadHash(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := f.store.insert(ctx,
|
||||
b.Slot(), r, parentRoot, payloadHash,
|
||||
chain[i].Block,
|
||||
chain[i].JustifiedCheckpoint.Epoch, chain[i].FinalizedCheckpoint.Epoch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package doublylinkedtree
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -31,7 +32,7 @@ func prepareForkchoiceState(
|
||||
payloadHash [32]byte,
|
||||
justifiedEpoch primitives.Epoch,
|
||||
finalizedEpoch primitives.Epoch,
|
||||
) (state.BeaconState, [32]byte, error) {
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
@@ -58,21 +59,40 @@ func prepareForkchoiceState(
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: payloadHash[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
f.votes = []Vote{
|
||||
{indexToHash(1), indexToHash(1), 0},
|
||||
@@ -93,15 +113,15 @@ func TestForkChoice_UpdateBalancesPositiveChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -124,15 +144,15 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
@@ -155,24 +175,24 @@ func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 6, indexToHash(6), indexToHash(5), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
require.Equal(t, true, f.IsCanonical(params.BeaconConfig().ZeroHash))
|
||||
require.Equal(t, false, f.IsCanonical(indexToHash(1)))
|
||||
@@ -186,24 +206,24 @@ func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'2'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'4'}, [32]byte{'2'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'5'}, [32]byte{'4'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'6'}, [32]byte{'5'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
f.store.nodeByRoot[[32]byte{'3'}].balance = 10
|
||||
require.NoError(t, f.store.treeRootNode.applyWeightChanges(ctx))
|
||||
@@ -232,15 +252,15 @@ func TestForkChoice_IsCanonicalReorg(t *testing.T) {
|
||||
func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
f.store.treeRootNode = f.store.nodeByRoot[indexToHash(1)]
|
||||
f.store.treeRootNode.parent = nil
|
||||
|
||||
@@ -264,12 +284,12 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
|
||||
require.NoError(t, err)
|
||||
@@ -279,12 +299,12 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'1'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 200, [32]byte{'3'}, [32]byte{'1'}, params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
|
||||
require.NoError(t, err)
|
||||
@@ -295,20 +315,20 @@ func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
// Insert a block it will be head
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
head, err := f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'a'}, head)
|
||||
|
||||
// Insert two extra blocks
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
head, err = f.Head(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{'c'}, head)
|
||||
@@ -377,36 +397,36 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
// \-- c -- f
|
||||
// \-- g
|
||||
// \ -- h -- i -- j
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'a'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'b'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 4, [32]byte{'e'}, [32]byte{'d'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 5, [32]byte{'f'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 6, [32]byte{'g'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 7, [32]byte{'h'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 8, [32]byte{'i'}, [32]byte{'h'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 9, [32]byte{'j'}, [32]byte{'i'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -497,18 +517,18 @@ func TestStore_CommonAncestor(t *testing.T) {
|
||||
|
||||
// a -- b -- c -- d
|
||||
f = setup(0, 0)
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 2, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 3, [32]byte{'d'}, [32]byte{'c'}, [32]byte{}, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, roblock))
|
||||
tests = []struct {
|
||||
name string
|
||||
r1 [32]byte
|
||||
@@ -589,7 +609,9 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
@@ -598,14 +620,16 @@ func TestStore_InsertChain(t *testing.T) {
|
||||
blk.Block.Slot = primitives.Slot(i)
|
||||
copiedRoot := root
|
||||
blk.Block.ParentRoot = copiedRoot[:]
|
||||
root, err = blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err = blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: wsb.Block(),
|
||||
roblock, err := blocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
blks = append(blks, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
|
||||
JustifiedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]},
|
||||
})
|
||||
root, err = blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
args := make([]*forkchoicetypes.BlockAndCheckpoints, 10)
|
||||
for i := 0; i < len(blks); i++ {
|
||||
@@ -669,26 +693,26 @@ func TestForkChoice_UpdateCheckpoints(t *testing.T) {
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
|
||||
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
|
||||
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 65, [32]byte{'h'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.newFinalized.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, fcs.InsertNode(ctx, st, roblock))
|
||||
// restart justifications cause insertion messed it up
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
@@ -714,9 +738,9 @@ func TestWeight(t *testing.T) {
f := setup(0, 0)

root := [32]byte{'a'}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 0, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))

n, ok := f.store.nodeByRoot[root]
require.Equal(t, true, ok)
@@ -746,9 +770,9 @@ func TestForkChoice_UnrealizedJustifiedPayloadBlockHash(t *testing.T) {
ctx := context.Background()
f := setup(0, 0)

st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, blkRoot))
require.NoError(t, f.InsertNode(ctx, st, roblock))

f.store.unrealizedJustifiedCheckpoint.Root = [32]byte{'a'}
got := f.UnrealizedJustifiedPayloadBlockHash()
@@ -759,90 +783,90 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()

st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 0, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
viable, err := f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)

// No Children
require.NoError(t, f.InsertNode(ctx, st, root))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 0})
require.NoError(t, f.InsertNode(ctx, st, blk))
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 0})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 2})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 2})
require.NoError(t, err)
require.Equal(t, true, viable)

st, bRoot, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, root, [32]byte{'B'}, 0, 0)
st, blk2, err := prepareForkchoiceState(ctx, 1, [32]byte{'b'}, blk.Root(), [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, bRoot))
require.NoError(t, f.InsertNode(ctx, st, blk2))

// Epoch start
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root()})
require.NoError(t, err)
require.Equal(t, true, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: root, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, false, viable)

// No Children but impossible checkpoint
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
require.NoError(t, err)
require.Equal(t, false, viable)

viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
require.NoError(t, err)
require.Equal(t, true, viable)

st, cRoot, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, bRoot, [32]byte{'C'}, 0, 0)
st, blk3, err := prepareForkchoiceState(ctx, 2, [32]byte{'c'}, blk2.Root(), [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, cRoot))
require.NoError(t, f.InsertNode(ctx, st, blk3))
|
||||
// Children in same epoch
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, viable)
|
||||
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, viable)
|
||||
|
||||
st, dRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, bRoot, [32]byte{'D'}, 0, 0)
|
||||
st, blk4, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch, [32]byte{'d'}, blk2.Root(), [32]byte{'D'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, dRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, blk4))
|
||||
|
||||
// Children in next epoch but boundary
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root()})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, viable)
|
||||
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, viable)
|
||||
|
||||
// Boundary block
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 1})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 1})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, viable)
|
||||
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: dRoot, Epoch: 0})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk4.Root(), Epoch: 0})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, viable)
|
||||
|
||||
// Children in next epoch
|
||||
st, eRoot, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, bRoot, [32]byte{'E'}, 0, 0)
|
||||
st, blk5, err := prepareForkchoiceState(ctx, params.BeaconConfig().SlotsPerEpoch+1, [32]byte{'e'}, blk2.Root(), [32]byte{'E'}, 0, 0)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, st, eRoot))
|
||||
require.NoError(t, f.InsertNode(ctx, st, blk5))
|
||||
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: bRoot, Epoch: 1})
|
||||
viable, err = f.IsViableForCheckpoint(&forkchoicetypes.Checkpoint{Root: blk2.Root(), Epoch: 1})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, viable)
|
||||
}
|
||||
@@ -850,14 +874,14 @@ func TestForkChoiceIsViableForCheckpoint(t *testing.T) {
func TestForkChoiceSlot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, root, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 3, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
// No Node
_, err = f.Slot(root)
_, err = f.Slot(blk.Root())
require.ErrorIs(t, ErrNilNode, err)

require.NoError(t, f.InsertNode(ctx, st, root))
slot, err := f.Slot(root)
require.NoError(t, f.InsertNode(ctx, st, blk))
slot, err := f.Slot(blk.Root())
require.NoError(t, err)
require.Equal(t, primitives.Slot(3), slot)
}
@@ -866,16 +890,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
root1 := [32]byte{'a'}
st, root, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 3, root1, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))

root2 := [32]byte{'b'}
st, root, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 3, root2, root1, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))

root, err = f.ParentRoot(root2)
root, err := f.ParentRoot(root2)
require.NoError(t, err)
require.Equal(t, root1, root)

@@ -887,3 +911,16 @@ func TestForkchoiceParentRoot(t *testing.T) {
require.NoError(t, err)
require.Equal(t, zeroHash, root)
}

func TestForkChoice_CleanupInserting(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
st, roblock, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 2)
f.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) {
return f.justifiedBalances, errors.New("mock err")
})

require.NoError(t, err)
require.NotNil(t, f.InsertNode(ctx, st, roblock))
require.Equal(t, false, f.HasNode(roblock.Root()))
}
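The new TestForkChoice_CleanupInserting case above works by swapping the store's balance lookup for a stub that always fails, so InsertNode must take its error path and leave no node behind. A minimal, self-contained Go sketch of that failure-injection pattern is below; store, balancesByRooter, and insert here are hypothetical stand-ins, not the Prysm types.

package main

import (
	"context"
	"errors"
	"fmt"
)

// balancesByRooter mirrors the shape of the hook the test overrides:
// given a block root, return justified balances or an error.
type balancesByRooter func(ctx context.Context, root [32]byte) ([]uint64, error)

// store is a toy stand-in for a forkchoice store keyed by block root.
type store struct {
	balances    balancesByRooter
	nodesByRoot map[[32]byte]struct{}
}

// insert registers the root only if the balance lookup succeeds; on failure it
// returns the error without touching the map, so no dangling node remains.
func (s *store) insert(ctx context.Context, root [32]byte) error {
	if _, err := s.balances(ctx, root); err != nil {
		return fmt.Errorf("insert %x: %w", root[:4], err)
	}
	s.nodesByRoot[root] = struct{}{}
	return nil
}

func main() {
	s := &store{
		// Failing stub, analogous to SetBalancesByRooter returning "mock err".
		balances: func(context.Context, [32]byte) ([]uint64, error) {
			return nil, errors.New("mock err")
		},
		nodesByRoot: map[[32]byte]struct{}{},
	}
	root := [32]byte{'a'}
	err := s.insert(context.Background(), root)
	_, has := s.nodesByRoot[root]
	fmt.Println(err != nil, has) // true false: a failed insert leaves no node
}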
@@ -14,15 +14,15 @@ import (
func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

// The updated balances of each node is 100
s := f.store
@@ -41,15 +41,15 @@ func TestNode_ApplyWeightChanges_PositiveChange(t *testing.T) {
func TestNode_ApplyWeightChanges_NegativeChange(t *testing.T) {
f := setup(0, 0)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

// The updated balances of each node is 100
s := f.store
@@ -72,9 +72,9 @@ func TestNode_UpdateBestDescendant_NonViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is not viable.
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 2, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

// Verify parent's best child and best descendant are `none`.
s := f.store
@@ -87,9 +87,9 @@ func TestNode_UpdateBestDescendant_ViableChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

s := f.store
assert.Equal(t, 1, len(s.treeRootNode.children))
@@ -100,12 +100,12 @@ func TestNode_UpdateBestDescendant_HigherWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 100
@@ -120,12 +120,12 @@ func TestNode_UpdateBestDescendant_LowerWeightChild(t *testing.T) {
f := setup(1, 1)
ctx := context.Background()
// Input child is the best descendant
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

s := f.store
s.nodeByRoot[indexToHash(1)].weight = 200
@@ -159,21 +159,21 @@ func TestNode_ViableForHead(t *testing.T) {
func TestNode_LeadsToViableHead(t *testing.T) {
f := setup(4, 3)
ctx := context.Background()
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
require.NoError(t, f.InsertNode(ctx, state, blk))
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(3), params.BeaconConfig().ZeroHash, 4, 3)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))

require.Equal(t, true, f.store.treeRootNode.leadsToViableHead(4, 5))
require.Equal(t, true, f.store.nodeByRoot[indexToHash(5)].leadsToViableHead(4, 5))
@@ -192,28 +192,28 @@ func TestNode_SetFullyValidated(t *testing.T) {
// \
// -- 5 (true)
//
state, blkRoot, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err := prepareForkchoiceState(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[1] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[1] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, params.BeaconConfig().ZeroHash))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[2] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[2] = f.store.nodeByRoot[blk.Root()]
require.NoError(t, f.SetOptimisticToValid(ctx, indexToHash(1)))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[3] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[3] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[4] = f.store.nodeByRoot[blkRoot]
state, blkRoot, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[4] = f.store.nodeByRoot[blk.Root()]
state, blk, err = prepareForkchoiceState(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
storeNodes[5] = f.store.nodeByRoot[blkRoot]
require.NoError(t, f.InsertNode(ctx, state, blk))
storeNodes[5] = f.store.nodeByRoot[blk.Root()]

opt, err := f.IsOptimistic(indexToHash(5))
require.NoError(t, err)
@@ -266,9 +266,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
driftGenesisTime(f, 1, 1)
root := [32]byte{'a'}
f.justifiedBalances = []uint64{10}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
state, blk, err := prepareForkchoiceState(ctx, 1, root, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -283,9 +283,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// late block
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
root = [32]byte{'b'}
state, blkRoot, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
state, blk, err = prepareForkchoiceState(ctx, 2, root, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -299,9 +299,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {
// very late block
driftGenesisTime(f, 3, ProcessAttestationsThreshold+1)
root = [32]byte{'c'}
state, blkRoot, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
state, blk, err = prepareForkchoiceState(ctx, 3, root, [32]byte{'b'}, [32]byte{'C'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -314,9 +314,9 @@ func TestNode_TimeStampsChecks(t *testing.T) {

// block from the future
root = [32]byte{'d'}
state, blkRoot, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
state, blk, err = prepareForkchoiceState(ctx, 5, root, [32]byte{'c'}, [32]byte{'D'}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
require.NoError(t, f.InsertNode(ctx, state, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
@@ -484,23 +484,23 @@ func TestForkChoice_missingProposerBoostRoots(t *testing.T) {
}
f.justifiedBalances = balances
driftGenesisTime(f, 1, 0)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'r'}, [32]byte{}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))

f.store.previousProposerBoostRoot = [32]byte{'p'}
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, [32]byte{'r'}, f.store.proposerBoostRoot)

f.store.proposerBoostRoot = [32]byte{'p'}
driftGenesisTime(f, 3, 0)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'a'}, [32]byte{'r'}, [32]byte{}, 1, 1)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err = f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
require.Equal(t, [32]byte{'p'}, f.store.proposerBoostRoot)
}
@@ -19,23 +19,23 @@ func TestForkChoice_ShouldOverrideFCU(t *testing.T) {
f.store.committeeWeight /= uint64(params.BeaconConfig().SlotsPerEpoch)
ctx := context.Background()
driftGenesisTime(f, 1, 0)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)

orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
driftGenesisTime(f, 2, orphanLateBlockFirstThreshold+1)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 2, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
t.Run("head is weak", func(t *testing.T) {
require.Equal(t, true, f.ShouldOverrideFCU())
@@ -117,23 +117,23 @@ func TestForkChoice_GetProposerHead(t *testing.T) {
ctx := context.Background()
driftGenesisTime(f, 1, 0)
parentRoot := [32]byte{'a'}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
st, blk, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{'A'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
attesters := make([]uint64, f.numActiveValidators-64)
for i := range attesters {
attesters[i] = uint64(i + 64)
}
f.ProcessAttestation(ctx, attesters, root, 0)
f.ProcessAttestation(ctx, attesters, blk.Root(), 0)

driftGenesisTime(f, 3, 1)
childRoot := [32]byte{'b'}
st, root, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
st, blk, err = prepareForkchoiceState(ctx, 2, childRoot, [32]byte{'a'}, [32]byte{'B'}, 0, 0)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, st, root))
require.NoError(t, f.InsertNode(ctx, st, blk))
headRoot, err := f.Head(ctx)
require.NoError(t, err)
require.Equal(t, root, headRoot)
require.Equal(t, blk.Root(), headRoot)
orphanLateBlockFirstThreshold := params.BeaconConfig().SecondsPerSlot / params.BeaconConfig().IntervalsPerSlot
f.store.headNode.timestamp -= params.BeaconConfig().SecondsPerSlot - orphanLateBlockFirstThreshold
t.Run("head is weak", func(t *testing.T) {
@@ -8,8 +8,10 @@ import (
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
"github.com/prysmaticlabs/prysm/v5/time/slots"
)
@@ -63,12 +65,24 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
// insert registers a new block node to the fork choice store's node list.
// It then updates the new node's parent with the best child and descendant node.
func (s *Store) insert(ctx context.Context,
slot primitives.Slot,
root, parentRoot, payloadHash [fieldparams.RootLength]byte,
roblock consensus_blocks.ROBlock,
justifiedEpoch, finalizedEpoch primitives.Epoch) (*Node, error) {
ctx, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.insert")
defer span.End()

root := roblock.Root()
block := roblock.Block()
slot := block.Slot()
parentRoot := block.ParentRoot()
var payloadHash [32]byte
if block.Version() >= version.Bellatrix {
execution, err := block.Body().Execution()
if err != nil {
return nil, err
}
copy(payloadHash[:], execution.BlockHash())
}

// Return if the block has been inserted into Store before.
if n, ok := s.nodeByRoot[root]; ok {
return n, nil
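The hunk above replaces the loose (slot, root, parentRoot, payloadHash) arguments with a single consensus_blocks.ROBlock and derives those fields inside insert. As a rough illustration of why bundling a block with its precomputed root is convenient, here is a minimal, self-contained Go sketch; beaconBlock, roBlock, newROBlock, and the sha256 digest are hypothetical stand-ins, not the Prysm types or its hash-tree-root.

package main

import (
	"crypto/sha256"
	"fmt"
)

// beaconBlock is a toy block carrying only the fields the store needs.
type beaconBlock struct {
	slot       uint64
	parentRoot [32]byte
	body       []byte
}

// roBlock is a hypothetical read-only wrapper in the spirit of ROBlock:
// the root is computed once at construction and then travels with the block,
// so callers cannot pass a root that disagrees with the block contents.
type roBlock struct {
	block beaconBlock
	root  [32]byte
}

func newROBlock(b beaconBlock) roBlock {
	// Stand-in for HashTreeRoot: any deterministic digest of the block.
	h := sha256.New()
	h.Write(b.parentRoot[:])
	h.Write(b.body)
	var r [32]byte
	copy(r[:], h.Sum(nil))
	return roBlock{block: b, root: r}
}

func (b roBlock) Root() [32]byte   { return b.root }
func (b roBlock) Slot() uint64     { return b.block.slot }
func (b roBlock) Parent() [32]byte { return b.block.parentRoot }

// insert consumes the bundle instead of several loose arguments, mirroring the
// signature change in the hunk above.
func insert(nodes map[[32]byte]uint64, blk roBlock) {
	nodes[blk.Root()] = blk.Slot()
}

func main() {
	nodes := map[[32]byte]uint64{}
	blk := newROBlock(beaconBlock{slot: 32, body: []byte("f")})
	insert(nodes, blk)
	fmt.Println(len(nodes), nodes[blk.Root()]) // 1 32
}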
@@ -107,7 +121,9 @@ func (s *Store) insert(ctx context.Context,
s.headNode = n
s.highestReceivedNode = n
} else {
return n, errInvalidParentRoot
delete(s.nodeByRoot, root)
delete(s.nodeByPayload, payloadHash)
return nil, errInvalidParentRoot
}
} else {
parent.children = append(parent.children, n)
@@ -128,7 +144,11 @@ func (s *Store) insert(ctx context.Context,
jEpoch := s.justifiedCheckpoint.Epoch
fEpoch := s.finalizedCheckpoint.Epoch
if err := s.treeRootNode.updateBestDescendant(ctx, jEpoch, fEpoch, slots.ToEpoch(currentSlot)); err != nil {
return n, err
_, remErr := s.removeNode(ctx, n)
if remErr != nil {
log.WithError(remErr).Error("could not remove node")
}
return nil, errors.Wrap(err, "could not update best descendants")
}
}
// Update metrics.
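The two hunks above also change what happens when insertion fails partway: instead of returning the half-registered node, the store now deletes it from nodeByRoot/nodeByPayload (or removes it via removeNode) and returns only the error, which is what TestForkChoice_CleanupInserting checks through HasNode. A minimal, self-contained Go sketch of that rollback-on-error shape follows; registry, byRoot, byPayload, and finish are hypothetical names, not the Prysm store.

package main

import (
	"errors"
	"fmt"
)

var errInvalidParent = errors.New("invalid parent root")

// registry is a toy store with two indices that must stay consistent,
// loosely mirroring nodeByRoot and nodeByPayload.
type registry struct {
	byRoot    map[[32]byte]int
	byPayload map[[32]byte]int
}

// insert adds the entry to both indices, then runs a follow-up step; if that
// later step fails, it rolls the maps back so no partially inserted entry
// survives the error.
func (r *registry) insert(root, payload [32]byte, slot int, finish func() error) error {
	r.byRoot[root] = slot
	r.byPayload[payload] = slot
	if err := finish(); err != nil {
		delete(r.byRoot, root)
		delete(r.byPayload, payload)
		return fmt.Errorf("could not finish insert: %w", err)
	}
	return nil
}

func main() {
	r := &registry{byRoot: map[[32]byte]int{}, byPayload: map[[32]byte]int{}}
	root, payload := [32]byte{'a'}, [32]byte{'A'}
	err := r.insert(root, payload, 1, func() error { return errInvalidParent })
	_, has := r.byRoot[root]
	fmt.Println(err != nil, has, len(r.byPayload)) // true false 0
}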