Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: dn4-att-fi...epbs-rebas (141 commits)
| Author | SHA1 | Date |
|---|---|---|
| | d17449df3b | |
| | fde1b935f2 | |
| | 2f2efe4d4f | |
| | ca16bf3d90 | |
| | 4836bd42cb | |
| | 12a84c8760 | |
| | bfe75727f6 | |
| | 694922b948 | |
| | b746a8f440 | |
| | 9986e64a09 | |
| | e81c2eb4b7 | |
| | 4bb0c66fc1 | |
| | d49274a334 | |
| | 501174d9a6 | |
| | ee435586ce | |
| | 48b3d80fc3 | |
| | 311074c627 | |
| | 7d3f5e2ddc | |
| | 8b79971447 | |
| | 13fd2c6579 | |
| | d116773806 | |
| | f1daf04e62 | |
| | 269badb1f9 | |
| | 17e81c2d8d | |
| | ab98d71d65 | |
| | 44b622dc43 | |
| | 271b445d0f | |
| | ffddf26e48 | |
| | c67f6000a3 | |
| | b59778f34b | |
| | 474c2104b4 | |
| | 39b21f34d6 | |
| | d11df8f26c | |
| | a5b2a7a81f | |
| | 5f92629ff9 | |
| | a21050d40e | |
| | db1060ad0e | |
| | 1aebd0ae98 | |
| | 4095cc4743 | |
| | 9425826ae0 | |
| | 59157f8258 | |
| | a330be52e4 | |
| | d685911591 | |
| | ae234b9470 | |
| | 2dcbad94b7 | |
| | 180761dfdb | |
| | d905f3b703 | |
| | bb809d8cb0 | |
| | 8b6319602b | |
| | 16d948a26c | |
| | 94e48cb27b | |
| | b92ac72e34 | |
| | 03b2e29e42 | |
| | d40521b292 | |
| | c987b2c0b5 | |
| | 19ce188245 | |
| | 97b9074a81 | |
| | f31e8efe95 | |
| | 41172f3890 | |
| | 5399702b47 | |
| | 09e07956ab | |
| | 9bd542ef02 | |
| | 83ddc98525 | |
| | 6f3290b4eb | |
| | 67f4544d6a | |
| | bd5ed500ec | |
| | 19f3d0a2c8 | |
| | e96321f6fe | |
| | 12ebe67f13 | |
| | 1829bc2d12 | |
| | ab89b525ab | |
| | 4a951f5d1c | |
| | 4ab14ba3ba | |
| | dc643c9f32 | |
| | 9fa49e7bc9 | |
| | 1139c90ab2 | |
| | 79d05a87bb | |
| | 1707cf3ec7 | |
| | bdbb850250 | |
| | b28b1ed6ce | |
| | 74bb0821a8 | |
| | 8025a483e2 | |
| | 0475631543 | |
| | f27092fa91 | |
| | 67cef41cbf | |
| | 258908d50e | |
| | 415a42a4aa | |
| | 25eae3acda | |
| | 956d9d108c | |
| | c285715f9f | |
| | 9382ae736d | |
| | f16ff45a6b | |
| | 8d6577be84 | |
| | 9de75b5376 | |
| | a7ba11df37 | |
| | 00aeea3656 | |
| | 9dbf979e77 | |
| | be60504512 | |
| | 1857496159 | |
| | ccf61e1700 | |
| | 4edbd2f9ef | |
| | 5179af1438 | |
| | c0f9689e30 | |
| | ff8240a04f | |
| | 847498c648 | |
| | 2633684339 | |
| | ab3f1963e2 | |
| | b87d02eeb3 | |
| | bcb4155523 | |
| | 77f10b9e0e | |
| | 928b707ef1 | |
| | 91c15247e5 | |
| | 5ef5b65ffe | |
| | 9ae97786c5 | |
| | 66d1bb54f6 | |
| | 4d98049054 | |
| | d5ff25b59d | |
| | a265cf08fa | |
| | f2ade3caff | |
| | e6ffc0701e | |
| | 61c296e075 | |
| | f264680739 | |
| | 8fe024f6a1 | |
| | 6b7dd833a3 | |
| | 060527032b | |
| | a29ecb6bbe | |
| | 54656e172f | |
| | 97c8adb003 | |
| | 2fe8614115 | |
| | 09accc7132 | |
| | 53f1f11c6d | |
| | 48fe9d9c4d | |
| | 4386c244e1 | |
| | 7ac522d8ff | |
| | 52cf3a155d | |
| | 83ed320826 | |
| | 616cdc1e8b | |
| | 361712e886 | |
| | 9ec8c6c4b5 | |
| | 073cf19b69 | |
| | 6ac8090599 | |
.github/workflows/changelog.yml (vendored, 33 lines removed)

@@ -1,33 +0,0 @@
name: CI

on:
  pull_request:
    branches:
      - develop

jobs:
  changed_files:
    runs-on: ubuntu-latest
    name: Check CHANGELOG.md
    steps:
      - uses: actions/checkout@v4
      - name: changelog modified
        id: changelog-modified
        uses: tj-actions/changed-files@v45
        with:
          files: CHANGELOG.md

      - name: List all changed files
        env:
          ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
        run: |
          if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
          then
            echo "CHANGELOG.md was modified.";
            exit 0;
          else
            echo "CHANGELOG.md was not modified.";
            echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
            echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
            exit 1;
          fi
.github/workflows/go.yml (vendored, 2 changed lines)

@@ -54,7 +54,7 @@ jobs:
      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v5
        with:
          version: v1.55.2
          version: v1.56.1
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

  build:

@@ -73,6 +73,7 @@ linters:
  - promlinter
  - protogetter
  - revive
  - spancheck
  - staticcheck
  - stylecheck
  - tagalign
CHANGELOG.md (93 changed lines)

@@ -4,17 +4,33 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...HEAD)
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...HEAD)

### Added

- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430)
- Electra EIP6110: Queue deposit [pr](https://github.com/prysmaticlabs/prysm/pull/14430).
- Add Bellatrix tests for light client functions.
- Add Discovery Rebooter Feature.
- Added GetBlockAttestationsV2 endpoint.
- Light client support: Consensus types for Electra
- Light client support: Consensus types for Electra.
- Added SubmitPoolAttesterSlashingV2 endpoint.
- Added SubmitAggregateAndProofsRequestV2 endpoint.
- Updated the `beacon-chain/monitor` package to Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14562)
- Added ListAttestationsV2 endpoint.
- Add ability to rollback node's internal state during processing.
- Change how unsafe protobuf state is created to prevent unnecessary copies.
- Added benchmarks for process slots for Capella, Deneb, Electra.
- Add helper to cast bytes to string without allocating memory.
- Added GetAggregatedAttestationV2 endpoint.
- Added SubmitAttestationsV2 endpoint.
- Validator REST mode Electra block support.
- Added validator index label to `validator_statuses` metric.
- Added Validator REST mode use of Attestation V2 endpoints and Electra attestations.
- PeerDAS: Added proto for `DataColumnIdentifier`, `DataColumnSidecar`, `DataColumnSidecarsByRangeRequest` and `MetadataV2`.
- Better attestation packing for Electra. [PR](https://github.com/prysmaticlabs/prysm/pull/14534)
- P2P: Add logs when a peer is (dis)connected. Add the reason of the disconnection when we initiate it.
- Added a Prometheus error counter metric for HTTP requests to track beacon node requests.
- Added a Prometheus error counter metric for SSE requests.

### Changed

@@ -23,6 +39,36 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Fix `engine_exchangeCapabilities` implementation.
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
- Switch to compounding when consolidating with source==target.
- Revert block db save when saving state fails.
- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
- Added missing Eth-Consensus-Version headers to GetBlockAttestationsV2 and GetAttesterSlashingsV2 endpoints.
- When instantiating new validators, explicitly set `Slashed` to false and move `EffectiveBalance` to match struct definition.
- Updated pgo profile for beacon chain with holesky data. This improves the profile guided
optimizations in the go compiler.
- Use read only state when computing the active validator list.
- Simplified `ExitedValidatorIndices`.
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`,`engine_getPayloadV4` are changed due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution.
- Fixed panic when http request to subscribe to event stream fails.
- Return early for blob reconstructor during capella fork.
- Updated block endpoint from V1 to V2.
- Rename instances of "deposit receipts" to "deposit requests".
- Non-blocking payload attribute event handling in beacon api [pr](https://github.com/prysmaticlabs/prysm/pull/14644).
- Updated light client protobufs. [PR](https://github.com/prysmaticlabs/prysm/pull/14650)
- Added `Eth-Consensus-Version` header to `ListAttestationsV2` and `GetAggregateAttestationV2` endpoints.
- Updated light client consensus types. [PR](https://github.com/prysmaticlabs/prysm/pull/14652)
- Fixed pending deposits processing on Electra.
- Modified `ListAttestationsV2`, `GetAttesterSlashingsV2` and `GetAggregateAttestationV2` endpoints to use slot to determine fork version.
- Improvements to HTTP response handling. [pr](https://github.com/prysmaticlabs/prysm/pull/14673)
- Updated `Blobs` endpoint to return additional metadata fields.

### Deprecated

@@ -31,15 +77,52 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
### Removed

- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
- Removed kzg proof check from blob reconstructor.

### Fixed

- Fixed mesh size by appending `gParams.Dhi = gossipSubDhi`
- Fix skipping partial withdrawals count.
- recover from panics when writing the event stream [pr](https://github.com/prysmaticlabs/prysm/pull/14545)
- wait for the async StreamEvent writer to exit before leaving the http handler, avoiding race condition panics [pr](https://github.com/prysmaticlabs/prysm/pull/14557)
- Certain deb files were returning a 404 which made building new docker images without an existing
cache impossible. This has been fixed with updates to rules_oci and bazel-lib.
- Fixed an issue where the length check between block body KZG commitments and the existing cache from the database was incompatible.
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API to return the corrected error format for malformed tokens
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
- Small log improvement, removing some redundant or duplicate logs
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- unskip electra merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
- Fix panic on attestation interface since we call data before validation
- corrects nil check on some interface attestation types
- temporary solution to handling electra attestation and attester_slashing events. [pr](14655)
- Diverse log improvements and comment additions.
- P2P: Avoid infinite loop when looking for peers in small networks.
- Fixed another rollback bug due to a context deadline.


### Security

## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16

This is a hotfix release with one change.

Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
new implementation contains a bug that can cause a panic in certain conditions. The issue is
difficult to reproduce reliably and we are still trying to determine the root cause, but in the
meantime we are issuing a patch that recovers from the panic to prevent the node from crashing.

This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the
prysm REST mode validator (a feature which requires the validator to be configured to use the beacon
api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
like https://github.com/ethpandaops/ethereum-metrics-exporter

### Fixed

- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)

## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15

@@ -73,6 +156,7 @@ Updating to this release is recommended at your convenience.
- fastssz version bump (better error messages).
- SSE implementation that sheds stuck clients. [pr](https://github.com/prysmaticlabs/prysm/pull/14413)
- Added GetPoolAttesterSlashingsV2 endpoint.
- Use engine API get-blobs for block subscriber to reduce block import latency and potentially reduce bandwidth.

### Changed

@@ -132,6 +216,7 @@ Updating to this release is recommended at your convenience.
- Light client support: fix light client attested header execution fields' wrong version bug.
- Testing: added custom matcher for better push settings testing.
- Registered `GetDepositSnapshot` Beacon API endpoint.
- Fix rolling back of a block due to a context deadline.

### Security
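The v5.1.2 note in the changelog above describes a hotfix that recovers from panics raised while writing the events stream. As a rough, self-contained sketch of that defer/recover pattern (this is not Prysm's events endpoint; the route and event payloads here are made up for illustration), a streaming handler can guard its write loop like this:

```go
package main

import (
	"fmt"
	"net/http"
)

// streamEvents is a made-up handler used only to illustrate the defer/recover
// pattern; it is not Prysm's events endpoint implementation.
func streamEvents(w http.ResponseWriter, _ *http.Request) {
	defer func() {
		// Contain a panic raised while writing to this client's stream so it
		// cannot crash the whole process; the v5.1.2 hotfix applies the same idea.
		if r := recover(); r != nil {
			fmt.Printf("recovered from panic in event stream writer: %v\n", r)
		}
	}()

	w.Header().Set("Content-Type", "text/event-stream")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		// A torn-down connection or misbehaving writer here is where a panic
		// could otherwise escape the handler.
		fmt.Fprintf(w, "data: event %d\n\n", i)
		flusher.Flush()
	}
}

func main() {
	http.HandleFunc("/eth/v1/events", streamEvents)
	_ = http.ListenAndServe(":8080", nil)
}
```

The point of the pattern is that a panic triggered by one misbehaving connection stays contained to that handler instead of taking down the node.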
@@ -101,9 +101,9 @@ http_archive(

http_archive(
name = "aspect_bazel_lib",
sha256 = "f5ea76682b209cc0bd90d0f5a3b26d2f7a6a2885f0c5f615e72913f4805dbb0d",
strip_prefix = "bazel-lib-2.5.0",
url = "https://github.com/aspect-build/bazel-lib/releases/download/v2.5.0/bazel-lib-v2.5.0.tar.gz",
sha256 = "a272d79bb0ac6b6965aa199b1f84333413452e87f043b53eca7f347a23a478e8",
strip_prefix = "bazel-lib-2.9.3",
url = "https://github.com/bazel-contrib/bazel-lib/releases/download/v2.9.3/bazel-lib-v2.9.3.tar.gz",
)

load("@aspect_bazel_lib//lib:repositories.bzl", "aspect_bazel_lib_dependencies", "aspect_bazel_lib_register_toolchains")

@@ -165,7 +165,7 @@ load("@rules_oci//oci:pull.bzl", "oci_pull")
oci_pull(
name = "linux_debian11_multiarch_base", # Debian bullseye
digest = "sha256:b82f113425c5b5c714151aaacd8039bc141821cdcd3c65202d42bdf9c43ae60b", # 2023-12-12
image = "gcr.io/distroless/cc-debian11",
image = "gcr.io/prysmaticlabs/distroless/cc-debian11",
platforms = [
"linux/amd64",
"linux/arm64/v8",
@@ -12,6 +12,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//api:go_default_library",
"//api/client:go_default_library",
"//api/server/structs:go_default_library",
"//config/fieldparams:go_default_library",
"//consensus-types:go_default_library",

@@ -14,6 +14,7 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/client"
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"

@@ -176,7 +177,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
err = non200Err(r)
return
}
res, err = io.ReadAll(r.Body)
res, err = io.ReadAll(io.LimitReader(r.Body, client.MaxBodySize))
if err != nil {
err = errors.Wrap(err, "error reading http response body from builder server")
return

@@ -358,7 +359,7 @@ func (c *Client) Status(ctx context.Context) error {
}

func non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
bodyBytes, err := io.ReadAll(io.LimitReader(response.Body, client.MaxErrBodySize))
var errMessage ErrorMessage
var body string
if err != nil {
@@ -10,11 +10,17 @@ import (
"github.com/pkg/errors"
)

const (
MaxBodySize int64 = 1 << 23 // 8MB default, WithMaxBodySize can override
MaxErrBodySize int64 = 1 << 17 // 128KB
)

// Client is a wrapper object around the HTTP client.
type Client struct {
hc *http.Client
baseURL *url.URL
token string
hc *http.Client
baseURL *url.URL
token string
maxBodySize int64
}

// NewClient constructs a new client with the provided options (ex WithTimeout).

@@ -26,8 +32,9 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
return nil, err
}
c := &Client{
hc: &http.Client{},
baseURL: u,
hc: &http.Client{},
baseURL: u,
maxBodySize: MaxBodySize,
}
for _, o := range opts {
o(c)

@@ -72,7 +79,7 @@ func (c *Client) NodeURL() string {
// Get is a generic, opinionated GET function to reduce boilerplate amongst the getters in this package.
func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byte, error) {
u := c.baseURL.ResolveReference(&url.URL{Path: path})
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody)
if err != nil {
return nil, err
}

@@ -89,7 +96,7 @@ func (c *Client) Get(ctx context.Context, path string, opts ...ReqOption) ([]byt
if r.StatusCode != http.StatusOK {
return nil, Non200Err(r)
}
b, err := io.ReadAll(r.Body)
b, err := io.ReadAll(io.LimitReader(r.Body, c.maxBodySize))
if err != nil {
return nil, errors.Wrap(err, "error reading http response body")
}
@@ -25,16 +25,16 @@ var ErrInvalidNodeVersion = errors.New("invalid node version response")
var ErrConnectionIssue = errors.New("could not connect")

// Non200Err is a function that parses an HTTP response to handle responses that are not 200 with a formatted error.
func Non200Err(response *http.Response) error {
bodyBytes, err := io.ReadAll(response.Body)
func Non200Err(r *http.Response) error {
b, err := io.ReadAll(io.LimitReader(r.Body, MaxErrBodySize))
var body string
if err != nil {
body = "(Unable to read response body.)"
} else {
body = "response body:\n" + string(bodyBytes)
body = "response body:\n" + string(b)
}
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
switch response.StatusCode {
msg := fmt.Sprintf("code=%d, url=%s, body=%s", r.StatusCode, r.Request.URL, body)
switch r.StatusCode {
case http.StatusNotFound:
return errors.Wrap(ErrNotFound, msg)
default:
@@ -93,6 +93,7 @@ func (h *EventStream) Subscribe(eventsChannel chan<- *Event) {
EventType: EventConnectionError,
Data: []byte(errors.Wrap(err, client.ErrConnectionIssue.Error()).Error()),
}
return
}

defer func() {

@@ -40,7 +40,7 @@ func TestNewEventStream(t *testing.T) {

func TestEventStream(t *testing.T) {
mux := http.NewServeMux()
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, r *http.Request) {
mux.HandleFunc("/eth/v1/events", func(w http.ResponseWriter, _ *http.Request) {
flusher, ok := w.(http.Flusher)
require.Equal(t, true, ok)
for i := 1; i <= 3; i++ {

@@ -79,3 +79,23 @@ func TestEventStream(t *testing.T) {
}
}
}

func TestEventStreamRequestError(t *testing.T) {
topics := []string{"head"}
eventsChannel := make(chan *Event, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// use valid url that will result in failed request with nil body
stream, err := NewEventStream(ctx, http.DefaultClient, "http://badhost:1234", topics)
require.NoError(t, err)

// error will happen when request is made, should be received over events channel
go stream.Subscribe(eventsChannel)

event := <-eventsChannel
if event.EventType != EventConnectionError {
t.Errorf("Expected event type %q, got %q", EventConnectionError, event.EventType)
}

}
@@ -46,3 +46,10 @@ func WithAuthenticationToken(token string) ClientOpt {
c.token = token
}
}

// WithMaxBodySize overrides the default max body size of 8MB.
func WithMaxBodySize(size int64) ClientOpt {
return func(c *Client) {
c.maxBodySize = size
}
}
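For context on the new option, here is a hedged usage sketch of how a caller might combine NewClient, Get, and WithMaxBodySize from the hunks above. The beacon node URL and endpoint path are placeholders, and the import path assumes these hunks live in the api/client package, as the surrounding imports suggest.

```go
package main

import (
	"context"
	"log"

	"github.com/prysmaticlabs/prysm/v5/api/client"
)

func main() {
	// Lower the response cap from the 8 MiB default (1 << 23) to 1 MiB;
	// Get then reads the body through io.LimitReader with this cap.
	c, err := client.NewClient("http://localhost:3500", client.WithMaxBodySize(1<<20))
	if err != nil {
		log.Fatal(err)
	}
	body, err := c.Get(context.Background(), "/eth/v1/node/version")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(body))
}
```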
@@ -21,11 +21,12 @@ type GetCommitteesResponse struct {
}

type ListAttestationsResponse struct {
Data []*Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}

type SubmitAttestationsRequest struct {
Data []*Attestation `json:"data"`
Data json.RawMessage `json:"data"`
}

type ListVoluntaryExitsResponse struct {

@@ -1,7 +1,10 @@
package structs

type SidecarsResponse struct {
Data []*Sidecar `json:"data"`
Version string `json:"version"`
Data []*Sidecar `json:"data"`
ExecutionOptimistic bool `json:"execution_optimistic"`
Finalized bool `json:"finalized"`
}

type Sidecar struct {

@@ -7,7 +7,8 @@ import (
)

type AggregateAttestationResponse struct {
Data *Attestation `json:"data"`
Version string `json:"version,omitempty"`
Data json.RawMessage `json:"data"`
}

type SubmitContributionAndProofsRequest struct {
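Since ListAttestationsResponse and AggregateAttestationResponse now carry a fork Version plus a raw json.RawMessage payload, a consumer has to pick the concrete attestation type before unmarshaling. A minimal sketch of that branching, assuming an Electra-specific struct name that is not part of this diff:

```go
package example

import (
	"encoding/json"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs"
)

// decodeAttestations branches on the Version value before unmarshaling the raw
// payload. structs.Attestation appears in the diff above;
// structs.AttestationElectra is an assumed name for the Electra-shaped type.
func decodeAttestations(resp *structs.ListAttestationsResponse) (interface{}, error) {
	if resp.Version == "electra" {
		var atts []*structs.AttestationElectra // assumption, not part of this diff
		return atts, json.Unmarshal(resp.Data, &atts)
	}
	var atts []*structs.Attestation
	return atts, json.Unmarshal(resp.Data, &atts)
}
```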
@@ -6,9 +6,11 @@ go_library(
"chain_info.go",
"chain_info_forkchoice.go",
"currently_syncing_block.go",
"currently_syncing_execution_payload_envelope.go",
"defragment.go",
"error.go",
"execution_engine.go",
"execution_engine_epbs.go",
"forkchoice_update_execution.go",
"head.go",
"head_sync_committee_info.go",

@@ -25,6 +27,8 @@ go_library(
"receive_attestation.go",
"receive_blob.go",
"receive_block.go",
"receive_execution_payload_envelope.go",
"receive_payload_attestation_message.go",
"service.go",
"tracked_proposer.go",
"weak_subjectivity_checks.go",

@@ -43,6 +47,7 @@ go_library(
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epbs:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",

@@ -97,6 +102,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

@@ -109,6 +115,7 @@ go_test(
"chain_info_norace_test.go",
"chain_info_test.go",
"checktags_test.go",
"epbs_test.go",
"error_test.go",
"execution_engine_test.go",
"forkchoice_update_execution_test.go",

@@ -124,6 +131,7 @@ go_test(
"process_block_test.go",
"receive_attestation_test.go",
"receive_block_test.go",
"receive_execution_payload_envelope_test.go",
"service_norace_test.go",
"service_test.go",
"setup_test.go",

@@ -177,6 +185,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//testing/util/random:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

@@ -42,14 +43,15 @@ type ForkchoiceFetcher interface {
GetProposerHead() [32]byte
SetForkChoiceGenesisTime(uint64)
UpdateHead(context.Context, primitives.Slot)
HighestReceivedBlockSlot() primitives.Slot
HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte)
ReceivedBlocksLastEpoch() (uint64, error)
InsertNode(context.Context, state.BeaconState, [32]byte) error
InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
NewSlot(context.Context, primitives.Slot) error
ProposerBoost() [32]byte
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
GetPTCVote(root [32]byte) primitives.PTCStatus
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.

@@ -118,6 +120,12 @@ type OptimisticModeFetcher interface {
IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// ExecutionPayloadFetcher defines a common interface that returns forkchoice
// information about payload block hashes
type ExecutionPayloadFetcher interface {
HashInForkchoice([32]byte) bool
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
s.cfg.ForkChoiceStore.RLock()

@@ -242,7 +250,7 @@ func (s *Service) HeadValidatorsIndices(ctx context.Context, epoch primitives.Ep
if !s.hasHeadState() {
return []primitives.ValidatorIndex{}, nil
}
return helpers.ActiveValidatorIndices(ctx, s.headState(ctx), epoch)
return helpers.ActiveValidatorIndices(ctx, s.headStateReadOnly(ctx), epoch)
}

// HeadGenesisValidatorsRoot returns genesis validators root of the head state.

@@ -399,6 +407,14 @@ func (s *Service) InForkchoice(root [32]byte) bool {
return s.cfg.ForkChoiceStore.HasNode(root)
}

// HashInForkchoice returns true if the given payload block hash is found in
// forkchoice
func (s *Service) HashInForkchoice(hash [32]byte) bool {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HasHash(hash)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -4,7 +4,9 @@ import (
"context"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

@@ -29,11 +31,11 @@ func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}

// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
// HighestReceivedBlockSlotRoot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte) {
s.cfg.ForkChoiceStore.RLock()
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
return s.cfg.ForkChoiceStore.HighestReceivedBlockSlotRoot()
}

// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice

@@ -44,10 +46,10 @@ func (s *Service) ReceivedBlocksLastEpoch() (uint64, error) {
}

// InsertNode is a wrapper for node insertion which is self locked
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
func (s *Service) InsertNode(ctx context.Context, st state.BeaconState, block consensus_blocks.ROBlock) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, root)
return s.cfg.ForkChoiceStore.InsertNode(ctx, st, block)
}

// ForkChoiceDump returns the corresponding value from forkchoice

@@ -99,3 +101,30 @@ func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
defer s.cfg.ForkChoiceStore.RUnlock()
return s.cfg.ForkChoiceStore.ParentRoot(root)
}

// GetPTCVote wraps a call to the corresponding method in forkchoice and checks
// the currently syncing status
// Warning: this method will return the current PTC status regardless of
// timeliness. A client MUST call this method when about to submit a PTC
// attestation, that is exactly at the threshold to submit the attestation.
func (s *Service) GetPTCVote(root [32]byte) primitives.PTCStatus {
s.cfg.ForkChoiceStore.RLock()
f := s.cfg.ForkChoiceStore.GetPTCVote()
s.cfg.ForkChoiceStore.RUnlock()
if f != primitives.PAYLOAD_ABSENT {
return f
}
f, isSyncing := s.payloadBeingSynced.isSyncing(root)
if isSyncing {
return f
}
return primitives.PAYLOAD_ABSENT
}

// insertPayloadEnvelope wraps a locked call to the corresponding method in
// forkchoice
func (s *Service) insertPayloadEnvelope(envelope interfaces.ROExecutionPayloadEnvelope) error {
s.cfg.ForkChoiceStore.Lock()
defer s.cfg.ForkChoiceStore.Unlock()
return s.cfg.ForkChoiceStore.InsertPayloadEnvelope(envelope)
}
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
@@ -36,9 +37,10 @@ func prepareForkchoiceState(
|
||||
blockRoot [32]byte,
|
||||
parentRoot [32]byte,
|
||||
payloadHash [32]byte,
|
||||
parentHash [32]byte,
|
||||
justified *ethpb.Checkpoint,
|
||||
finalized *ethpb.Checkpoint,
|
||||
) (state.BeaconState, [32]byte, error) {
|
||||
) (state.BeaconState, consensus_blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
@@ -59,7 +61,27 @@ func prepareForkchoiceState(
|
||||
|
||||
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
if err != nil {
|
||||
return nil, consensus_blocks.ROBlock{}, err
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: payloadHash[:],
|
||||
ParentHash: parentHash[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, consensus_blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
func TestHeadRoot_Nil(t *testing.T) {
|
||||
@@ -122,9 +144,9 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
|
||||
service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: []byte{'j'}}
|
||||
ofc := ðpb.Checkpoint{Root: []byte{'f'}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{Epoch: 6, Root: [32]byte{'j'}}))
|
||||
|
||||
@@ -316,24 +338,24 @@ func TestService_ChainHeads(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, [32]byte{'D'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
roots, slots := c.ChainHeads()
|
||||
require.Equal(t, 3, len(roots))
|
||||
@@ -413,12 +435,12 @@ func TestService_IsOptimistic(t *testing.T) {
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimistic(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -449,12 +471,12 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
|
||||
c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
st, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
|
||||
|
||||
opt, err := c.IsOptimisticForRoot(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -0,0 +1,36 @@
package blockchain

import (
    "sync"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

type currentlySyncingPayload struct {
    sync.Mutex
    roots map[[32]byte]primitives.PTCStatus
}

func (b *currentlySyncingPayload) set(envelope interfaces.ROExecutionPayloadEnvelope) {
    b.Lock()
    defer b.Unlock()
    if envelope.PayloadWithheld() {
        b.roots[envelope.BeaconBlockRoot()] = primitives.PAYLOAD_WITHHELD
    } else {
        b.roots[envelope.BeaconBlockRoot()] = primitives.PAYLOAD_PRESENT
    }
}

func (b *currentlySyncingPayload) unset(root [32]byte) {
    b.Lock()
    defer b.Unlock()
    delete(b.roots, root)
}

func (b *currentlySyncingPayload) isSyncing(root [32]byte) (status primitives.PTCStatus, isSyncing bool) {
    b.Lock()
    defer b.Unlock()
    status, isSyncing = b.roots[root]
    return
}
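A brief usage sketch of the new currentlySyncingPayload tracker, inferred from the methods above and from how Service.GetPTCVote consults it. The wrapping function is hypothetical and would live alongside the file above (same package and imports):

```go
// trackEnvelope is a hypothetical caller showing the intended lifecycle:
// record the envelope's PTC status while its payload is being synced, and
// clear it once processing finishes so GetPTCVote falls back to forkchoice.
func trackEnvelope(p *currentlySyncingPayload, env interfaces.ROExecutionPayloadEnvelope) {
	p.set(env)                           // stores PAYLOAD_WITHHELD or PAYLOAD_PRESENT
	defer p.unset(env.BeaconBlockRoot()) // always clear when finished

	if status, ok := p.isSyncing(env.BeaconBlockRoot()); ok {
		_ = status // surfaced to PTC voters through Service.GetPTCVote while syncing
	}
}
```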
beacon-chain/blockchain/epbs_test.go (new file, 18 lines)

@@ -0,0 +1,18 @@
package blockchain

import (
    "testing"

    doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestServiceGetPTCVote(t *testing.T) {
    c := &currentlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)}
    s := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, payloadBeingSynced: c}
    r := [32]byte{'r'}
    require.Equal(t, primitives.PAYLOAD_ABSENT, s.GetPTCVote(r))
    c.roots[r] = primitives.PAYLOAD_WITHHELD
    require.Equal(t, primitives.PAYLOAD_WITHHELD, s.GetPTCVote(r))
}
@@ -30,6 +30,9 @@ var (
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
// ErrNilHead is returned when no head is present in the blockchain service.
ErrNilHead = errors.New("nil head")
// errInvalidValidatorIndex is returned when a validator index is
// invalid or unexpected
errInvalidValidatorIndex = errors.New("invalid validator index")
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")
@@ -2,12 +2,16 @@ package blockchain

import (
"context"
"crypto/sha256"
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"

@@ -27,6 +31,8 @@ import (
"github.com/sirupsen/logrus"
)

const blobCommitmentVersionKZG uint8 = 0x01

var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:

@@ -69,6 +75,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
if arg.attributes == nil {
arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
if err != nil {
switch {

@@ -91,6 +98,14 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
log.WithError(err).Error("Could not set head root to invalid")
return nil, nil
}
if len(invalidRoots) == 0 {
log.WithFields(logrus.Fields{
"slot": headBlk.Slot(),
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
}).Warn("invalid payload")
return nil, nil
}

if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
log.WithError(err).Error("Could not remove invalid block and state")
return nil, nil

@@ -105,6 +120,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
}).Warn("Pruned invalid blocks, could not update head root")
return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
}

b, err := s.getBlock(ctx, r)
if err != nil {
log.WithError(err).Error("Could not get head block")

@@ -167,6 +183,38 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
return payloadID, nil
}

func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
if err != nil {
log.WithError(err).
WithField("head_root", cfg.headRoot[:]).
Error("Could not get proposer index for PayloadAttributes event")
return
}
evd := payloadattribute.EventData{
ProposerIndex: pidx,
ProposalSlot: cfg.headState.Slot(),
ParentBlockRoot: cfg.headRoot[:],
Attributer: cfg.attributes,
HeadRoot: cfg.headRoot,
HeadState: cfg.headState,
HeadBlock: cfg.headBlock,
}
if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
headPayload, err := cfg.headBlock.Block().Body().Execution()
if err != nil {
log.WithError(err).Error("Could not get execution payload for head block")
return
}
evd.ParentBlockHash = headPayload.BlockHash()
evd.ParentBlockNumber = headPayload.BlockNumber()
}
f.Send(&feed.Event{
Type: statefeed.PayloadAttributes,
Data: evd,
})
}

// getPayloadHash returns the payload hash given the block root.
// if the block is before bellatrix fork epoch, it returns the zero hash.
func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, error) {

@@ -328,7 +376,7 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,

var attr payloadattribute.Attributer
switch st.Version() {
case version.Deneb, version.Electra:
case version.Deneb, version.Electra, version.EPBS:
withdrawals, _, err := st.ExpectedWithdrawals()
if err != nil {
log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")

@@ -407,7 +455,13 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([

versionedHashes := make([]common.Hash, len(commitments))
for i, commitment := range commitments {
versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
}
return versionedHashes, nil
}

func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
versionedHash := sha256.Sum256(commitment)
versionedHash[0] = blobCommitmentVersionKZG
return versionedHash
}
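ConvertKzgCommitmentToVersionedHash above encodes the standard blob versioned-hash rule: SHA-256 the commitment, then overwrite the first byte with the 0x01 KZG version tag. A tiny standalone check of that computation (the zeroed 48-byte commitment is just a stand-in value):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	commitment := make([]byte, 48) // KZG commitments are 48 bytes
	versionedHash := sha256.Sum256(commitment)
	versionedHash[0] = 0x01 // blobCommitmentVersionKZG
	fmt.Printf("versioned hash: %#x\n", versionedHash)
}
```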
beacon-chain/blockchain/execution_engine_epbs.go (new file, 62 lines)

@@ -0,0 +1,62 @@
package blockchain

import (
    "context"
    "fmt"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/sirupsen/logrus"
)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdateEPBS(ctx context.Context, blockhash [32]byte, attributes payloadattribute.Attributer) (*enginev1.PayloadIDBytes, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdateEPBS")
    defer span.End()

    finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
    justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
    fcs := &enginev1.ForkchoiceState{
        HeadBlockHash: blockhash[:],
        SafeBlockHash: justifiedHash[:],
        FinalizedBlockHash: finalizedHash[:],
    }
    if attributes == nil {
        attributes = payloadattribute.EmptyWithVersion(version.EPBS)
    }
    payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attributes)
    if err != nil {
        switch {
        case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
            forkchoiceUpdatedOptimisticNodeCount.Inc()
            log.WithFields(logrus.Fields{
                "headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(blockhash[:])),
                "finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
            }).Info("Called fork choice updated with optimistic block")
            return payloadID, nil
        case errors.Is(err, execution.ErrInvalidPayloadStatus):
            log.WithError(err).Info("forkchoice updated to invalid block")
            return nil, invalidBlock{error: ErrInvalidPayload, root: [32]byte(lastValidHash)}
        default:
            log.WithError(err).Error(ErrUndefinedExecutionEngineError)
            return nil, nil
        }
    }
    forkchoiceUpdatedValidNodeCount.Inc()
    // If the forkchoice update call has an attribute, update the payload ID cache.
    hasAttr := attributes != nil && !attributes.IsEmpty()
    if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
        log.WithFields(logrus.Fields{
            "blockHash": fmt.Sprintf("%#x", blockhash[:]),
        }).Error("Received nil payload ID on VALID engine response")
    }
    return payloadID, nil
}
@@ -46,13 +46,13 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
|
||||
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -104,13 +104,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -287,16 +287,16 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
|
||||
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
|
||||
ojc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ofc := ðpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
@@ -316,10 +316,8 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
|
||||
headRoot: brd,
|
||||
}
|
||||
_, err = service.notifyForkchoiceUpdate(ctx, a)
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
require.Equal(t, brd, InvalidBlockRoot(err))
|
||||
require.Equal(t, brd, InvalidAncestorRoots(err)[0])
|
||||
require.Equal(t, 1, len(InvalidAncestorRoots(err)))
|
||||
// The incoming block is not invalid because the empty node is still valid on ePBS.
|
||||
require.Equal(t, false, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
//
|
||||
@@ -398,28 +396,28 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)

bState, _ := util.DeterministicGenesisState(t, 10)
require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, [32]byte{'D'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, [32]byte{'E'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, [32]byte{'F'}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -512,10 +510,10 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -692,7 +690,7 @@ func Test_NotifyNewPayload(t *testing.T) {
}
service.cfg.ExecutionEngineCaller = e
root := [32]byte{'a'}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
@@ -759,17 +757,17 @@ func Test_reportInvalidBlock(t *testing.T) {
service, tr := minimalTestService(t)
ctx, _, fcs := tr.ctx, tr.db, tr.fcs
jcp := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, [32]byte{}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, [32]byte{'a'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, [32]byte{'b'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))

st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, [32]byte{'c'}, jcp, jcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
@@ -931,7 +929,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
fjc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash}
require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, fjc))
require.NoError(t, fcs.UpdateFinalizedCheckpoint(fjc))
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
fcs.SetOriginRoot(genesisRoot)
@@ -965,7 +963,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
tenjc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
tenfc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, tenjc, tenfc)
state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, [32]byte{}, tenjc, tenfc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
@@ -994,7 +992,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
twentyjc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
twentyfc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, twentyjc, twentyfc)
state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, [32]byte{}, twentyjc, twentyfc)
require.NoError(t, err)
fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -1056,8 +1054,8 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {

require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))

require.Equal(t, false, service.hasBlock(ctx, r1))
require.Equal(t, false, service.hasBlock(ctx, r2))
require.Equal(t, false, service.chainHasBlock(ctx, r1))
require.Equal(t, false, service.chainHasBlock(ctx, r2))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
has, err := service.cfg.StateGen.HasState(ctx, r1)
@@ -1135,9 +1133,14 @@ func TestComputePayloadAttribute(t *testing.T) {
// Cache hit, advance state, no fee recipient
slot := primitives.Slot(1)
service.cfg.PayloadIDCache.Set(slot, [32]byte{}, [8]byte{})
blk := util.NewBeaconBlockBellatrix()
signed, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
roblock, err := consensusblocks.NewROBlockWithRoot(signed, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
blockRoot: [32]byte{'a'},
ctx: ctx,
roblock: roblock,
}
fcu := &fcuConfig{
headState: st,
@@ -122,13 +122,13 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin

ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -164,10 +164,10 @@ func TestShouldOverrideFCU(t *testing.T) {
headRoot := [32]byte{'b'}
parentRoot := [32]byte{'a'}
ojc := &ethpb.Checkpoint{}
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, root))
@@ -48,7 +48,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -63,11 +63,11 @@ func TestSaveHead_Different(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -101,7 +101,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -110,7 +110,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
}

reorgChainParent := [32]byte{'B'}
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -122,7 +122,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -238,11 +238,11 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -304,7 +304,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -381,7 +381,7 @@ func TestSaveOrphanedOps(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -451,7 +451,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -509,7 +509,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -568,7 +568,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -583,7 +583,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))

@@ -603,7 +603,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)

st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })
@@ -45,28 +45,44 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if b.Version() >= version.EPBS {
sh, err := b.Body().SignedExecutionPayloadHeader()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
header, err := sh.Header()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
return err
}
log = log.WithFields(logrus.Fields{"payloadHash": fmt.Sprintf("%#x", header.BlockHash()),
"builderIndex": header.BuilderIndex(),
"value": header.Value(),
"blobKzgCommitmentsRoot": fmt.Sprintf("%#x", header.BlobKzgCommitmentsRoot()),
})
} else {
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
}
}
log.Info("Finished applying state transition")
@@ -112,6 +128,9 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte

// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
if block.Version() >= version.EPBS {
return nil
}
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")

@@ -182,6 +182,10 @@ var (
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
})
executionEngineProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "execution_engine_processing_milliseconds",
Help: "Total time to process an execution payload envelope in ReceiveExecutionPayloadEnvelope()",
})
dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "da_waited_time_milliseconds",
Help: "Total time spent waiting for a data availability check in ReceiveBlock()",
@@ -1,6 +1,8 @@
package blockchain

import (
"sync"

"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -69,6 +71,22 @@ func WithDepositCache(c cache.DepositCache) Option {
}
}

// WithPayloadAttestationCache for payload attestation cache.
func WithPayloadAttestationCache(c *cache.PayloadAttestationCache) Option {
return func(s *Service) error {
s.cfg.PayloadAttestationCache = c
return nil
}
}

// WithPayloadEnvelopeCache for payload envelope cache.
func WithPayloadEnvelopeCache(c *sync.Map) Option {
return func(s *Service) error {
s.cfg.PayloadEnvelopeCache = c
return nil
}
}

// WithPayloadIDCache for payload ID cache.
func WithPayloadIDCache(c *cache.PayloadIDCache) Option {
return func(s *Service) error {
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// We assume trusted attestation in this function has verified signature.

// Update forkchoice store with the new attestation for updating weight.
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Slot)

return nil
}
@@ -32,18 +32,18 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

cp := &ethpb.Checkpoint{}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

blkWithStateBadAtt := util.NewBeaconBlock()
blkWithStateBadAtt.Block.Slot = 1
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, blkRoot, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
BlkWithStateBadAttRoot, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
@@ -92,12 +92,12 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
{
name: "process nil attestation",
a: nil,
wantedErr: "attestation can't be nil",
wantedErr: "attestation is nil",
},
{
name: "process nil field (a.Data) in attestation",
a: &ethpb.Attestation{},
wantedErr: "attestation's data can't be nil",
wantedErr: "attestation is nil",
},
{
name: "process nil field (a.Target) in attestation",
@@ -139,9 +139,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
}

@@ -170,7 +170,7 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
@@ -202,7 +202,7 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: ckRoot}))

st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -259,7 +259,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))

st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
@@ -273,7 +273,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
_, err = service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)

st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, [32]byte{}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -298,7 +298,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, [32]byte{}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -318,10 +318,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r1))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err := service.getAttPreState(ctx, checkpoint)
require.NoError(t, err)
assert.Equal(t, params.BeaconConfig().SlotsPerEpoch.Mul(uint64(checkpoint.Epoch)), returned.Slot(), "Incorrectly returned base state")
@@ -337,10 +336,9 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, blkRoot, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, [32]byte{}, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, r2))
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err = service.getAttPreState(ctx, newCheckpoint)
require.NoError(t, err)
s, err := slots.EpochStart(newCheckpoint.Epoch)
@@ -7,8 +7,6 @@ import (

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -46,8 +44,7 @@ var initialSyncBlockCacheSize = uint64(2 * params.BeaconConfig().SlotsPerEpoch)
// process the beacon block after validating the state transition function
type postBlockProcessConfig struct {
ctx context.Context
signed interfaces.ReadOnlySignedBeaconBlock
blockRoot [32]byte
roblock consensusblocks.ROBlock
headRoot [32]byte
postState state.BeaconState
isValidPayload bool
@@ -61,31 +58,36 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
ctx, span := trace.StartSpan(cfg.ctx, "blockChain.onBlock")
defer span.End()
cfg.ctx = ctx
if err := consensusblocks.BeaconBlockIsNil(cfg.signed); err != nil {
if err := consensusblocks.BeaconBlockIsNil(cfg.roblock); err != nil {
return invalidBlock{error: err}
}
startTime := time.Now()
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
defer s.handleSecondFCUCall(cfg, fcuArgs)
if cfg.roblock.Version() < version.EPBS {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
}
defer s.sendLightClientFeeds(cfg)
defer s.sendStateFeedOnBlock(cfg)
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.signed.Block())
defer reportAttestationInclusion(cfg.roblock.Block())

err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.blockRoot)
err := s.cfg.ForkChoiceStore.InsertNode(ctx, cfg.postState, cfg.roblock)
if err != nil {
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.signed.Block().Slot())
// Do not use parent context in the event it deadlined
ctx = trace.NewContext(context.Background(), span)
s.rollbackBlock(ctx, cfg.roblock.Root())
return errors.Wrapf(err, "could not insert block %d to fork choice store", cfg.roblock.Block().Slot())
}
if err := s.handleBlockAttestations(ctx, cfg.signed.Block(), cfg.postState); err != nil {
if err := s.handleBlockAttestations(ctx, cfg.roblock.Block(), cfg.postState); err != nil {
return errors.Wrap(err, "could not handle block's attestations")
}

s.InsertSlashingsToForkChoiceStore(ctx, cfg.signed.Block().Body().AttesterSlashings())
s.InsertSlashingsToForkChoiceStore(ctx, cfg.roblock.Block().Body().AttesterSlashings())
if cfg.isValidPayload {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.blockRoot); err != nil {
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, cfg.roblock.Root()); err != nil {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}
@@ -95,10 +97,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
log.WithError(err).Warn("Could not update head")
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if cfg.headRoot != cfg.blockRoot {
s.logNonCanonicalBlockReceived(cfg.blockRoot, cfg.headRoot)
if cfg.headRoot != cfg.roblock.Root() {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if cfg.roblock.Version() >= version.EPBS {
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("could not save head")
}
if err := s.pruneAttsFromPool(cfg.roblock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}

// update the NSC and handle epoch boundaries here since we do
// not send FCU at all
return s.updateCachesPostBlockProcessing(cfg)
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
@@ -154,7 +168,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
}

// Fill in missing blocks
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0].Block(), preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
if err := s.fillInForkChoiceMissingBlocks(ctx, blks[0], preState.CurrentJustifiedCheckpoint(), preState.FinalizedCheckpoint()); err != nil {
return errors.Wrap(err, "could not fill in missing blocks to forkchoice")
}

@@ -234,7 +248,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
if err := avs.IsDataAvailable(ctx, s.CurrentSlot(), b); err != nil {
return errors.Wrapf(err, "could not validate blob data availability at slot %d", b.Block().Slot())
}
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: b,
JustifiedCheckpoint: jCheckpoints[i],
FinalizedCheckpoint: fCheckpoints[i]}
pendingNodes[len(blks)-i-1] = args
@@ -279,7 +293,7 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
return errors.Wrap(err, "could not insert batch to forkchoice")
}
// Insert the last block to forkchoice
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastBR); err != nil {
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, preState, lastB); err != nil {
return errors.Wrap(err, "could not insert last block in batch to forkchoice")
}
// Set their optimistic status
@@ -375,7 +389,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
}
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Slot)
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
return err
}
@@ -404,6 +418,9 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
return errors.Wrapf(err, "could not save block from slot %d", b.Block().Slot())
}
if err := s.cfg.StateGen.SaveState(ctx, r, st); err != nil {
// Do not use parent context in the event it deadlined
ctx = trace.NewContext(context.Background(), span)
s.rollbackBlock(ctx, r)
return errors.Wrap(err, "could not save state")
}
return nil
@@ -474,9 +491,15 @@ func (s *Service) runLateBlockTasks() {

attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
epbs := params.BeaconConfig().EPBSForkEpoch
for {
select {
case <-ticker.C():
case slot := <-ticker.C():
if slots.ToEpoch(slot) == epbs && slot%32 == 0 {
ticker.Done()
attThreshold := params.BeaconConfig().SecondsPerSlot / 4
ticker = slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
}
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
@@ -614,9 +637,6 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if !s.inRegularSync() {
return
}
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
@@ -644,27 +664,46 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: nil,
attributes: attribute,
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
return
}

s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
if headState.Version() >= version.EPBS {
bh, err := headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
return
}
_, err = s.notifyForkchoiceUpdateEPBS(ctx, [32]byte(bh), attribute)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()

fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}
}

@@ -684,3 +723,15 @@ func (s *Service) handleInvalidExecutionError(ctx context.Context, err error, bl
}
return err
}

// In the event of an issue processing a block we rollback changes done to the db and our caches
// to always ensure that the node's internal state is consistent.
func (s *Service) rollbackBlock(ctx context.Context, blockRoot [32]byte) {
log.Warnf("Rolling back insertion of block with root %#x due to processing error", blockRoot)
if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete state from caches with block root %#x", blockRoot)
}
if err := s.cfg.BeaconDB.DeleteBlock(ctx, blockRoot); err != nil {
log.WithError(err).Errorf("Could not delete block with block root %#x", blockRoot)
}
}
@@ -6,6 +6,7 @@ import (
"time"

lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/runtime/version"

"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
@@ -18,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/config/features"
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -42,7 +44,7 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
if !s.inRegularSync() {
return nil
}
slot := cfg.signed.Block().Slot()
slot := cfg.roblock.Block().Slot()
if slots.WithinVotingWindow(uint64(s.genesisTime.Unix()), slot) {
return nil
}
@@ -50,9 +52,9 @@ func (s *Service) getFCUArgs(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) er
}

func (s *Service) getFCUArgsEarlyBlock(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
fcuArgs.headState = cfg.postState
fcuArgs.headBlock = cfg.signed
fcuArgs.headBlock = cfg.roblock
fcuArgs.headRoot = cfg.headRoot
fcuArgs.proposingSlot = s.CurrentSlot() + 1
return nil
@@ -96,7 +98,7 @@ func (s *Service) fcuArgsNonCanonicalBlock(cfg *postBlockProcessConfig, fcuArgs

// sendStateFeedOnBlock sends an event that a new block has been synced
func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.blockRoot)
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(cfg.roblock.Root())
if err != nil {
log.WithError(err).Debug("Could not check if block is optimistic")
optimistic = true
@@ -105,9 +107,9 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.BlockProcessed,
Data: &statefeed.BlockProcessedData{
Slot: cfg.signed.Block().Slot(),
BlockRoot: cfg.blockRoot,
SignedBlock: cfg.signed,
Slot: cfg.roblock.Block().Slot(),
BlockRoot: cfg.roblock.Root(),
SignedBlock: cfg.roblock,
Verified: true,
Optimistic: optimistic,
},
@@ -117,7 +119,7 @@ func (s *Service) sendStateFeedOnBlock(cfg *postBlockProcessConfig) {
// sendLightClientFeeds sends the light client feeds when feature flag is enabled.
func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
if features.Get().EnableLightClient {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.signed, cfg.postState); err != nil {
if _, err := s.sendLightClientOptimisticUpdate(cfg.ctx, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("Failed to send light client optimistic update")
}

@@ -125,7 +127,7 @@ func (s *Service) sendLightClientFeeds(cfg *postBlockProcessConfig) {
finalized := s.ForkChoicer().FinalizedCheckpoint()

// LightClientFinalityUpdate needs super majority
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.signed, finalized, cfg.postState)
s.tryPublishLightClientFinalityUpdate(cfg.ctx, cfg.roblock, finalized, cfg.postState)
}
}
@@ -252,20 +254,21 @@ func (s *Service) sendLightClientOptimisticUpdate(ctx context.Context, signed in
// before sending FCU to the engine.
func (s *Service) updateCachesPostBlockProcessing(cfg *postBlockProcessConfig) error {
slot := cfg.postState.Slot()
if err := transition.UpdateNextSlotCache(cfg.ctx, cfg.blockRoot[:], cfg.postState); err != nil {
root := cfg.roblock.Root()
if err := transition.UpdateNextSlotCache(cfg.ctx, root[:], cfg.postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if !slots.IsEpochEnd(slot) {
return nil
}
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, cfg.blockRoot[:])
return s.handleEpochBoundary(cfg.ctx, slot, cfg.postState, root[:])
}

// handleSecondFCUCall handles a second call to FCU when syncing a new block.
// This is useful when proposing in the next block and we want to defer the
// computation of the next slot shuffling.
func (s *Service) handleSecondFCUCall(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.blockRoot {
if (fcuArgs.attributes == nil || fcuArgs.attributes.IsEmpty()) && cfg.headRoot == cfg.roblock.Root() {
go s.sendFCUWithAttributes(cfg, fcuArgs)
}
}
@@ -281,7 +284,7 @@ func reportProcessingTime(startTime time.Time) {
// called on blocks that arrive after the attestation voting window, or in a
// background routine after syncing early blocks.
func (s *Service) computePayloadAttributes(cfg *postBlockProcessConfig, fcuArgs *fcuConfig) error {
if cfg.blockRoot == cfg.headRoot {
if cfg.roblock.Root() == cfg.headRoot {
if err := s.updateCachesPostBlockProcessing(cfg); err != nil {
return err
}
@@ -302,7 +305,26 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
return nil, err
}

preState, err := s.cfg.StateGen.StateByRoot(ctx, b.ParentRoot())
parentRoot := b.ParentRoot()

if b.Version() >= version.EPBS {
s.ForkChoicer().RLock()
parentHash := s.ForkChoicer().HashForBlockRoot(parentRoot)
s.ForkChoicer().RUnlock()
signedBid, err := b.Body().SignedExecutionPayloadHeader()
if err != nil {
return nil, errors.Wrap(err, "could not get signed execution payload header")
}
bid, err := signedBid.Header()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload header")
}
if parentHash == bid.BlockHash() {
// It's based on full, use the state by hash
parentRoot = parentHash
}
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, parentRoot)
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
}
@@ -438,7 +460,7 @@ func (s *Service) ancestorByDB(ctx context.Context, r [32]byte, slot primitives.

// This retrieves missing blocks from DB (ie. the blocks that couldn't be received over sync) and inserts them to fork choice store.
// This is useful for block tree visualizer and additional vote accounting.
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock,
func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock,
fCheckpoint, jCheckpoint *ethpb.Checkpoint) error {
pendingNodes := make([]*forkchoicetypes.BlockAndCheckpoints, 0)

@@ -448,10 +470,15 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: blk,
// The first block can have a bogus root since the block is not inserted in forkchoice
roblock, err := consensus_blocks.NewROBlockWithRoot(signed, [32]byte{})
if err != nil {
return err
}
pendingNodes = append(pendingNodes, &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint, FinalizedCheckpoint: fCheckpoint})
// As long as parent node is not in fork choice store, and parent node is in DB.
root := blk.ParentRoot()
root := roblock.Block().ParentRoot()
for !s.cfg.ForkChoiceStore.HasNode(root) && s.cfg.BeaconDB.HasBlock(ctx, root) {
b, err := s.getBlock(ctx, root)
if err != nil {
@@ -460,8 +487,12 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, blk interfa
if b.Block().Slot() <= fSlot {
break
}
roblock, err := consensus_blocks.NewROBlockWithRoot(b, root)
if err != nil {
return err
}
root = b.Block().ParentRoot()
args := &forkchoicetypes.BlockAndCheckpoints{Block: b.Block(),
args := &forkchoicetypes.BlockAndCheckpoints{Block: roblock,
JustifiedCheckpoint: jCheckpoint,
FinalizedCheckpoint: fCheckpoint}
pendingNodes = append(pendingNodes, args)
@@ -140,14 +140,13 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
// the parent of the last block inserted is the tree node.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -183,14 +182,14 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
// the parent of the last block inserted is the tree node.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fcp2))

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// 5 nodes from the block tree 1. B0 - B3 - B4 - B6 - B8
@@ -246,7 +245,7 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) {
// Set finalized epoch to 2.
require.NoError(t, service.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: 2, Root: r64}))
err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.NoError(t, err)

// There should be 1 node: block 65
@@ -279,7 +278,7 @@ func TestFillForkChoiceMissingBlocks_FinalizedSibling(t *testing.T) {
require.NoError(t, err)

err = service.fillInForkChoiceMissingBlocks(
context.Background(), wsb.Block(), beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
context.Background(), wsb, beaconState.FinalizedCheckpoint(), beaconState.CurrentJustifiedCheckpoint())
require.Equal(t, ErrNotDescendantOfFinalized.Error(), err.Error())
}

@@ -463,7 +462,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
}
@@ -503,7 +502,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
util.SaveBlock(t, context.Background(), beaconDB, beaconBlock)
}

st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

@@ -566,7 +565,9 @@ func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
require.NoError(t, err)
@@ -614,7 +615,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
postState, err := service.validateStateTransition(ctx, preState, wsb)
require.NoError(t, err)
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, true}))
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
require.NoError(t, err)
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -640,7 +643,9 @@ func TestOnBlock_CanFinalize(t *testing.T) {
|
||||
|
||||
func TestOnBlock_NilBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, nil, [32]byte{}, [32]byte{}, nil, true})
|
||||
signed := &consensusblocks.SignedBeaconBlock{}
|
||||
roblock := consensusblocks.ROBlock{ReadOnlySignedBeaconBlock: signed}
|
||||
err := service.postBlockProcess(&postBlockProcessConfig{tr.ctx, roblock, [32]byte{}, nil, true})
|
||||
require.Equal(t, true, IsInvalidBlock(err))
|
||||
}
|
||||
|
||||
@@ -688,7 +693,9 @@ func TestOnBlock_CallNewPayloadAndForkchoiceUpdated(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, r, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, r, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
testState, err = service.cfg.StateGen.StateByRoot(ctx, r)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
@@ -1102,7 +1109,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
for i := 0; i < 10; i++ {
|
||||
fc := &ethpb.Checkpoint{}
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
|
||||
st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, [32]byte{}, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
|
||||
var wg sync.WaitGroup
|
||||
@@ -1114,7 +1121,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb1)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb1, r1, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb1, r1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1124,7 +1133,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb2)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb2, r2, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb2, r2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1134,7 +1145,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb3)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb3, r3, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb3, r3)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1144,7 +1157,9 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb4)
|
||||
require.NoError(t, err)
|
||||
lock.Lock()
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb4, r4, [32]byte{}, postState, true}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb4, r4)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
lock.Unlock()
|
||||
wg.Done()
|
||||
}()
|
||||
@@ -1219,7 +1234,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1237,7 +1254,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1256,7 +1275,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
@@ -1278,7 +1299,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1306,7 +1329,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head is the last invalid block imported. The
|
||||
// store's headroot is the previous head (since the invalid block did
|
||||
@@ -1335,7 +1360,9 @@ func TestStore_NoViableHead_FCU(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1397,7 +1424,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1415,7 +1444,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1435,7 +1466,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we haven't justified the second epoch yet
|
||||
@@ -1457,7 +1490,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, firstInvalidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, firstInvalidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, firstInvalidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
jc = service.cfg.ForkChoiceStore.JustifiedCheckpoint()
|
||||
require.Equal(t, primitives.Epoch(2), jc.Epoch)
|
||||
@@ -1485,7 +1520,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
|
||||
// not finish importing and it was never imported to forkchoice). Check
|
||||
@@ -1513,7 +1550,9 @@ func TestStore_NoViableHead_NewPayload(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
// Check the newly imported block is head, it justified the right
|
||||
// checkpoint and the node is no longer optimistic
|
||||
@@ -1578,7 +1617,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1597,7 +1638,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1616,7 +1659,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1643,7 +1688,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, invalidRoots[i-13], wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, invalidRoots[i-13], [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, invalidRoots[i-13])
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
// Check that we have justified the second epoch
|
||||
@@ -1669,7 +1716,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
|
||||
// Check that forkchoice's head and store's headroot are the previous head (since the invalid block did
|
||||
@@ -1708,7 +1757,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true}))
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true}))
|
||||
// Check that the head is still INVALID and the node is still optimistic
|
||||
require.Equal(t, invalidHeadRoot, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
optimistic, err = service.IsOptimistic(ctx)
|
||||
@@ -1731,7 +1782,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
st, err = service.cfg.StateGen.StateByRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
@@ -1757,7 +1810,9 @@ func TestStore_NoViableHead_Liveness(t *testing.T) {
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, true})
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, true})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, root, service.cfg.ForkChoiceStore.CachedHeadRoot())
|
||||
sjc = service.CurrentJustifiedCheckpt()
|
||||
@@ -1813,7 +1868,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
}
|
||||
|
||||
for i := 6; i < 12; i++ {
|
||||
@@ -1831,7 +1888,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -1850,7 +1909,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, lastValidRoot, wsb, postState))
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, lastValidRoot, [32]byte{}, postState, false})
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, lastValidRoot)
|
||||
require.NoError(t, err)
|
||||
err = service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false})
|
||||
require.NoError(t, err)
|
||||
// save the post state and the payload Hash of this block since it will
|
||||
// be the LVH
|
||||
@@ -1879,7 +1940,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.NoError(t, service.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch))
|
||||
_, err = service.updateFinalizationOnBlock(ctx, preState, postState, currStoreFinalizedEpoch)
|
||||
require.NoError(t, err)
|
||||
@@ -1905,7 +1968,9 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, wsb, root)
|
||||
rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
_, err = service.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
|
||||
require.ErrorContains(t, "received an INVALID payload from execution engine", err)
|
||||
|
||||
// Check that the headroot/state are not in DB and restart the node
|
||||
@@ -1995,7 +2060,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2059,7 +2126,9 @@ func TestOnBlock_HandleBlockAttestations(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, root, [32]byte{}, postState, false}))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
@@ -2209,11 +2278,11 @@ func Test_getFCUArgs(t *testing.T) {
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)

roblock, err := consensusblocks.NewROBlockWithRoot(wsb, [32]byte{'a'})
require.NoError(t, err)
cfg := &postBlockProcessConfig{
ctx: ctx,
signed: wsb,
blockRoot: [32]byte{'a'},
roblock: roblock,
postState: st,
isValidPayload: true,
}
@@ -2223,11 +2292,199 @@ func Test_getFCUArgs(t *testing.T) {
require.ErrorContains(t, "block does not exist", err)

// canonical branch
cfg.headRoot = cfg.blockRoot
cfg.headRoot = cfg.roblock.Root()
fcuArgs = &fcuConfig{}
err = s.getFCUArgs(cfg, fcuArgs)
require.NoError(t, err)
require.Equal(t, cfg.blockRoot, fcuArgs.headRoot)
require.Equal(t, cfg.roblock.Root(), fcuArgs.headRoot)
}

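With the root carried inside the ROBlock, postBlockProcessConfig no longer needs separate signed and blockRoot fields. A short sketch of the resulting shape, using only names that appear in the hunk above; values are placeholders:

    // The block and its root travel together in cfg.roblock.
    cfg := &postBlockProcessConfig{
        ctx:            ctx,
        roblock:        roblock, // built via consensusblocks.NewROBlockWithRoot(wsb, root)
        postState:      st,
        isValidPayload: true,
    }
    cfg.headRoot = cfg.roblock.Root() // canonical case: the new block is the head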
func TestRollbackBlock(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasState)
|
||||
|
||||
// Set invalid parent root to trigger forkchoice error.
|
||||
wsb.SetParentRoot([]byte("bad"))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, fmt.Sprintf("could not insert block %d to fork choice store", roblock.Block().Slot()), service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err = service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
|
||||
|
||||
func TestRollbackBlock_SavePostStateInfo_ContextDeadline(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 128)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save state summaries so that the cache is flushed and saved to disk
|
||||
// later.
|
||||
for i := 1; i <= 127; i++ {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
|
||||
Slot: primitives.Slot(i),
|
||||
Root: bytesutil.Bytes32(uint64(i)),
|
||||
}))
|
||||
}
|
||||
|
||||
// Use an already-canceled context when saving the block and state
|
||||
cancCtx, canc := context.WithCancel(ctx)
|
||||
canc()
|
||||
|
||||
require.ErrorContains(t, context.Canceled.Error(), service.savePostStateInfo(cancCtx, root, wsb, postState))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
|
||||
|
||||
func TestRollbackBlock_ContextDeadline(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Root: parentRoot[:]}))
|
||||
|
||||
st, err = service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), 33)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), 34)
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err = service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err = service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
require.Equal(t, true, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err := service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasState)
|
||||
|
||||
// Use an already-canceled context when processing the block
|
||||
cancCtx, canc := context.WithCancel(context.Background())
|
||||
canc()
|
||||
roblock, err = consensusblocks.NewROBlockWithRoot(wsb, root)
|
||||
require.NoError(t, err)
|
||||
|
||||
parentRoot = roblock.Block().ParentRoot()
|
||||
|
||||
cj := &ethpb.Checkpoint{}
|
||||
cj.Epoch = 1
|
||||
cj.Root = parentRoot[:]
|
||||
require.NoError(t, postState.SetCurrentJustifiedCheckpoint(cj))
|
||||
require.NoError(t, postState.SetFinalizedCheckpoint(cj))
|
||||
|
||||
// Rollback block insertion into db and caches.
|
||||
require.ErrorContains(t, "context canceled", service.postBlockProcess(&postBlockProcessConfig{cancCtx, roblock, [32]byte{}, postState, false}))
|
||||
|
||||
// The block should no longer exist.
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasBlock(ctx, root))
|
||||
hasState, err = service.cfg.StateGen.HasState(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasState)
|
||||
}
|
||||
|
||||
func fakeCommitments(n int) [][]byte {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/params"
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
@@ -147,15 +148,35 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
return
}
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
var attributes payloadattribute.Attributer
if s.inRegularSync() {
attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
}
if headState.Version() >= version.EPBS {
bh, err := headState.LatestBlockHash()
if err != nil {
log.WithError(err).Error("could not get latest block hash")
return
}
_, err = s.notifyForkchoiceUpdateEPBS(ctx, [32]byte(bh), attributes)
if err != nil {
log.WithError(err).Error("could not notify forkchoice update")
}
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
log.WithError(err).Error("could not save head")
return
}
if err := s.pruneAttsFromPool(headBlock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}
return
}
fcuArgs := &fcuConfig{
headState: headState,
headRoot: newHeadRoot,
headBlock: headBlock,
proposingSlot: proposingSlot,
}
if s.inRegularSync() {
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
}
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
return
}
@@ -177,7 +198,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
}

hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
hasBlock := s.chainHasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}

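In the ePBS branch of UpdateHead above, the head update keys the forkchoice call on the latest execution block hash recorded in the head state rather than on a payload attached to the beacon block, since the payload envelope arrives separately. A condensed sketch of that branch, assuming LatestBlockHash and notifyForkchoiceUpdateEPBS behave as used in the hunk:

    if headState.Version() >= version.EPBS {
        bh, err := headState.LatestBlockHash() // hash of the latest full payload known to the state
        if err != nil {
            log.WithError(err).Error("could not get latest block hash")
            return
        }
        if _, err := s.notifyForkchoiceUpdateEPBS(ctx, [32]byte(bh), attributes); err != nil {
            log.WithError(err).Error("could not notify forkchoice update")
        }
        // Head bookkeeping and attestation pruning still run on the beacon side.
        if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
            log.WithError(err).Error("could not save head")
        }
        return
    }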
@@ -10,6 +10,7 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -42,11 +43,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
|
||||
f := service.cfg.ForkChoiceStore
|
||||
fc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, r32, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
state, r32, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, [32]byte{}, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r32))
|
||||
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32, params.BeaconConfig().ZeroHash, fc, fc)
|
||||
state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, [32]byte{}, fc, fc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, r33))
|
||||
|
||||
@@ -54,10 +55,12 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {
|
||||
a := util.NewAttestation()
|
||||
a.Data.Target.Epoch = 1
|
||||
a.Data.Target.Root = []byte{'c'}
|
||||
a.Data.BeaconBlockRoot = r33[:]
|
||||
r33Root := r33.Root()
|
||||
a.Data.BeaconBlockRoot = r33Root[:]
|
||||
require.ErrorContains(t, wanted, service.VerifyLmdFfgConsistency(context.Background(), a))
|
||||
|
||||
a.Data.Target.Root = r32[:]
|
||||
r32Root := r32.Root()
|
||||
a.Data.Target.Root = r32Root[:]
|
||||
err = service.VerifyLmdFfgConsistency(context.Background(), a)
|
||||
require.NoError(t, err, "Could not verify LMD and FFG votes to be consistent")
|
||||
}
|
||||
@@ -80,7 +83,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
|
||||
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
attsToSave := make([]ethpb.Att, len(atts))
|
||||
@@ -116,7 +119,9 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
@@ -142,7 +147,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, [32]byte{}, ojc, ojc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.Equal(t, 3, fcs.NodeCount())
|
||||
@@ -176,7 +181,9 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, tRoot, wsb, postState))
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, wsb, tRoot, [32]byte{}, postState, false}))
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(wsb, tRoot)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.postBlockProcess(&postBlockProcessConfig{ctx, roblock, [32]byte{}, postState, false}))
|
||||
require.Equal(t, 2, fcs.NodeCount())
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
require.Equal(t, tRoot, service.head.root)
|
||||
@@ -189,7 +196,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, [32]byte{}, ojc, ojc)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
|
||||
require.Equal(t, 3, fcs.NodeCount())
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -39,18 +41,30 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
|
||||
ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityStore) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
}
|
||||
|
||||
// PayloadAttestationReceiver defines methods of the chain service for receiving
|
||||
// and processing new payload attestations and payload attestation messages
|
||||
type PayloadAttestationReceiver interface {
|
||||
ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error
|
||||
}
|
||||
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
// blobs
|
||||
type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// ExecutionPayloadReceiver interface defines the methods of chain service for receiving `ROExecutionPayloadEnvelope`.
|
||||
type ExecutionPayloadReceiver interface {
|
||||
ReceiveExecutionPayloadEnvelope(ctx context.Context, envelope interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityStore) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -83,13 +97,34 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
}

currentCheckpoints := s.saveCurrentCheckpoints(preState)
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, blockCopy, blockRoot)
roblock, err := consensus_blocks.NewROBlockWithRoot(blockCopy, blockRoot)
if err != nil {
return err
}
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return err
var postState state.BeaconState
var isValidPayload bool
var daWaitedTime time.Duration
if blockCopy.Version() >= version.EPBS {
postState, err = s.validateStateTransition(ctx, preState, roblock)
if err != nil {
return errors.Wrap(err, "could not validate state transition")
}
optimistic, err := s.IsOptimisticForRoot(ctx, roblock.Block().ParentRoot())
if err != nil {
return errors.Wrap(err, "could not check if parent is optimistic")
}
// if the parent is not optimistic then we can set the block as
// not optimistic.
isValidPayload = !optimistic
} else {
postState, isValidPayload, err = s.validateExecutionAndConsensus(ctx, preState, roblock)
if err != nil {
return err
}
daWaitedTime, err = s.handleDA(ctx, blockCopy, blockRoot, avs)
if err != nil {
return err
}
}
// Defragment the state before continuing block processing.
s.defragmentState(postState)
@@ -102,8 +137,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
}
|
||||
args := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
signed: blockCopy,
|
||||
blockRoot: blockRoot,
|
||||
roblock: roblock,
|
||||
postState: postState,
|
||||
isValidPayload: isValidPayload,
|
||||
}
|
||||
@@ -184,8 +218,7 @@ func (s *Service) updateCheckpoints(
|
||||
func (s *Service) validateExecutionAndConsensus(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
block interfaces.SignedBeaconBlock,
|
||||
blockRoot [32]byte,
|
||||
block consensusblocks.ROBlock,
|
||||
) (state.BeaconState, bool, error) {
|
||||
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
|
||||
if err != nil {
|
||||
@@ -204,7 +237,7 @@ func (s *Service) validateExecutionAndConsensus(
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block, blockRoot)
|
||||
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, block)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
@@ -349,6 +382,9 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock
|
||||
|
||||
// HasBlock returns true if the block of the input root exists in initial sync blocks cache or DB.
|
||||
func (s *Service) HasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.BlockBeingSynced(root) {
|
||||
return false
|
||||
}
|
||||
return s.hasBlockInInitSyncOrDB(ctx, root)
|
||||
}
|
||||
|
||||
@@ -552,16 +588,16 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
}

// validateExecutionOnBlock notifies the engine of the incoming block execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, signed interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, signed)
func (s *Service) validateExecutionOnBlock(ctx context.Context, ver int, header interfaces.ExecutionData, block consensusblocks.ROBlock) (bool, error) {
isValidPayload, err := s.notifyNewPayload(ctx, ver, header, block)
if err != nil {
s.cfg.ForkChoiceStore.Lock()
err = s.handleInvalidExecutionError(ctx, err, blockRoot, signed.Block().ParentRoot())
err = s.handleInvalidExecutionError(ctx, err, block.Root(), block.Block().ParentRoot())
s.cfg.ForkChoiceStore.Unlock()
return false, err
}
if signed.Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, signed); err != nil {
if block.Block().Version() < version.Capella && isValidPayload {
if err := s.validateMergeTransitionBlock(ctx, ver, header, block); err != nil {
return isValidPayload, err
}
}

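Call sites of validateExecutionOnBlock now pass the ROBlock and stop threading the root through as a separate argument; the root and parent root are read from the block itself. A minimal sketch of the updated call, mirroring the test changes earlier in this diff and using only names that appear in those hunks:

    // Build the read-only block once, then validate the execution payload.
    rowsb, err := consensusblocks.NewROBlockWithRoot(wsb, root)
    if err != nil {
        return err
    }
    isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, rowsb)
    if err != nil {
        return errors.Wrap(err, "could not notify the engine of the new payload")
    }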
@@ -278,6 +278,8 @@ func TestService_HasBlock(t *testing.T) {
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, s.HasBlock(context.Background(), r))
|
||||
s.blockBeingSynced.set(r)
|
||||
require.Equal(t, false, s.HasBlock(context.Background(), r))
|
||||
}
|
||||
|
||||
func TestCheckSaveHotStateDB_Enabling(t *testing.T) {
|
||||
|
||||
234
beacon-chain/blockchain/receive_execution_payload_envelope.go
Normal file
@@ -0,0 +1,234 @@
package blockchain

import (
    "bytes"
    "context"
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
    "golang.org/x/sync/errgroup"
)

// ReceiveExecutionPayloadEnvelope is a function that defines the operations (minus pubsub)
// that are performed on a received execution payload envelope. The operations consist of:
//  1. Validate the payload, apply state transition.
//  2. Apply fork choice to the processed payload.
//  3. Save latest head info.
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
    receivedTime := time.Now()
    envelope, err := signed.Envelope()
    if err != nil {
        return err
    }
    root := envelope.BeaconBlockRoot()
    s.payloadBeingSynced.set(envelope)
    defer s.payloadBeingSynced.unset(root)

    preState, err := s.getPayloadEnvelopePrestate(ctx, envelope)
    if err != nil {
        return errors.Wrap(err, "could not get prestate")
    }

    eg, _ := errgroup.WithContext(ctx)
    eg.Go(func() error {
        if err := epbs.ValidatePayloadStateTransition(ctx, preState, envelope); err != nil {
            return errors.Wrap(err, "failed to validate consensus state transition function")
        }
        return nil
    })
    var isValidPayload bool
    eg.Go(func() error {
        var err error
        isValidPayload, err = s.validateExecutionOnEnvelope(ctx, envelope)
        if err != nil {
            return errors.Wrap(err, "could not notify the engine of the new payload")
        }
        return nil
    })

    if err := eg.Wait(); err != nil {
        return err
    }
    daStartTime := time.Now()
    // TODO: Add DA check
    daWaitedTime := time.Since(daStartTime)
    dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
    if err := s.savePostPayload(ctx, signed, preState); err != nil {
        return err
    }
    if err := s.insertPayloadEnvelope(envelope); err != nil {
        return errors.Wrap(err, "could not insert payload to forkchoice")
    }
    if isValidPayload {
        s.ForkChoicer().Lock()
        if err := s.ForkChoicer().SetOptimisticToValid(ctx, root); err != nil {
            s.ForkChoicer().Unlock()
            return errors.Wrap(err, "could not set optimistic payload to valid")
        }
        s.ForkChoicer().Unlock()
    }

    headRoot, err := s.HeadRoot(ctx)
    if err != nil {
        log.WithError(err).Error("could not get headroot to compute attributes")
        return nil
    }
    if bytes.Equal(headRoot, root[:]) {
        attr := s.getPayloadAttribute(ctx, preState, envelope.Slot()+1, headRoot)
        execution, err := envelope.Execution()
        if err != nil {
            log.WithError(err).Error("could not get execution data")
            return nil
        }
        blockHash := [32]byte(execution.BlockHash())
        payloadID, err := s.notifyForkchoiceUpdateEPBS(ctx, blockHash, attr)
        if err != nil {
            if IsInvalidBlock(err) {
                // TODO handle the lvh here
                return err
            }
            return nil
        }
        if attr != nil && !attr.IsEmpty() && payloadID != nil {
            var pid [8]byte
            copy(pid[:], payloadID[:])
            log.WithFields(logrus.Fields{
                "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot)),
                "headSlot":  envelope.Slot(),
                "payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
            }).Info("Forkchoice updated with payload attributes for proposal")
            s.cfg.PayloadIDCache.Set(envelope.Slot()+1, root, pid)
        }
        headBlk, err := s.HeadBlock(ctx)
        if err != nil {
            log.WithError(err).Error("could not get head block")
        }
        if err := s.saveHead(ctx, root, headBlk, preState); err != nil {
            log.WithError(err).Error("could not save new head")
        }
        // update the NSC with the hash for the full block
        if err := transition.UpdateNextSlotCache(ctx, blockHash[:], preState); err != nil {
            log.WithError(err).Error("could not update next slot cache with payload")
        }
    }
    timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
    executionEngineProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
    return nil
}

// notifyNewEnvelope signals the execution engine about the payload carried by a new envelope.
// It returns true if the EL has returned VALID for the payload.
func (s *Service) notifyNewEnvelope(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
    defer span.End()

    payload, err := envelope.Execution()
    if err != nil {
        return false, errors.Wrap(err, "could not get execution payload")
    }

    versionedHashes := envelope.VersionedHashes()
    root := envelope.BeaconBlockRoot()
    parentRoot, err := s.ParentRoot(root)
    if err != nil {
        return false, errors.Wrap(err, "could not get parent block root")
    }
    pr := common.Hash(parentRoot)
    requests := envelope.ExecutionRequests()
    lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr, requests)
    switch {
    case err == nil:
        newPayloadValidNodeCount.Inc()
        return true, nil
    case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
        newPayloadOptimisticNodeCount.Inc()
        log.WithFields(logrus.Fields{
            "payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
        }).Info("Called new payload with optimistic block")
        return false, nil
    case errors.Is(err, execution.ErrInvalidPayloadStatus):
        lvh := bytesutil.ToBytes32(lastValidHash)
        return false, invalidBlock{
            error:         ErrInvalidPayload,
            lastValidHash: lvh,
        }
    default:
        return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
    }
}

// validateExecutionOnEnvelope notifies the engine of the incoming execution payload and returns true if the payload is valid
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (bool, error) {
    isValidPayload, err := s.notifyNewEnvelope(ctx, e)
    if err == nil {
        return isValidPayload, nil
    }
    blockRoot := e.BeaconBlockRoot()
    parentRoot, rootErr := s.ParentRoot(blockRoot)
    if rootErr != nil {
        return false, errors.Wrap(rootErr, "could not get parent block root")
    }
    s.cfg.ForkChoiceStore.Lock()
    err = s.handleInvalidExecutionError(ctx, err, blockRoot, parentRoot)
    s.cfg.ForkChoiceStore.Unlock()
    return false, err
}

func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (state.BeaconState, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.getPayloadEnvelopePreState")
    defer span.End()

    // Verify incoming payload has a valid pre state.
    root := e.BeaconBlockRoot()
    // Verify the referred block is known to forkchoice
    if !s.InForkchoice(root) {
        return nil, errors.New("Cannot import execution payload envelope for unknown block")
    }
    if err := s.verifyBlkPreState(ctx, root); err != nil {
        return nil, errors.Wrap(err, "could not verify payload prestate")
    }

    preState, err := s.cfg.StateGen.StateByRoot(ctx, root)
    if err != nil {
        return nil, errors.Wrap(err, "could not get pre state")
    }
    if preState == nil || preState.IsNil() {
        return nil, errors.New("nil pre state")
    }
    return preState, nil
}

func (s *Service) savePostPayload(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope, st state.BeaconState) error {
    if err := s.cfg.BeaconDB.SaveBlindPayloadEnvelope(ctx, signed); err != nil {
        return err
    }
    envelope, err := signed.Envelope()
    if err != nil {
        return err
    }
    execution, err := envelope.Execution()
    if err != nil {
        return err
    }
    r := envelope.BeaconBlockRoot()
    if err := s.cfg.StateGen.SaveState(ctx, [32]byte(execution.BlockHash()), st); err != nil {
        log.Warnf("Rolling back insertion of block with root %#x", r)
        if err := s.cfg.BeaconDB.DeleteBlock(ctx, r); err != nil {
            log.WithError(err).Errorf("Could not delete block with block root %#x", r)
        }
        return errors.Wrap(err, "could not save state")
    }
    return nil
}
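The envelope handler above is pubsub-agnostic; the following is a minimal sketch (not part of this diff) of how a gossip handler might hand a decoded envelope to it. The handler name and the wrapper constructor are assumptions for illustration only, and the availability store is passed as nil because the DA check is still a TODO above.

// Illustrative sketch only; onPayloadEnvelopeGossip and
// WrappedROSignedExecutionPayloadEnvelope are assumed names, not part of this change.
func (s *Service) onPayloadEnvelopeGossip(ctx context.Context, msg *enginev1.SignedExecutionPayloadEnvelope) error {
    signed, err := blocks.WrappedROSignedExecutionPayloadEnvelope(msg)
    if err != nil {
        return errors.Wrap(err, "could not wrap signed envelope")
    }
    // DA check is not wired yet, so no availability store is supplied.
    return s.ReceiveExecutionPayloadEnvelope(ctx, signed, nil)
}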
@@ -0,0 +1,100 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
|
||||
)
|
||||
|
||||
func Test_getPayloadEnvelopePrestate(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = service.getPayloadEnvelopePrestate(ctx, e)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func Test_notifyNewEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
isValidPayload, err := service.notifyNewEnvelope(ctx, e)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isValidPayload)
|
||||
}
|
||||
|
||||
func Test_validateExecutionOnEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
isValidPayload, err := service.validateExecutionOnEnvelope(ctx, e)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isValidPayload)
|
||||
}
|
||||
|
||||
func Test_ReceiveExecutionPayloadEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
post := gs.Copy()
|
||||
p := &enginev1.ExecutionPayloadEnvelope{
|
||||
Payload: &enginev1.ExecutionPayloadElectra{
|
||||
ParentHash: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: service.originBlockRoot[:],
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
StateRoot: make([]byte, 32),
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
}
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
das := &das.MockAvailabilityStore{}
|
||||
|
||||
blockHeader := post.LatestBlockHeader()
|
||||
prevStateRoot, err := post.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
blockHeader.StateRoot = prevStateRoot[:]
|
||||
require.NoError(t, post.SetLatestBlockHeader(blockHeader))
|
||||
stRoot, err := post.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
p.StateRoot = stRoot[:]
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
require.NoError(t, service.ReceiveExecutionPayloadEnvelope(ctx, e, das))
|
||||
}
|
||||
@@ -0,0 +1,33 @@
package blockchain

import (
    "context"
    "slices"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *eth.PayloadAttestationMessage) error {
    if err := helpers.ValidateNilPayloadAttestationMessage(a); err != nil {
        return err
    }
    root := [32]byte(a.Data.BeaconBlockRoot)
    st, err := s.HeadStateReadOnly(ctx)
    if err != nil {
        return err
    }
    ptc, err := helpers.GetPayloadTimelinessCommittee(ctx, st, a.Data.Slot)
    if err != nil {
        return err
    }
    idx := slices.Index(ptc, a.ValidatorIndex)
    if idx == -1 {
        return errInvalidValidatorIndex
    }
    if s.cfg.PayloadAttestationCache.Seen(root, uint64(primitives.ValidatorIndex(idx))) {
        return nil
    }
    return s.cfg.PayloadAttestationCache.Add(a, uint64(idx))
}
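A hedged usage sketch (not part of this change): constructing a payload attestation message and passing it to the handler above. The root, slot, and signature values are placeholders supplied by the surrounding caller.

// Illustrative only; headRoot, currentSlot, and sig are placeholders.
msg := &eth.PayloadAttestationMessage{
    ValidatorIndex: 11, // index into the validator registry; the handler maps it to a PTC index
    Data: &eth.PayloadAttestationData{
        BeaconBlockRoot: headRoot[:],
        Slot:            currentSlot,
        PayloadStatus:   primitives.PAYLOAD_PRESENT,
    },
    Signature: sig.Marshal(),
}
if err := s.ReceivePayloadAttestationMessage(ctx, msg); err != nil {
    log.WithError(err).Debug("Could not process payload attestation message")
}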
@@ -36,6 +36,7 @@ import (
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -65,6 +66,7 @@ type Service struct {
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
payloadBeingSynced *currentlySyncingPayload
|
||||
blobStorage *filesystem.BlobStorage
|
||||
lastPublishedLightClientEpoch primitives.Epoch
|
||||
}
|
||||
@@ -75,6 +77,8 @@ type config struct {
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache cache.DepositCache
|
||||
PayloadAttestationCache *cache.PayloadAttestationCache
|
||||
PayloadEnvelopeCache *sync.Map
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
AttPool attestations.Pool
|
||||
@@ -179,6 +183,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: ¤tlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
payloadBeingSynced: ¤tlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -303,7 +308,15 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, fRoot); err != nil {
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := consensus_blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
@@ -515,7 +528,11 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
gb, err := consensus_blocks.NewROBlockWithRoot(genesisBlk, genesisBlkRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, gb); err != nil {
|
||||
log.WithError(err).Fatal("Could not process genesis block for fork choice")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
|
||||
@@ -542,7 +559,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
// 2.) Check DB.
|
||||
// Checking 1.) is ten times faster than checking 2.)
|
||||
// this function requires a lock in forkchoice
|
||||
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
|
||||
func (s *Service) chainHasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.cfg.ForkChoiceStore.HasNode(root) {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -376,14 +376,18 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
cfg: &config{ForkChoiceStore: doublylinkedtree.New(), BeaconDB: beaconDB},
|
||||
}
|
||||
b := util.NewBeaconBlock()
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
assert.Equal(t, false, s.chainHasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.chainHasBlock(ctx, r), "Should have block")
|
||||
}
|
||||
|
||||
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
@@ -453,7 +457,11 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
require.NoError(b, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, r))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(wsb, r)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
|
||||
@@ -3,7 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
srcs = ["mock.go"],
|
||||
srcs = [
|
||||
"mock.go",
|
||||
"mock_epbs.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
|
||||
@@ -37,44 +37,50 @@ var ErrNilState = errors.New("nil state")
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
NotFinalized bool
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block interfaces.ReadOnlySignedBeaconBlock
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
|
||||
SyncCommitteeIndices []primitives.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
TargetRoot [32]byte
|
||||
NotFinalized bool
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block interfaces.ReadOnlySignedBeaconBlock
|
||||
ExecutionPayloadEnvelope interfaces.ROExecutionPayloadEnvelope
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
|
||||
SyncCommitteeIndices []primitives.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
ReceiveEnvelopeMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
TargetRoot [32]byte
|
||||
HighestReceivedSlot primitives.Slot
|
||||
HighestReceivedRoot [32]byte
|
||||
PayloadStatus primitives.PTCStatus
|
||||
ReceivePayloadAttestationMessageErr error
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -567,7 +573,7 @@ func prepareForkchoiceState(
|
||||
payloadHash [32]byte,
|
||||
justified *ethpb.Checkpoint,
|
||||
finalized *ethpb.Checkpoint,
|
||||
) (state.BeaconState, [32]byte, error) {
|
||||
) (state.BeaconState, blocks.ROBlock, error) {
|
||||
blockHeader := ðpb.BeaconBlockHeader{
|
||||
ParentRoot: parentRoot[:],
|
||||
}
|
||||
@@ -588,7 +594,26 @@ func prepareForkchoiceState(
|
||||
|
||||
base.BlockRoots[0] = append(base.BlockRoots[0], blockRoot[:]...)
|
||||
st, err := state_native.InitializeFromProtoBellatrix(base)
|
||||
return st, blockRoot, err
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
blk := ðpb.SignedBeaconBlockBellatrix{
|
||||
Block: ðpb.BeaconBlockBellatrix{
|
||||
Slot: slot,
|
||||
ParentRoot: parentRoot[:],
|
||||
Body: ðpb.BeaconBlockBodyBellatrix{
|
||||
ExecutionPayload: &enginev1.ExecutionPayload{
|
||||
BlockHash: payloadHash[:],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
signed, err := blocks.NewSignedBeaconBlock(blk)
|
||||
if err != nil {
|
||||
return nil, blocks.ROBlock{}, err
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(signed, blockRoot)
|
||||
return st, roblock, err
|
||||
}
|
||||
|
||||
// CachedHeadRoot mocks the same method in the chain service
|
||||
@@ -622,18 +647,18 @@ func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// HighestReceivedBlockSlot mocks the same method in the chain service
|
||||
func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
|
||||
// HighestReceivedBlockSlotRoot mocks the same method in the chain service
|
||||
func (s *ChainService) HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte) {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.HighestReceivedBlockSlot()
|
||||
return s.ForkChoiceStore.HighestReceivedBlockSlotRoot()
|
||||
}
|
||||
return 0
|
||||
return s.HighestReceivedSlot, s.HighestReceivedRoot
|
||||
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, root [32]byte) error {
|
||||
func (s *ChainService) InsertNode(ctx context.Context, st state.BeaconState, block blocks.ROBlock) error {
|
||||
if s.ForkChoiceStore != nil {
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, root)
|
||||
return s.ForkChoiceStore.InsertNode(ctx, st, block)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -687,3 +712,17 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
}
|
||||
|
||||
// HashInForkchoice mocks the same method in the chain service
|
||||
func (c *ChainService) HashInForkchoice([32]byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ReceivePayloadAttestationMessage mocks the same method in the chain service
|
||||
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
|
||||
return c.ReceivePayloadAttestationMessageErr
|
||||
}
|
||||
|
||||
func (c *ChainService) GetPTCVote(root [32]byte) primitives.PTCStatus {
|
||||
return c.PayloadStatus
|
||||
}
|
||||
|
||||
25
beacon-chain/blockchain/testing/mock_epbs.go
Normal file
@@ -0,0 +1,25 @@
package testing

import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
)

// ReceiveExecutionPayloadEnvelope mocks the method in chain service.
func (s *ChainService) ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
    if s.ReceiveBlockMockErr != nil {
        return s.ReceiveBlockMockErr
    }
    if s.State == nil {
        return ErrNilState
    }
    if s.State.Slot() == env.Slot() {
        if err := s.State.SetLatestFullSlot(s.State.Slot()); err != nil {
            return err
        }
    }
    s.ExecutionPayloadEnvelope = env
    return nil
}
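A short sketch (not part of this diff) of how the mock above can be exercised from a test; st, env, ctx, and the require helpers are assumed to be set up by the surrounding test.

// Illustrative only.
chain := &ChainService{State: st}
require.NoError(t, chain.ReceiveExecutionPayloadEnvelope(ctx, env, nil))
require.Equal(t, env, chain.ExecutionPayloadEnvelope)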
8
beacon-chain/cache/BUILD.bazel
vendored
@@ -15,11 +15,13 @@ go_library(
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"payload_attestation.go",
|
||||
"payload_id.go",
|
||||
"proposer_indices.go",
|
||||
"proposer_indices_disabled.go", # keep
|
||||
"proposer_indices_type.go",
|
||||
"registration.go",
|
||||
"signed_execution_header.go",
|
||||
"skip_slot_cache.go",
|
||||
"subnet_ids.go",
|
||||
"sync_committee.go",
|
||||
@@ -42,11 +44,13 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
@@ -70,10 +74,12 @@ go_test(
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"payload_id_test.go",
|
||||
"private_access_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"registration_test.go",
|
||||
"signed_execution_header_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
"sync_committee_head_state_test.go",
|
||||
@@ -88,7 +94,9 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
132
beacon-chain/cache/payload_attestation.go
vendored
Normal file
@@ -0,0 +1,132 @@
package cache

import (
    "errors"
    "sync"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/crypto/bls"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

var errNilPayloadAttestationMessage = errors.New("nil Payload Attestation Message")

// PayloadAttestationCache keeps a map of all the PTC votes that were seen,
// already aggregated. The key is the beacon block root.
type PayloadAttestationCache struct {
    root         [32]byte
    attestations [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation
    sync.Mutex
}

// Seen returns true if a vote for the given Beacon Block Root has already been processed
// for this Payload Timeliness Committee index. This will return true even if
// the Payload status differs.
func (p *PayloadAttestationCache) Seen(root [32]byte, idx uint64) bool {
    p.Lock()
    defer p.Unlock()
    if p.root != root {
        return false
    }
    for _, agg := range p.attestations {
        if agg == nil {
            continue
        }
        if agg.AggregationBits.BitAt(idx) {
            return true
        }
    }
    return false
}

// messageToPayloadAttestation creates a PayloadAttestation with a single
// aggregated bit from the passed PayloadAttestationMessage
func messageToPayloadAttestation(att *eth.PayloadAttestationMessage, idx uint64) *eth.PayloadAttestation {
    bits := primitives.NewPayloadAttestationAggregationBits()
    bits.SetBitAt(idx, true)
    data := &eth.PayloadAttestationData{
        BeaconBlockRoot: bytesutil.SafeCopyBytes(att.Data.BeaconBlockRoot),
        Slot:            att.Data.Slot,
        PayloadStatus:   att.Data.PayloadStatus,
    }
    return &eth.PayloadAttestation{
        AggregationBits: bits,
        Data:            data,
        Signature:       bytesutil.SafeCopyBytes(att.Signature),
    }
}

// aggregateSigFromMessage returns the aggregated signature from a Payload
// Attestation by adding the passed signature in the PayloadAttestationMessage,
// no signature validation is performed.
func aggregateSigFromMessage(aggregated *eth.PayloadAttestation, message *eth.PayloadAttestationMessage) ([]byte, error) {
    aggSig, err := bls.SignatureFromBytesNoValidation(aggregated.Signature)
    if err != nil {
        return nil, err
    }
    sig, err := bls.SignatureFromBytesNoValidation(message.Signature)
    if err != nil {
        return nil, err
    }
    return bls.AggregateSignatures([]bls.Signature{aggSig, sig}).Marshal(), nil
}

// Add adds a PayloadAttestationMessage to the internal cache of aggregated
// PayloadAttestations.
// If the index has already been seen for this attestation status the function does nothing.
// If the root is not the cached root, the function will clear the previous cache
// This function assumes that the message has already been validated. In
// particular that the signature is valid and that the block root corresponds to
// the given slot in the attestation data.
func (p *PayloadAttestationCache) Add(att *eth.PayloadAttestationMessage, idx uint64) error {
    if att == nil || att.Data == nil || att.Data.BeaconBlockRoot == nil {
        return errNilPayloadAttestationMessage
    }
    p.Lock()
    defer p.Unlock()
    root := [32]byte(att.Data.BeaconBlockRoot)
    if p.root != root {
        p.root = root
        p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
    }
    agg := p.attestations[att.Data.PayloadStatus]
    if agg == nil {
        p.attestations[att.Data.PayloadStatus] = messageToPayloadAttestation(att, idx)
        return nil
    }
    if agg.AggregationBits.BitAt(idx) {
        return nil
    }
    sig, err := aggregateSigFromMessage(agg, att)
    if err != nil {
        return err
    }
    agg.Signature = sig
    agg.AggregationBits.SetBitAt(idx, true)
    return nil
}

// Get returns the aggregated PayloadAttestation for the given root and status
// if the root doesn't exist or status is invalid, the function returns nil.
func (p *PayloadAttestationCache) Get(root [32]byte, status primitives.PTCStatus) *eth.PayloadAttestation {
    p.Lock()
    defer p.Unlock()

    if p.root != root {
        return nil
    }
    if status >= primitives.PAYLOAD_INVALID_STATUS {
        return nil
    }

    return eth.CopyPayloadAttestation(p.attestations[status])
}

// Clear clears the internal map
func (p *PayloadAttestationCache) Clear() {
    p.Lock()
    defer p.Unlock()
    p.root = [32]byte{}
    p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
}
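A hedged sketch of the intended Seen/Add/Get flow for this cache (not part of the diff); cache, msg, ptcIdx, and root are placeholders for values produced by the caller.

// Illustrative only.
if !cache.Seen(root, ptcIdx) {
    if err := cache.Add(msg, ptcIdx); err != nil {
        return err
    }
}
// Later, e.g. when packing PayloadAttestations into a block, read the aggregate for one status.
if agg := cache.Get(root, primitives.PAYLOAD_PRESENT); agg != nil {
    _ = agg.AggregationBits.Count() // number of PTC members that attested "present"
}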
143
beacon-chain/cache/payload_attestation_test.go
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestPayloadAttestationCache(t *testing.T) {
|
||||
p := &PayloadAttestationCache{}
|
||||
|
||||
// Test Seen before any vote is added
|
||||
root := [32]byte{'r'}
|
||||
idx := uint64(5)
|
||||
require.Equal(t, false, p.Seen(root, idx))
|
||||
|
||||
// Test Add
|
||||
msg := ð.PayloadAttestationMessage{
|
||||
Signature: bls.NewAggregateSignature().Marshal(),
|
||||
Data: ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: root[:],
|
||||
Slot: 1,
|
||||
PayloadStatus: primitives.PAYLOAD_PRESENT,
|
||||
},
|
||||
}
|
||||
|
||||
// Add new root
|
||||
require.NoError(t, p.Add(msg, idx))
|
||||
require.Equal(t, true, p.Seen(root, idx))
|
||||
require.Equal(t, root, p.root)
|
||||
att := p.attestations[primitives.PAYLOAD_PRESENT]
|
||||
indices := att.AggregationBits.BitIndices()
|
||||
require.DeepEqual(t, []int{int(idx)}, indices)
|
||||
singleSig := bytesutil.SafeCopyBytes(msg.Signature)
|
||||
require.DeepEqual(t, singleSig, att.Signature)
|
||||
|
||||
// Test Seen
|
||||
require.Equal(t, true, p.Seen(root, idx))
|
||||
require.Equal(t, false, p.Seen(root, idx+1))
|
||||
|
||||
// Add another attestation on the same data
|
||||
msg2 := ð.PayloadAttestationMessage{
|
||||
Signature: bls.NewAggregateSignature().Marshal(),
|
||||
Data: att.Data,
|
||||
}
|
||||
idx2 := uint64(7)
|
||||
require.NoError(t, p.Add(msg2, idx2))
|
||||
att = p.attestations[primitives.PAYLOAD_PRESENT]
|
||||
indices = att.AggregationBits.BitIndices()
|
||||
require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
|
||||
require.DeepNotEqual(t, att.Signature, msg.Signature)
|
||||
|
||||
// Try the same index again
|
||||
require.NoError(t, p.Add(msg2, idx2))
|
||||
att2 := p.attestations[primitives.PAYLOAD_PRESENT]
|
||||
indices = att.AggregationBits.BitIndices()
|
||||
require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
|
||||
require.DeepEqual(t, att, att2)
|
||||
|
||||
// Test Seen
|
||||
require.Equal(t, true, p.Seen(root, idx2))
|
||||
require.Equal(t, false, p.Seen(root, idx2+1))
|
||||
|
||||
// Add another attestation with a different payload status
|
||||
msg3 := ð.PayloadAttestationMessage{
|
||||
Signature: bls.NewAggregateSignature().Marshal(),
|
||||
Data: ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: root[:],
|
||||
Slot: 1,
|
||||
PayloadStatus: primitives.PAYLOAD_WITHHELD,
|
||||
},
|
||||
}
|
||||
idx3 := uint64(17)
|
||||
|
||||
require.NoError(t, p.Add(msg3, idx3))
|
||||
att3 := p.attestations[primitives.PAYLOAD_WITHHELD]
|
||||
indices3 := att3.AggregationBits.BitIndices()
|
||||
require.DeepEqual(t, []int{int(idx3)}, indices3)
|
||||
require.DeepEqual(t, singleSig, att3.Signature)
|
||||
|
||||
// Add a different root
|
||||
root2 := [32]byte{'s'}
|
||||
msg.Data.BeaconBlockRoot = root2[:]
|
||||
require.NoError(t, p.Add(msg, idx))
|
||||
require.Equal(t, root2, p.root)
|
||||
require.Equal(t, true, p.Seen(root2, idx))
|
||||
require.Equal(t, false, p.Seen(root, idx))
|
||||
att = p.attestations[primitives.PAYLOAD_PRESENT]
|
||||
indices = att.AggregationBits.BitIndices()
|
||||
require.DeepEqual(t, []int{int(idx)}, indices)
|
||||
}
|
||||
|
||||
func TestPayloadAttestationCache_Get(t *testing.T) {
|
||||
root := [32]byte{1, 2, 3}
|
||||
wrongRoot := [32]byte{4, 5, 6}
|
||||
status := primitives.PAYLOAD_PRESENT
|
||||
invalidStatus := primitives.PAYLOAD_INVALID_STATUS
|
||||
|
||||
cache := &PayloadAttestationCache{
|
||||
root: root,
|
||||
attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{
|
||||
{
|
||||
Signature: []byte{1},
|
||||
},
|
||||
{
|
||||
Signature: []byte{2},
|
||||
},
|
||||
{
|
||||
Signature: []byte{3},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("valid root and status", func(t *testing.T) {
|
||||
result := cache.Get(root, status)
|
||||
require.NotNil(t, result, "Expected a non-nil result")
|
||||
require.DeepEqual(t, cache.attestations[status], result)
|
||||
})
|
||||
|
||||
t.Run("invalid root", func(t *testing.T) {
|
||||
result := cache.Get(wrongRoot, status)
|
||||
require.IsNil(t, result)
|
||||
})
|
||||
|
||||
t.Run("status out of bound", func(t *testing.T) {
|
||||
result := cache.Get(root, invalidStatus)
|
||||
require.IsNil(t, result)
|
||||
})
|
||||
|
||||
t.Run("no attestation", func(t *testing.T) {
|
||||
emptyCache := &PayloadAttestationCache{
|
||||
root: root,
|
||||
attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{},
|
||||
}
|
||||
|
||||
result := emptyCache.Get(root, status)
|
||||
require.IsNil(t, result)
|
||||
})
|
||||
}
|
||||
76
beacon-chain/cache/signed_execution_header.go
vendored
Normal file
@@ -0,0 +1,76 @@
package cache

import (
    "bytes"
    "sync"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// ExecutionPayloadHeaders is used by the sync service to store signed execution payload headers after they pass validation,
// and filter out subsequent headers with lower value.
// The signed header from this cache could be used by the proposer when proposing the next slot.
type ExecutionPayloadHeaders struct {
    headers map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader
    sync.RWMutex
}

func NewExecutionPayloadHeaders() *ExecutionPayloadHeaders {
    return &ExecutionPayloadHeaders{
        headers: make(map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader),
    }
}

// SaveSignedExecutionPayloadHeader saves the signed execution payload header to the cache.
// The cache stores headers for up to two slots. If the input slot is higher than the lowest slot
// currently in the cache, the lowest slot is removed to make space for the new header.
// Only the highest value header for a given parent block hash will be stored.
// This function assumes caller already checks header's slot is current or next slot, it doesn't account for slot validation.
func (c *ExecutionPayloadHeaders) SaveSignedExecutionPayloadHeader(header *enginev1.SignedExecutionPayloadHeader) {
    c.Lock()
    defer c.Unlock()

    for s := range c.headers {
        if s+1 < header.Message.Slot {
            delete(c.headers, s)
        }
    }

    // Add or update the header in the map
    if _, ok := c.headers[header.Message.Slot]; !ok {
        c.headers[header.Message.Slot] = []*enginev1.SignedExecutionPayloadHeader{header}
    } else {
        found := false
        for i, h := range c.headers[header.Message.Slot] {
            if bytes.Equal(h.Message.ParentBlockHash, header.Message.ParentBlockHash) && bytes.Equal(h.Message.ParentBlockRoot, header.Message.ParentBlockRoot) {
                if header.Message.Value > h.Message.Value {
                    c.headers[header.Message.Slot][i] = header
                }
                found = true
                break
            }
        }
        if !found {
            c.headers[header.Message.Slot] = append(c.headers[header.Message.Slot], header)
        }
    }
}

// SignedExecutionPayloadHeader returns the signed payload header for the given slot and parent block hash and block root.
// Returns nil if the header is not found.
// This should be used when the caller wants the header to match parent block hash and parent block root such as proposer choosing a header to propose.
func (c *ExecutionPayloadHeaders) SignedExecutionPayloadHeader(slot primitives.Slot, parentBlockHash []byte, parentBlockRoot []byte) *enginev1.SignedExecutionPayloadHeader {
    c.RLock()
    defer c.RUnlock()

    if headers, ok := c.headers[slot]; ok {
        for _, header := range headers {
            if bytes.Equal(header.Message.ParentBlockHash, parentBlockHash) && bytes.Equal(header.Message.ParentBlockRoot, parentBlockRoot) {
                return header
            }
        }
    }

    return nil
}
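A brief sketch (not in this diff) of the proposer-side lookup against this cache when building a block for a given slot on top of a known parent; the variable names are placeholders.

// Illustrative only.
h := c.SignedExecutionPayloadHeader(slot, parentBlockHash[:], parentBlockRoot[:])
if h == nil {
    // No cached bid builds on this parent; the proposer would fall back to a locally built header.
}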
243
beacon-chain/cache/signed_execution_header_test.go
vendored
Normal file
@@ -0,0 +1,243 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func Test_SaveSignedExecutionPayloadHeader(t *testing.T) {
|
||||
t.Run("First header should be added to cache", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header)
|
||||
require.Equal(t, 1, len(c.headers))
|
||||
require.Equal(t, header, c.headers[1][0])
|
||||
})
|
||||
|
||||
t.Run("Second header with higher slot should be added, and both slots should be in cache", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
require.Equal(t, 2, len(c.headers))
|
||||
require.Equal(t, header1, c.headers[1][0])
|
||||
require.Equal(t, header2, c.headers[2][0])
|
||||
})
|
||||
|
||||
t.Run("Third header with higher slot should replace the oldest slot", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header3 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 3,
|
||||
ParentBlockHash: []byte("parent3"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
c.SaveSignedExecutionPayloadHeader(header3)
|
||||
require.Equal(t, 2, len(c.headers))
|
||||
require.Equal(t, header2, c.headers[2][0])
|
||||
require.Equal(t, header3, c.headers[3][0])
|
||||
})
|
||||
|
||||
t.Run("Header with same slot but higher value should replace the existing one", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 200,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
require.Equal(t, 1, len(c.headers[2]))
|
||||
require.Equal(t, header2, c.headers[2][0])
|
||||
})
|
||||
|
||||
t.Run("Header with different parent block hash should be appended to the same slot", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 200,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
require.Equal(t, 2, len(c.headers[2]))
|
||||
require.Equal(t, header1, c.headers[2][0])
|
||||
require.Equal(t, header2, c.headers[2][1])
|
||||
})
|
||||
}
|
||||
|
||||
func TestSignedExecutionPayloadHeader(t *testing.T) {
|
||||
t.Run("Return header when slot and parentBlockHash match", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
ParentBlockRoot: []byte("root1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header)
|
||||
result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte("root1"))
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, header, result)
|
||||
})
|
||||
|
||||
t.Run("Return nil when no matching slot and parentBlockHash", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
ParentBlockRoot: []byte("root1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header)
|
||||
result := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte("root1"))
|
||||
require.IsNil(t, result)
|
||||
})
|
||||
|
||||
t.Run("Return nil when no matching slot and parentBlockRoot", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
ParentBlockRoot: []byte("root1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header)
|
||||
result := c.SignedExecutionPayloadHeader(2, []byte("parent1"), []byte("root2"))
|
||||
require.IsNil(t, result)
|
||||
})
|
||||
|
||||
t.Run("Return header when there are two slots in the cache and a match is found", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 200,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
|
||||
// Check for the first header
|
||||
result1 := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
|
||||
require.NotNil(t, result1)
|
||||
require.Equal(t, header1, result1)
|
||||
|
||||
// Check for the second header
|
||||
result2 := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
|
||||
require.NotNil(t, result2)
|
||||
require.Equal(t, header2, result2)
|
||||
})
|
||||
|
||||
t.Run("Return nil when slot is evicted from cache", func(t *testing.T) {
|
||||
c := NewExecutionPayloadHeaders()
|
||||
header1 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 1,
|
||||
ParentBlockHash: []byte("parent1"),
|
||||
Value: 100,
|
||||
},
|
||||
}
|
||||
header2 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 2,
|
||||
ParentBlockHash: []byte("parent2"),
|
||||
Value: 200,
|
||||
},
|
||||
}
|
||||
header3 := &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
Slot: 3,
|
||||
ParentBlockHash: []byte("parent3"),
|
||||
Value: 300,
|
||||
},
|
||||
}
|
||||
c.SaveSignedExecutionPayloadHeader(header1)
|
||||
c.SaveSignedExecutionPayloadHeader(header2)
|
||||
c.SaveSignedExecutionPayloadHeader(header3)
|
||||
|
||||
// The first slot should be evicted, so result should be nil
|
||||
result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
|
||||
require.IsNil(t, result)
|
||||
|
||||
// The second slot should still be present
|
||||
result = c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, header2, result)
|
||||
|
||||
// The third slot should be present
|
||||
result = c.SignedExecutionPayloadHeader(3, []byte("parent3"), []byte{})
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, header3, result)
|
||||
})
|
||||
}
|
||||
@@ -187,11 +187,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// return Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=withdrawal_credentials,
|
||||
// effective_balance=effective_balance,
|
||||
// slashed=False,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=effective_balance,
|
||||
// )
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
|
||||
effectiveBalance := amount - (amount % params.BeaconConfig().EffectiveBalanceIncrement)
|
||||
@@ -202,10 +203,11 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
|
||||
return ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: effectiveBalance,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -107,6 +107,7 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//testing/util/random:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
|
||||
@@ -448,6 +448,7 @@ func TestValidateIndexedAttestation_AboveMaxLength(t *testing.T) {
|
||||
Target: ðpb.Checkpoint{
|
||||
Epoch: primitives.Epoch(i),
|
||||
},
|
||||
Source: ðpb.Checkpoint{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -489,6 +490,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) {
|
||||
Target: ðpb.Checkpoint{
|
||||
Root: []byte{},
|
||||
},
|
||||
Source: ðpb.Checkpoint{},
|
||||
},
|
||||
Signature: sig.Marshal(),
|
||||
AggregationBits: list,
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
@@ -222,6 +223,42 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
case *ethpb.BeaconStateEPBS:
|
||||
kzgs := make([][]byte, 0)
|
||||
kzgRoot, err := ssz.KzgCommitmentsRoot(kzgs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return blocks.NewSignedBeaconBlock(ðpb.SignedBeaconBlockEpbs{
|
||||
Block: ðpb.BeaconBlockEpbs{
|
||||
ParentRoot: params.BeaconConfig().ZeroHash[:],
|
||||
StateRoot: root[:],
|
||||
Body: ðpb.BeaconBlockBodyEpbs{
|
||||
RandaoReveal: make([]byte, 96),
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
Graffiti: make([]byte, 32),
|
||||
SyncAggregate: ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
SignedExecutionPayloadHeader: &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
BlobKzgCommitmentsRoot: kzgRoot[:],
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
},
|
||||
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
|
||||
PayloadAttestations: make([]*ethpb.PayloadAttestation, 0),
|
||||
},
|
||||
},
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
})
|
||||
default:
|
||||
return nil, ErrUnrecognizedState
|
||||
}
|
||||
|
||||
@@ -58,9 +58,15 @@ func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
//
|
||||
// return block.body.execution_payload != ExecutionPayload()
|
||||
func IsExecutionBlock(body interfaces.ReadOnlyBeaconBlockBody) (bool, error) {
|
||||
if body.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
if body.Version() >= version.Capella {
|
||||
return true, nil
|
||||
}
|
||||
payload, err := body.Execution()
|
||||
switch {
|
||||
case errors.Is(err, consensus_types.ErrUnsupportedField):
|
||||
@@ -202,24 +208,24 @@ func ValidatePayload(st state.BeaconState, payload interfaces.ExecutionData) err
|
||||
// block_hash=payload.block_hash,
|
||||
// transactions_root=hash_tree_root(payload.transactions),
|
||||
// )
|
||||
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (state.BeaconState, error) {
|
||||
func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
payload, err := body.Execution()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := verifyBlobCommitmentCount(body); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := ValidatePayload(st, payload); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if err := st.SetLatestExecutionPayloadHeader(payload); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return st, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
@@ -239,12 +245,47 @@ func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
// GetBlockPayloadHash returns the hash of the execution payload of the block
|
||||
func GetBlockPayloadHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
|
||||
var payloadHash [32]byte
|
||||
if IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
if blk.Version() >= version.EPBS {
|
||||
header, err := blk.Body().SignedExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
payload, err := header.Header()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return payload.BlockHash(), nil
|
||||
}
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
if blk.Version() >= version.Bellatrix {
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return payloadHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.BlockHash()), nil
|
||||
return payloadHash, nil
|
||||
}
|
||||
|
||||
// GetBlockParentHash returns the hash of the parent execution payload
|
||||
func GetBlockParentHash(blk interfaces.ReadOnlyBeaconBlock) ([32]byte, error) {
|
||||
var parentHash [32]byte
|
||||
if blk.Version() >= version.EPBS {
|
||||
header, err := blk.Body().SignedExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return parentHash, err
|
||||
}
|
||||
payload, err := header.Header()
|
||||
if err != nil {
|
||||
return parentHash, err
|
||||
}
|
||||
return payload.ParentBlockHash(), nil
|
||||
}
|
||||
if blk.Version() >= version.Bellatrix {
|
||||
payload, err := blk.Body().Execution()
|
||||
if err != nil {
|
||||
return parentHash, err
|
||||
}
|
||||
return bytesutil.ToBytes32(payload.ParentHash()), nil
|
||||
}
|
||||
return parentHash, nil
|
||||
}
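Both helpers dispatch on the block version: ePBS blocks expose the hashes through the signed execution payload header, Bellatrix and later (pre-ePBS) blocks through the execution payload, and earlier forks fall through to the zero hash. A hedged caller-side sketch pairing the two lookups; `executionHashes` is a hypothetical helper, not part of the package:

```go
// executionHashes is a hypothetical helper that resolves a block's execution payload
// hash and parent hash using the version dispatch of the two functions above.
func executionHashes(blk interfaces.ReadOnlyBeaconBlock) (payload, parent [32]byte, err error) {
	payload, err = blocks.GetBlockPayloadHash(blk)
	if err != nil {
		return
	}
	parent, err = blocks.GetBlockParentHash(blk)
	if err != nil {
		return
	}
	// Pre-Bellatrix blocks leave both values as the zero hash, mirroring the early returns above.
	return
}
```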
|
||||
|
||||
@@ -253,7 +253,8 @@ func Test_IsExecutionBlockCapella(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, got)
|
||||
// #14614
|
||||
require.Equal(t, true, got)
|
||||
}
|
||||
|
||||
func Test_IsExecutionEnabled(t *testing.T) {
|
||||
@@ -587,8 +588,7 @@ func Test_ProcessPayload(t *testing.T) {
|
||||
ExecutionPayload: tt.payload,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, body)
|
||||
if err != nil {
|
||||
if err := blocks.ProcessPayload(st, body); err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
@@ -619,8 +619,7 @@ func Test_ProcessPayloadCapella(t *testing.T) {
|
||||
ExecutionPayload: payload,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = blocks.ProcessPayload(st, body)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, blocks.ProcessPayload(st, body))
|
||||
}
|
||||
|
||||
func Test_ProcessPayload_Blinded(t *testing.T) {
|
||||
@@ -677,8 +676,7 @@ func Test_ProcessPayload_Blinded(t *testing.T) {
|
||||
ExecutionPayloadHeader: p,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
st, err := blocks.ProcessPayload(st, body)
|
||||
if err != nil {
|
||||
if err := blocks.ProcessPayload(st, body); err != nil {
|
||||
require.Equal(t, tt.err.Error(), err.Error())
|
||||
} else {
|
||||
require.Equal(t, tt.err, err)
|
||||
|
||||
@@ -14,8 +14,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
@@ -115,98 +115,66 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// ProcessWithdrawals processes the validator withdrawals from the provided execution payload
|
||||
// into the beacon state.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
|
||||
//
|
||||
// assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
//
|
||||
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
|
||||
// assert withdrawal == expected_withdrawal
|
||||
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
//
|
||||
// # Update pending partial withdrawals [New in Electra:EIP7251]
|
||||
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[partial_withdrawals_count:]
|
||||
//
|
||||
// # Update the next withdrawal index if this block contained withdrawals
|
||||
// if len(expected_withdrawals) != 0:
|
||||
// latest_withdrawal = expected_withdrawals[-1]
|
||||
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
|
||||
//
|
||||
// # Update the next validator index to start the next withdrawal sweep
|
||||
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
// # Next sweep starts after the latest withdrawal's validator index
|
||||
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
// else:
|
||||
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
func checkWithdrawalsAgainstPayload(
|
||||
executionData interfaces.ExecutionData,
|
||||
numExpected int,
|
||||
expectedRoot [32]byte,
|
||||
) error {
|
||||
var wdRoot [32]byte
|
||||
if executionData.IsBlinded() {
|
||||
r, err := executionData.WithdrawalsRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
return errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
wdRoot = bytesutil.ToBytes32(r)
|
||||
copy(wdRoot[:], r)
|
||||
} else {
|
||||
wds, err := executionData.Withdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals")
|
||||
return errors.Wrap(err, "could not get withdrawals")
|
||||
}
|
||||
|
||||
if len(wds) != len(expectedWithdrawals) {
|
||||
return nil, fmt.Errorf("execution payload header has %d withdrawals when %d were expected", len(wds), len(expectedWithdrawals))
|
||||
if len(wds) != numExpected {
|
||||
return fmt.Errorf("execution payload header has %d withdrawals when %d were expected", len(wds), numExpected)
|
||||
}
|
||||
|
||||
wdRoot, err = ssz.WithdrawalSliceRoot(wds, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get withdrawals root")
|
||||
return errors.Wrap(err, "could not get withdrawals root")
|
||||
}
|
||||
}
|
||||
|
||||
expectedRoot, err := ssz.WithdrawalSliceRoot(expectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals root")
|
||||
}
|
||||
if expectedRoot != wdRoot {
|
||||
return nil, fmt.Errorf("expected withdrawals root %#x, got %#x", expectedRoot, wdRoot)
|
||||
return fmt.Errorf("expected withdrawals root %#x, got %#x", expectedRoot, wdRoot)
|
||||
}
|
||||
return nil
|
||||
}
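The refactored helper compares a single SSZ hash tree root instead of walking the withdrawal list element by element. A short sketch of how the expected root is produced before the call, using the same `ssz.WithdrawalSliceRoot` call that `ProcessWithdrawals` below relies on (fragment; assumes `expectedWithdrawals` and `executionData` are in scope):

```go
// Sketch: derive the expected withdrawals root, then verify the payload against it.
expectedRoot, err := ssz.WithdrawalSliceRoot(expectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
if err != nil {
	return errors.Wrap(err, "could not get expected withdrawals root")
}
if err := checkWithdrawalsAgainstPayload(executionData, len(expectedWithdrawals), expectedRoot); err != nil {
	return err
}
```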
|
||||
|
||||
func processWithdrawalStateTransition(
|
||||
st state.BeaconState,
|
||||
expectedWithdrawals []*enginev1.Withdrawal,
|
||||
partialWithdrawalsCount uint64,
|
||||
) (err error) {
|
||||
for _, withdrawal := range expectedWithdrawals {
|
||||
err := helpers.DecreaseBalance(st, withdrawal.ValidatorIndex, withdrawal.Amount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not decrease balance")
|
||||
return errors.Wrap(err, "could not decrease balance")
|
||||
}
|
||||
}
|
||||
|
||||
if st.Version() >= version.Electra {
|
||||
if err := st.DequeuePartialWithdrawals(partialWithdrawalsCount); err != nil {
|
||||
return nil, fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
|
||||
if err := st.DequeuePendingPartialWithdrawals(partialWithdrawalsCount); err != nil {
|
||||
return fmt.Errorf("unable to dequeue partial withdrawals from state: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(expectedWithdrawals) > 0 {
|
||||
if err := st.SetNextWithdrawalIndex(expectedWithdrawals[len(expectedWithdrawals)-1].Index + 1); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal index")
|
||||
return errors.Wrap(err, "could not set next withdrawal index")
|
||||
}
|
||||
}
|
||||
var nextValidatorIndex primitives.ValidatorIndex
|
||||
if uint64(len(expectedWithdrawals)) < params.BeaconConfig().MaxWithdrawalsPerPayload {
|
||||
nextValidatorIndex, err = st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
return errors.Wrap(err, "could not get next withdrawal validator index")
|
||||
}
|
||||
nextValidatorIndex += primitives.ValidatorIndex(params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep)
|
||||
nextValidatorIndex = nextValidatorIndex % primitives.ValidatorIndex(st.NumValidators())
|
||||
@@ -217,7 +185,86 @@ func ProcessWithdrawals(st state.BeaconState, executionData interfaces.Execution
|
||||
}
|
||||
}
|
||||
if err := st.SetNextWithdrawalValidatorIndex(nextValidatorIndex); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
return errors.Wrap(err, "could not set next withdrawal validator index")
|
||||
}
|
||||
return nil
|
||||
}
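When fewer than `MaxWithdrawalsPerPayload` withdrawals were produced, the sweep start index jumps ahead by the full sweep length, modulo the validator count. A worked example using the numbers from the "success no withdrawals" case in the EPBS test further down (128 validators, sweep length 80, starting index 10):

```go
// Worked example of the sweep advance above; values taken from the test case below.
next := uint64(10) + 80 // no withdrawals in the payload, so advance by the full sweep length
next %= 128             // 90, the expected NextWithdrawalValidatorIndex
```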
|
||||
|
||||
// ProcessWithdrawals processes the validator withdrawals from the provided execution payload
|
||||
// into the beacon state.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
// if state.fork.current_version >= EIP7732_FORK_VERSION :
|
||||
// if not is_parent_block_full(state): # [New in EIP-7732]
|
||||
// return
|
||||
//
|
||||
// expected_withdrawals, partial_withdrawals_count = get_expected_withdrawals(state) # [Modified in Electra:EIP7251]
|
||||
//
|
||||
// if state.fork.current_version >= EIP7732_FORK_VERSION :
|
||||
// state.latest_withdrawals_root = hash_tree_root(expected_withdrawals) # [New in EIP-7732]
|
||||
// else :
|
||||
// assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
//
|
||||
// assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
//
|
||||
// for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
|
||||
// assert withdrawal == expected_withdrawal
|
||||
// decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
//
|
||||
// # Update pending partial withdrawals [New in Electra:EIP7251]
|
||||
// state.pending_partial_withdrawals = state.pending_partial_withdrawals[processed_partial_withdrawals_count:]
|
||||
//
|
||||
// # Update the next withdrawal index if this block contained withdrawals
|
||||
// if len(expected_withdrawals) != 0:
|
||||
// latest_withdrawal = expected_withdrawals[-1]
|
||||
// state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
|
||||
//
|
||||
// # Update the next validator index to start the next withdrawal sweep
|
||||
// if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
// # Next sweep starts after the latest withdrawal's validator index
|
||||
// next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
// else:
|
||||
// # Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
// next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
// next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
// state.next_withdrawal_validator_index = next_validator_index
|
||||
func ProcessWithdrawals(st state.BeaconState, executionData interfaces.ExecutionData) (state.BeaconState, error) {
|
||||
if st.Version() >= version.EPBS {
|
||||
isParentBlockFull, err := st.IsParentBlockFull()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not check if parent block is full")
|
||||
}
|
||||
|
||||
if !isParentBlockFull {
|
||||
return st, nil
|
||||
}
|
||||
}
|
||||
|
||||
expectedWithdrawals, partialWithdrawalsCount, err := st.ExpectedWithdrawals()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals")
|
||||
}
|
||||
|
||||
expectedRoot, err := ssz.WithdrawalSliceRoot(expectedWithdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get expected withdrawals root")
|
||||
}
|
||||
|
||||
if st.Version() >= version.EPBS {
|
||||
err = st.SetLastWithdrawalsRoot(expectedRoot[:])
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not set withdrawals root")
|
||||
}
|
||||
} else {
|
||||
if err := checkWithdrawalsAgainstPayload(executionData, len(expectedWithdrawals), expectedRoot); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := processWithdrawalStateTransition(st, expectedWithdrawals, partialWithdrawalsCount); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
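Under ePBS the function gains two branches: it returns early, before any balance changes, when the parent block was not full, and it records the expected withdrawals root in the state via `SetLastWithdrawalsRoot` instead of checking it against a payload (the payload itself arrives later in the envelope). A hedged usage sketch, assuming `st` and `executionData` are in scope:

```go
// Sketch only: ProcessWithdrawals keeps its (state, error) signature across forks.
post, err := blocks.ProcessWithdrawals(st, executionData)
if err != nil {
	return errors.Wrap(err, "could not process withdrawals")
}
// Under ePBS, post may be returned before any balances change when the parent block was not full.
st = post
```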
|
||||
|
||||
@@ -22,6 +22,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
@@ -1132,13 +1133,407 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
checkPostState(t, test.Control, post)
|
||||
}
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessWithdrawalsEPBS(t *testing.T) {
|
||||
const (
|
||||
currentEpoch = primitives.Epoch(10)
|
||||
epochInFuture = primitives.Epoch(12)
|
||||
epochInPast = primitives.Epoch(8)
|
||||
numValidators = 128
|
||||
notWithdrawableIndex = 127
|
||||
notPartiallyWithdrawable = 126
|
||||
maxSweep = uint64(80)
|
||||
)
|
||||
maxEffectiveBalance := params.BeaconConfig().MaxEffectiveBalance
|
||||
|
||||
type args struct {
|
||||
Name string
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
FullWithdrawalIndices []primitives.ValidatorIndex
|
||||
PendingPartialWithdrawalIndices []primitives.ValidatorIndex
|
||||
Withdrawals []*enginev1.Withdrawal
|
||||
PendingPartialWithdrawals []*ethpb.PendingPartialWithdrawal // Electra
|
||||
LatestBlockHash []byte // EIP-7732
|
||||
}
|
||||
type control struct {
|
||||
NextWithdrawalValidatorIndex primitives.ValidatorIndex
|
||||
NextWithdrawalIndex uint64
|
||||
Balances map[uint64]uint64
|
||||
}
|
||||
type Test struct {
|
||||
Args args
|
||||
Control control
|
||||
}
|
||||
executionAddress := func(i primitives.ValidatorIndex) []byte {
|
||||
wc := make([]byte, 20)
|
||||
wc[19] = byte(i)
|
||||
return wc
|
||||
}
|
||||
withdrawalAmount := func(i primitives.ValidatorIndex) uint64 {
|
||||
return maxEffectiveBalance + uint64(i)*100000
|
||||
}
|
||||
fullWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
Address: executionAddress(i),
|
||||
Amount: withdrawalAmount(i),
|
||||
}
|
||||
}
|
||||
PendingPartialWithdrawal := func(i primitives.ValidatorIndex, idx uint64) *enginev1.Withdrawal {
|
||||
return &enginev1.Withdrawal{
|
||||
Index: idx,
|
||||
ValidatorIndex: i,
|
||||
Address: executionAddress(i),
|
||||
Amount: withdrawalAmount(i) - maxEffectiveBalance,
|
||||
}
|
||||
}
|
||||
tests := []Test{
|
||||
{
|
||||
Args: args{
|
||||
Name: "success no withdrawals",
|
||||
NextWithdrawalValidatorIndex: 10,
|
||||
NextWithdrawalIndex: 3,
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 90,
|
||||
NextWithdrawalIndex: 3,
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success one full withdrawal",
|
||||
NextWithdrawalIndex: 3,
|
||||
NextWithdrawalValidatorIndex: 5,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{70},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(70, 3),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 85,
|
||||
NextWithdrawalIndex: 4,
|
||||
Balances: map[uint64]uint64{70: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success one partial withdrawal",
|
||||
NextWithdrawalIndex: 21,
|
||||
NextWithdrawalValidatorIndex: 120,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(7, 21),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 72,
|
||||
NextWithdrawalIndex: 22,
|
||||
Balances: map[uint64]uint64{7: maxEffectiveBalance},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many full withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
Balances: map[uint64]uint64{7: 0, 19: 0, 28: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "less than max sweep at end",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{80, 81, 82, 83},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(80, 22), fullWithdrawal(81, 23), fullWithdrawal(82, 24),
|
||||
fullWithdrawal(83, 25),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 26,
|
||||
Balances: map[uint64]uint64{80: 0, 81: 0, 82: 0, 83: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "less than max sweep and beginning",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{4, 5, 6},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(4, 22), fullWithdrawal(5, 23), fullWithdrawal(6, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
Balances: map[uint64]uint64{4: 0, 5: 0, 6: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many partial withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(7, 22), PendingPartialWithdrawal(19, 23), PendingPartialWithdrawal(28, 24),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 84,
|
||||
NextWithdrawalIndex: 25,
|
||||
Balances: map[uint64]uint64{
|
||||
7: maxEffectiveBalance,
|
||||
19: maxEffectiveBalance,
|
||||
28: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 40,
|
||||
NextWithdrawalIndex: 29,
|
||||
Balances: map[uint64]uint64{
|
||||
7: 0, 19: 0, 28: 0,
|
||||
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
|
||||
15: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success many withdrawals with pending partial withdrawals in state",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 88,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28},
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{2, 1, 89, 15},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(89, 22), PendingPartialWithdrawal(1, 23), PendingPartialWithdrawal(2, 24),
|
||||
fullWithdrawal(7, 25), PendingPartialWithdrawal(15, 26), fullWithdrawal(19, 27),
|
||||
fullWithdrawal(28, 28),
|
||||
},
|
||||
PendingPartialWithdrawals: []*ethpb.PendingPartialWithdrawal{
|
||||
{
|
||||
Index: 11,
|
||||
Amount: withdrawalAmount(11) - maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 40,
|
||||
NextWithdrawalIndex: 29,
|
||||
Balances: map[uint64]uint64{
|
||||
7: 0, 19: 0, 28: 0,
|
||||
2: maxEffectiveBalance, 1: maxEffectiveBalance, 89: maxEffectiveBalance,
|
||||
15: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Args: args{
|
||||
Name: "success more than max fully withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(1, 22), fullWithdrawal(2, 23), fullWithdrawal(3, 24),
|
||||
fullWithdrawal(4, 25), fullWithdrawal(5, 26), fullWithdrawal(6, 27),
|
||||
fullWithdrawal(7, 28), fullWithdrawal(8, 29), fullWithdrawal(9, 30),
|
||||
fullWithdrawal(21, 31), fullWithdrawal(22, 32), fullWithdrawal(23, 33),
|
||||
fullWithdrawal(24, 34), fullWithdrawal(25, 35), fullWithdrawal(26, 36),
|
||||
fullWithdrawal(27, 37),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 28,
|
||||
NextWithdrawalIndex: 38,
|
||||
Balances: map[uint64]uint64{
|
||||
1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0,
|
||||
21: 0, 22: 0, 23: 0, 24: 0, 25: 0, 26: 0, 27: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "success more than max partially withdrawals",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 0,
|
||||
PendingPartialWithdrawalIndices: []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6, 7, 8, 9, 21, 22, 23, 24, 25, 26, 27, 29, 35, 89},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
PendingPartialWithdrawal(1, 22), PendingPartialWithdrawal(2, 23), PendingPartialWithdrawal(3, 24),
|
||||
PendingPartialWithdrawal(4, 25), PendingPartialWithdrawal(5, 26), PendingPartialWithdrawal(6, 27),
|
||||
PendingPartialWithdrawal(7, 28), PendingPartialWithdrawal(8, 29), PendingPartialWithdrawal(9, 30),
|
||||
PendingPartialWithdrawal(21, 31), PendingPartialWithdrawal(22, 32), PendingPartialWithdrawal(23, 33),
|
||||
PendingPartialWithdrawal(24, 34), PendingPartialWithdrawal(25, 35), PendingPartialWithdrawal(26, 36),
|
||||
PendingPartialWithdrawal(27, 37),
|
||||
},
|
||||
},
|
||||
Control: control{
|
||||
NextWithdrawalValidatorIndex: 28,
|
||||
NextWithdrawalIndex: 38,
|
||||
Balances: map[uint64]uint64{
|
||||
1: maxEffectiveBalance,
|
||||
2: maxEffectiveBalance,
|
||||
3: maxEffectiveBalance,
|
||||
4: maxEffectiveBalance,
|
||||
5: maxEffectiveBalance,
|
||||
6: maxEffectiveBalance,
|
||||
7: maxEffectiveBalance,
|
||||
8: maxEffectiveBalance,
|
||||
9: maxEffectiveBalance,
|
||||
21: maxEffectiveBalance,
|
||||
22: maxEffectiveBalance,
|
||||
23: maxEffectiveBalance,
|
||||
24: maxEffectiveBalance,
|
||||
25: maxEffectiveBalance,
|
||||
26: maxEffectiveBalance,
|
||||
27: maxEffectiveBalance,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Args: args{
|
||||
Name: "Parent Node is not full",
|
||||
NextWithdrawalIndex: 22,
|
||||
NextWithdrawalValidatorIndex: 4,
|
||||
FullWithdrawalIndices: []primitives.ValidatorIndex{7, 19, 28, 1},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
fullWithdrawal(7, 22), fullWithdrawal(19, 23), fullWithdrawal(28, 24),
|
||||
},
|
||||
LatestBlockHash: []byte{1, 2, 3},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
checkPostState := func(t *testing.T, expected control, st state.BeaconState) {
|
||||
l, err := st.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected.NextWithdrawalValidatorIndex, l)
|
||||
|
||||
n, err := st.NextWithdrawalIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected.NextWithdrawalIndex, n)
|
||||
balances := st.Balances()
|
||||
for idx, bal := range expected.Balances {
|
||||
require.Equal(t, bal, balances[idx])
|
||||
}
|
||||
}
|
||||
|
||||
prepareValidators := func(st state.BeaconState, arguments args) error {
|
||||
validators := make([]*ethpb.Validator, numValidators)
|
||||
if err := st.SetBalances(make([]uint64, numValidators)); err != nil {
|
||||
return err
|
||||
}
|
||||
for i := range validators {
|
||||
v := ðpb.Validator{}
|
||||
v.EffectiveBalance = maxEffectiveBalance
|
||||
v.WithdrawableEpoch = epochInFuture
|
||||
v.WithdrawalCredentials = make([]byte, 32)
|
||||
v.WithdrawalCredentials[31] = byte(i)
|
||||
if err := st.UpdateBalancesAtIndex(primitives.ValidatorIndex(i), v.EffectiveBalance-uint64(rand.Intn(1000))); err != nil {
|
||||
return err
|
||||
}
|
||||
validators[i] = v
|
||||
}
|
||||
for _, idx := range arguments.FullWithdrawalIndices {
|
||||
if idx != notWithdrawableIndex {
|
||||
validators[idx].WithdrawableEpoch = epochInPast
|
||||
}
|
||||
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
|
||||
return err
|
||||
}
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
}
|
||||
for _, idx := range arguments.PendingPartialWithdrawalIndices {
|
||||
validators[idx].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
if err := st.UpdateBalancesAtIndex(idx, withdrawalAmount(idx)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return st.SetValidators(validators)
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Args.Name, func(t *testing.T) {
|
||||
saved := params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = maxSweep
|
||||
if test.Args.Withdrawals == nil {
|
||||
test.Args.Withdrawals = make([]*enginev1.Withdrawal, 0)
|
||||
}
|
||||
if test.Args.FullWithdrawalIndices == nil {
|
||||
test.Args.FullWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
if test.Args.PendingPartialWithdrawalIndices == nil {
|
||||
test.Args.PendingPartialWithdrawalIndices = make([]primitives.ValidatorIndex, 0)
|
||||
}
|
||||
slot, err := slots.EpochStart(currentEpoch)
|
||||
require.NoError(t, err)
|
||||
var st state.BeaconState
|
||||
var p interfaces.ExecutionData
|
||||
spb := ðpb.BeaconStateEPBS{
|
||||
Slot: slot,
|
||||
NextWithdrawalValidatorIndex: test.Args.NextWithdrawalValidatorIndex,
|
||||
NextWithdrawalIndex: test.Args.NextWithdrawalIndex,
|
||||
PendingPartialWithdrawals: test.Args.PendingPartialWithdrawals,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
BlockHash: []byte{},
|
||||
},
|
||||
LatestBlockHash: test.Args.LatestBlockHash,
|
||||
}
|
||||
st, err = state_native.InitializeFromProtoUnsafeEpbs(spb)
|
||||
require.NoError(t, err)
|
||||
env := random.ExecutionPayloadEnvelope(t)
|
||||
env.Payload.Withdrawals = test.Args.Withdrawals
|
||||
wp, err := consensusblocks.WrappedROExecutionPayloadEnvelope(env)
|
||||
require.NoError(t, err)
|
||||
p, err = wp.Execution()
|
||||
require.NoError(t, err)
|
||||
err = prepareValidators(st, test.Args)
|
||||
require.NoError(t, err)
|
||||
post, err := blocks.ProcessWithdrawals(st, p)
|
||||
if test.Args.Name == "Parent Node is not full" {
|
||||
require.IsNil(t, post)
|
||||
require.IsNil(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
checkPostState(t, test.Control, post)
|
||||
}
|
||||
params.BeaconConfig().MaxValidatorsPerWithdrawalsSweep = saved
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessBLSToExecutionChanges(t *testing.T) {
|
||||
spb := ðpb.BeaconStateCapella{
|
||||
Fork: ðpb.Fork{
|
||||
|
||||
@@ -10,7 +10,6 @@ go_library(
|
||||
"effective_balance_updates.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
|
||||
@@ -61,15 +61,15 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
}
|
||||
var nextPendingConsolidation uint64
|
||||
for _, pc := range pendingConsolidations {
|
||||
sourceValidator, err := st.ValidatorAtIndex(pc.SourceIndex)
|
||||
sourceValidator, err := st.ValidatorAtIndexReadOnly(pc.SourceIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if sourceValidator.Slashed {
|
||||
if sourceValidator.Slashed() {
|
||||
nextPendingConsolidation++
|
||||
continue
|
||||
}
|
||||
if sourceValidator.WithdrawableEpoch > nextEpoch {
|
||||
if sourceValidator.WithdrawableEpoch() > nextEpoch {
|
||||
break
|
||||
}
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/contracts/deposit"
|
||||
@@ -385,8 +386,14 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
return errors.Wrap(err, "batch signature verification failed")
|
||||
}
|
||||
|
||||
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
|
||||
|
||||
// Process each deposit individually
|
||||
for _, pendingDeposit := range pendingDeposits {
|
||||
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
|
||||
if !found {
|
||||
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
|
||||
}
|
||||
validSignature := allSignaturesVerified
|
||||
|
||||
// If batch verification failed, check the individual deposit signature
|
||||
@@ -404,9 +411,16 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
|
||||
// Add validator to the registry if the signature is valid
|
||||
if validSignature {
|
||||
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to add validator to registry")
|
||||
if found {
|
||||
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
|
||||
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
|
||||
return errors.Wrap(err, "could not increase balance")
|
||||
}
|
||||
} else {
|
||||
err = AddValidatorToRegistry(state, pendingDeposit.PublicKey, pendingDeposit.WithdrawalCredentials, pendingDeposit.Amount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to add validator to registry")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
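The `pubKeyMap` guards against the same public key appearing more than once in a single pending-deposit batch: the first occurrence creates the registry entry, later occurrences only top up the balance of the validator just added. A standalone sketch of that dedup pattern; `deposits`, `addValidator` and `topUp` are hypothetical names, not the prysm API:

```go
// Standalone sketch of the dedup above; all identifiers here are illustrative.
seen := make(map[string]struct{}, len(deposits))
for _, d := range deposits {
	key := string(d.PublicKey) // 48-byte BLS pubkey used as the map key
	if _, dup := seen[key]; dup {
		topUp(d) // pubkey already added earlier in this batch: only credit the balance
		continue
	}
	seen[key] = struct{}{}
	addValidator(d) // first sighting of this pubkey: create the registry entry
}
```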
|
||||
@@ -474,7 +488,10 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
|
||||
// set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
|
||||
// set_or_append_list(state.inactivity_scores, index, uint64(0))
|
||||
func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdrawalCredentials []byte, amount uint64) error {
|
||||
val := GetValidatorFromDeposit(pubKey, withdrawalCredentials, amount)
|
||||
val, err := GetValidatorFromDeposit(pubKey, withdrawalCredentials, amount)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get validator from deposit")
|
||||
}
|
||||
if err := beaconState.AppendValidator(val); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -504,11 +521,12 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// validator = Validator(
|
||||
// pubkey=pubkey,
|
||||
// withdrawal_credentials=withdrawal_credentials,
|
||||
// effective_balance=Gwei(0),
|
||||
// slashed=False,
|
||||
// activation_eligibility_epoch=FAR_FUTURE_EPOCH,
|
||||
// activation_epoch=FAR_FUTURE_EPOCH,
|
||||
// exit_epoch=FAR_FUTURE_EPOCH,
|
||||
// withdrawable_epoch=FAR_FUTURE_EPOCH,
|
||||
// effective_balance=Gwei(0),
|
||||
// )
|
||||
//
|
||||
// # [Modified in Electra:EIP7251]
|
||||
@@ -516,19 +534,24 @@ func AddValidatorToRegistry(beaconState state.BeaconState, pubKey []byte, withdr
|
||||
// validator.effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, max_effective_balance)
|
||||
//
|
||||
// return validator
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) *ethpb.Validator {
|
||||
func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount uint64) (*ethpb.Validator, error) {
|
||||
validator := ðpb.Validator{
|
||||
PublicKey: pubKey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
EffectiveBalance: 0,
|
||||
Slashed: false,
|
||||
ActivationEligibilityEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ActivationEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
EffectiveBalance: 0,
|
||||
}
|
||||
maxEffectiveBalance := helpers.ValidatorMaxEffectiveBalance(validator)
|
||||
v, err := state_native.NewValidator(validator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maxEffectiveBalance := helpers.ValidatorMaxEffectiveBalance(v)
|
||||
validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
|
||||
return validator
|
||||
return validator, nil
|
||||
}
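The new validator's effective balance is the deposit amount rounded down to a whole `EffectiveBalanceIncrement` and capped at the credential-dependent maximum. A worked example with illustrative Gwei values (1 ETH increment, 32 ETH cap; not read from the config):

```go
// Worked example of the rounding above; the concrete numbers are illustrative only.
amount := uint64(32_700_000_000)   // 32.7 ETH deposited, in Gwei
increment := uint64(1_000_000_000) // effective balance increment of 1 ETH
maxEB := uint64(32_000_000_000)    // max effective balance for 0x01 credentials
effective := min(amount-(amount%increment), maxEB) // 32_000_000_000 Gwei
_ = effective
```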
|
||||
|
||||
// ProcessDepositRequests is a function as part of electra to process execution layer deposits
|
||||
@@ -550,7 +573,7 @@ func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState,
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// processDepositRequest processes the specific deposit receipt
|
||||
// processDepositRequest processes the specific deposit request
|
||||
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
|
||||
//
|
||||
// # Set deposit request start index
|
||||
@@ -580,8 +603,8 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
|
||||
}
|
||||
if err := beaconState.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
|
||||
Amount: request.Amount,
|
||||
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
|
||||
Amount: request.Amount,
|
||||
Signature: bytesutil.SafeCopyBytes(request.Signature),
|
||||
Slot: beaconState.Slot(),
|
||||
}); err != nil {
|
||||
|
||||
@@ -22,6 +22,40 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessPendingDepositsMultiplesSameDeposits(t *testing.T) {
|
||||
st := stateWithActiveBalanceETH(t, 1000)
|
||||
deps := make([]*eth.PendingDeposit, 2) // Make same deposit twice
|
||||
validators := st.Validators()
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
for i := 0; i < len(deps); i += 1 {
|
||||
wc := make([]byte, 32)
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(i)
|
||||
validators[i].PublicKey = sk.PublicKey().Marshal()
|
||||
validators[i].WithdrawalCredentials = wc
|
||||
deps[i] = stateTesting.GeneratePendingDeposit(t, sk, 32, bytesutil.ToBytes32(wc), 0)
|
||||
}
|
||||
require.NoError(t, st.SetPendingDeposits(deps))
|
||||
|
||||
err = electra.ProcessPendingDeposits(context.TODO(), st, 10000)
|
||||
require.NoError(t, err)
|
||||
|
||||
val := st.Validators()
|
||||
seenPubkeys := make(map[string]struct{})
|
||||
for i := 0; i < len(val); i += 1 {
|
||||
if len(val[i].PublicKey) == 0 {
|
||||
continue
|
||||
}
|
||||
_, ok := seenPubkeys[string(val[i].PublicKey)]
|
||||
if ok {
|
||||
t.Fatalf("duplicated pubkeys")
|
||||
} else {
|
||||
seenPubkeys[string(val[i].PublicKey)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessPendingDeposits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -285,7 +319,7 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
|
||||
wc[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wc[31] = byte(0)
|
||||
validDep := stateTesting.GeneratePendingDeposit(t, sk, params.BeaconConfig().MinActivationBalance, bytesutil.ToBytes32(wc), 0)
|
||||
invalidDep := ð.PendingDeposit{}
|
||||
invalidDep := ð.PendingDeposit{PublicKey: make([]byte, 48)}
|
||||
// have a combination of valid and invalid deposits
|
||||
deps := []*eth.PendingDeposit{validDep, invalidDep}
|
||||
require.NoError(t, electra.BatchProcessNewPendingDeposits(context.Background(), st, deps))
|
||||
|
||||
@@ -29,7 +29,6 @@ var (
|
||||
ProcessParticipationFlagUpdates = altair.ProcessParticipationFlagUpdates
|
||||
ProcessSyncCommitteeUpdates = altair.ProcessSyncCommitteeUpdates
|
||||
AttestationsDelta = altair.AttestationsDelta
|
||||
ProcessSyncAggregate = altair.ProcessSyncAggregate
|
||||
)
|
||||
|
||||
// ProcessEpoch describes the per epoch operations that are performed on the beacon state.
|
||||
|
||||
@@ -64,13 +64,14 @@ func QueueExcessActiveBalance(s state.BeaconState, idx primitives.ValidatorIndex
|
||||
return err
|
||||
}
|
||||
excessBalance := bal - params.BeaconConfig().MinActivationBalance
|
||||
val, err := s.ValidatorAtIndex(idx)
|
||||
val, err := s.ValidatorAtIndexReadOnly(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
return s.AppendPendingDeposit(ðpb.PendingDeposit{
|
||||
PublicKey: val.PublicKey,
|
||||
WithdrawalCredentials: val.WithdrawalCredentials,
|
||||
PublicKey: pk[:],
|
||||
WithdrawalCredentials: val.GetWithdrawalCredentials(),
|
||||
Amount: excessBalance,
|
||||
Signature: common.InfiniteSignature[:],
|
||||
Slot: params.BeaconConfig().GenesisSlot,
|
||||
|
||||
@@ -111,30 +111,31 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
log.Debugf("Skipping execution layer withdrawal request, validator index for %s not found\n", hexutil.Encode(wr.ValidatorPubkey))
|
||||
continue
|
||||
}
|
||||
validator, err := st.ValidatorAtIndex(vIdx)
|
||||
validator, err := st.ValidatorAtIndexReadOnly(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
isCorrectSourceAddress := bytes.Equal(validator.WithdrawalCredentials[12:], wr.SourceAddress)
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
log.Debugln("Skipping execution layer withdrawal request, wrong withdrawal credentials")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the validator is active.
|
||||
if !helpers.IsActiveValidator(validator, currentEpoch) {
|
||||
if !helpers.IsActiveValidatorUsingTrie(validator, currentEpoch) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator not active")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has not yet submitted an exit.
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
if validator.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has submitted an exit already")
|
||||
continue
|
||||
}
|
||||
// Verify the validator has been active long enough.
|
||||
if currentEpoch < validator.ActivationEpoch.AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
if currentEpoch < validator.ActivationEpoch().AddEpoch(params.BeaconConfig().ShardCommitteePeriod) {
|
||||
log.Debugln("Skipping execution layer withdrawal request, validator has not been active long enough")
|
||||
continue
|
||||
}
|
||||
@@ -156,7 +157,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
continue
|
||||
}
|
||||
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance >= params.BeaconConfig().MinActivationBalance
|
||||
hasSufficientEffectiveBalance := validator.EffectiveBalance() >= params.BeaconConfig().MinActivationBalance
|
||||
vBal, err := st.BalanceAtIndex(vIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
52
beacon-chain/core/epbs/BUILD.bazel
Normal file
@@ -0,0 +1,52 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"attestation.go",
|
||||
"execution_payload_envelope.go",
|
||||
"execution_payload_header.go",
|
||||
"operations.go",
|
||||
"payload_attestation.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/validators:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//network/forks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"attestation_test.go",
|
||||
"execution_payload_envelope_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//testing/util/random:go_default_library",
|
||||
],
|
||||
)
|
||||
13
beacon-chain/core/epbs/attestation.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// RemoveValidatorFlag removes validator flag from existing one.
|
||||
func RemoveValidatorFlag(flag, flagPosition uint8) (uint8, error) {
|
||||
if flagPosition > 7 {
|
||||
return flag, fmt.Errorf("flag position %d exceeds length", flagPosition)
|
||||
}
|
||||
return flag & ^(1 << flagPosition), nil
|
||||
}
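`RemoveValidatorFlag` clears a single participation bit with an AND-NOT mask, the inverse of `altair.AddValidatorFlag`. A worked example on a raw participation byte:

```go
// Worked example of the bit-clear above: drop the flag at position 1 (timely target)
// from a byte that has positions 0 (timely source) and 1 set.
flag := uint8(0b0000_0011)
cleared := flag & ^(uint8(1) << 1) // 0b0000_0001: only the source flag remains
_ = cleared
```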
|
||||
93
beacon-chain/core/epbs/attestation_test.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package epbs_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestValidatorFlag_Remove(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
add []uint8
|
||||
remove []uint8
|
||||
expectedTrue []uint8
|
||||
expectedFalse []uint8
|
||||
}{
|
||||
{
|
||||
name: "none",
|
||||
add: []uint8{},
|
||||
remove: []uint8{},
|
||||
expectedTrue: []uint8{},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{
|
||||
name: "source",
|
||||
add: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
remove: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
expectedTrue: []uint8{},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{
|
||||
name: "source, target",
|
||||
add: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex},
|
||||
remove: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
expectedTrue: []uint8{params.BeaconConfig().TimelyTargetFlagIndex},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
{
|
||||
name: "source, target, head",
|
||||
add: []uint8{params.BeaconConfig().TimelySourceFlagIndex, params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
remove: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
expectedTrue: []uint8{params.BeaconConfig().TimelySourceFlagIndex},
|
||||
expectedFalse: []uint8{params.BeaconConfig().TimelyTargetFlagIndex, params.BeaconConfig().TimelyHeadFlagIndex},
|
||||
},
|
||||
}
|
||||
var err error
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
flag := uint8(0)
|
||||
|
||||
// Add flags.
|
||||
for _, flagPosition := range test.add {
|
||||
flag, err = altair.AddValidatorFlag(flag, flagPosition)
|
||||
require.NoError(t, err)
|
||||
|
||||
has, err := altair.HasValidatorFlag(flag, flagPosition)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
|
||||
// Remove flags.
|
||||
for _, flagPosition := range test.remove {
|
||||
flag, err = epbs.RemoveValidatorFlag(flag, flagPosition)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Check if flags are set correctly.
|
||||
for _, flagPosition := range test.expectedTrue {
|
||||
has, err := altair.HasValidatorFlag(flag, flagPosition)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, has)
|
||||
}
|
||||
for _, flagPosition := range test.expectedFalse {
|
||||
has, err := altair.HasValidatorFlag(flag, flagPosition)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, has)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatorFlag_Remove_ExceedsLength(t *testing.T) {
|
||||
_, err := epbs.RemoveValidatorFlag(0, 8)
|
||||
require.ErrorContains(t, "flag position 8 exceeds length", err)
|
||||
}
|
||||
|
||||
func TestValidatorFlag_Remove_NotSet(t *testing.T) {
|
||||
_, err := epbs.RemoveValidatorFlag(0, 1)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
126
beacon-chain/core/epbs/execution_payload_envelope.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
)
|
||||
|
||||
// ValidatePayloadStateTransition performs the process_execution_payload
|
||||
// function.
|
||||
func ValidatePayloadStateTransition(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
if err := UpdateHeaderAndVerify(ctx, preState, envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
committedHeader, err := preState.LatestExecutionPayloadHeaderEPBS()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateAgainstCommittedBid(committedHeader, envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ProcessPayloadStateTransition(ctx, preState, envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
return checkPostStateRoot(ctx, preState, envelope)
|
||||
}
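`ValidatePayloadStateTransition` mutates the state it is given while verifying the envelope (header update, committed-bid checks, payload transition, post-state root), so a caller that only wants validation should hand it a disposable copy. A hedged sketch, assuming `headState`, `envelope` and `ctx` are in scope and the state exposes the usual `Copy` method:

```go
// Sketch only: validate an incoming execution payload envelope without touching the head state.
stCopy := headState.Copy()
if err := epbs.ValidatePayloadStateTransition(ctx, stCopy, envelope); err != nil {
	return errors.Wrap(err, "rejecting execution payload envelope")
}
```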
|
||||
|
||||
func ProcessPayloadStateTransition(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
er := envelope.ExecutionRequests()
|
||||
preState, err := electra.ProcessDepositRequests(ctx, preState, er.Deposits)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process deposit receipts")
|
||||
}
|
||||
preState, err = electra.ProcessWithdrawalRequests(ctx, preState, er.Withdrawals)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process ercution layer withdrawal requests")
|
||||
}
|
||||
if err := electra.ProcessConsolidationRequests(ctx, preState, er.Consolidations); err != nil {
|
||||
return errors.Wrap(err, "could not process consolidation requests")
|
||||
}
|
||||
payload, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
if err := preState.SetLatestBlockHash(payload.BlockHash()); err != nil {
|
||||
return err
|
||||
}
|
||||
return preState.SetLatestFullSlot(preState.Slot())
|
||||
}
|
||||
|
||||
func UpdateHeaderAndVerify(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
blockHeader := preState.LatestBlockHeader()
|
||||
if blockHeader == nil {
|
||||
return errors.New("invalid nil latest block header")
|
||||
}
|
||||
if len(blockHeader.StateRoot) == 0 || [32]byte(blockHeader.StateRoot) == [32]byte{} {
|
||||
prevStateRoot, err := preState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute previous state root")
|
||||
}
|
||||
blockHeader.StateRoot = prevStateRoot[:]
|
||||
if err := preState.SetLatestBlockHeader(blockHeader); err != nil {
|
||||
return errors.Wrap(err, "could not set latest block header")
|
||||
}
|
||||
}
|
||||
blockHeaderRoot, err := blockHeader.HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
beaconBlockRoot := envelope.BeaconBlockRoot()
|
||||
if blockHeaderRoot != beaconBlockRoot {
|
||||
return fmt.Errorf("beacon block root does not match previous header, got: %#x wanted: %#x", beaconBlockRoot, blockHeaderRoot)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateAgainstCommittedBid(
|
||||
committedHeader *enginev1.ExecutionPayloadHeaderEPBS,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
builderIndex := envelope.BuilderIndex()
|
||||
if committedHeader.BuilderIndex != builderIndex {
|
||||
return errors.New("builder index does not match committed header")
|
||||
}
|
||||
kzgRoot, err := envelope.BlobKzgCommitmentsRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if [32]byte(committedHeader.BlobKzgCommitmentsRoot) != kzgRoot {
|
||||
return errors.New("blob KZG commitments root does not match committed header")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkPostStateRoot(
|
||||
ctx context.Context,
|
||||
preState state.BeaconState,
|
||||
envelope interfaces.ROExecutionPayloadEnvelope,
|
||||
) error {
|
||||
stateRoot, err := preState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
envelopeStateRoot := envelope.StateRoot()
|
||||
if stateRoot != envelopeStateRoot {
|
||||
return errors.New("state root mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
109
beacon-chain/core/epbs/execution_payload_envelope_test.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
|
||||
)
|
||||
|
||||
func TestProcessPayloadStateTransition(t *testing.T) {
|
||||
bh := [32]byte{'h'}
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.Payload.BlockHash = bh[:]
|
||||
p.ExecutionRequests = &enginev1.ExecutionRequests{}
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
validators := make([]*ethpb.Validator, 0)
|
||||
stpb := ðpb.BeaconStateEPBS{Slot: 3, Validators: validators}
|
||||
st, err := state_native.InitializeFromProtoUnsafeEpbs(stpb)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
|
||||
lbh, err := st.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, [32]byte(lbh))
|
||||
|
||||
require.NoError(t, ProcessPayloadStateTransition(ctx, st, e))
|
||||
|
||||
lbh, err = st.LatestBlockHash()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bh, [32]byte(lbh))
|
||||
|
||||
lfs, err := st.LatestFullSlot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, lfs, st.Slot())
|
||||
}
|
||||
|
||||
func Test_validateAgainstHeader(t *testing.T) {
|
||||
bh := [32]byte{'h'}
|
||||
payload := &enginev1.ExecutionPayloadElectra{BlockHash: bh[:]}
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.Payload = payload
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
stpb := ðpb.BeaconStateEPBS{Slot: 3}
|
||||
st, err := state_native.InitializeFromProtoUnsafeEpbs(stpb)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
require.ErrorContains(t, "invalid nil latest block header", UpdateHeaderAndVerify(ctx, st, e))
|
||||
|
||||
prest, _ := util.DeterministicGenesisStateEpbs(t, 64)
|
||||
br := [32]byte{'r'}
|
||||
p.BeaconBlockRoot = br[:]
|
||||
require.ErrorContains(t, "beacon block root does not match previous header", UpdateHeaderAndVerify(ctx, prest, e))
|
||||
|
||||
header := prest.LatestBlockHeader()
|
||||
require.NoError(t, err)
|
||||
headerRoot, err := header.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
p.BeaconBlockRoot = headerRoot[:]
|
||||
require.NoError(t, UpdateHeaderAndVerify(ctx, prest, e))
|
||||
}
|
||||
|
||||
func Test_validateAgainstCommittedBid(t *testing.T) {
|
||||
payload := &enginev1.ExecutionPayloadElectra{}
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.Payload = payload
|
||||
p.BuilderIndex = 1
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
h := &enginev1.ExecutionPayloadHeaderEPBS{}
|
||||
require.ErrorContains(t, "builder index does not match committed header", validateAgainstCommittedBid(h, e))
|
||||
|
||||
h.BuilderIndex = 1
|
||||
p.BlobKzgCommitments = make([][]byte, 6)
|
||||
for i := range p.BlobKzgCommitments {
|
||||
p.BlobKzgCommitments[i] = make([]byte, 48)
|
||||
}
|
||||
h.BlobKzgCommitmentsRoot = make([]byte, 32)
|
||||
require.ErrorContains(t, "blob KZG commitments root does not match committed header", validateAgainstCommittedBid(h, e))
|
||||
|
||||
root, err := e.BlobKzgCommitmentsRoot()
|
||||
require.NoError(t, err)
|
||||
h.BlobKzgCommitmentsRoot = root[:]
|
||||
require.NoError(t, validateAgainstCommittedBid(h, e))
|
||||
}
|
||||
|
||||
func TestCheckPostStateRoot(t *testing.T) {
|
||||
payload := &enginev1.ExecutionPayloadElectra{}
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.Payload = payload
|
||||
p.BuilderIndex = 1
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
ctx := context.Background()
|
||||
st, _ := util.DeterministicGenesisStateEpbs(t, 64)
|
||||
p.StateRoot = make([]byte, 32)
|
||||
require.ErrorContains(t, "state root mismatch", checkPostStateRoot(ctx, st, e))
|
||||
root, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
p.StateRoot = root[:]
|
||||
require.NoError(t, checkPostStateRoot(ctx, st, e))
|
||||
}
|
||||
50
beacon-chain/core/epbs/execution_payload_header.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/forks"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// ValidatePayloadHeaderSignature validates the signature of the execution payload header.
|
||||
func ValidatePayloadHeaderSignature(st state.ReadOnlyBeaconState, sh interfaces.ROSignedExecutionPayloadHeader) error {
|
||||
h, err := sh.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pubkey := st.PubkeyAtIndex(h.BuilderIndex())
|
||||
pub, err := bls.PublicKeyFromBytes(pubkey[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := sh.Signature()
|
||||
sig, err := bls.SignatureFromBytes(s[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
currentEpoch := slots.ToEpoch(h.Slot())
|
||||
f, err := forks.Fork(currentEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
domain, err := signing.Domain(f, currentEpoch, params.BeaconConfig().DomainBeaconBuilder, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := sh.SigningRoot(domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sig.Verify(pub, root[:]) {
|
||||
return signing.ErrSigFailedToVerify
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
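A minimal usage sketch for ValidatePayloadHeaderSignature, assuming a post-ePBS state `st` and a block body `body` from the normal block-processing path; the real caller is processExecutionPayloadHeader further down in this diff, so this only illustrates the call shape.

	sh, err := body.SignedExecutionPayloadHeader()
	if err != nil {
		return err
	}
	if err := epbs.ValidatePayloadHeaderSignature(st, sh); err != nil {
		return errors.Wrap(err, "invalid builder signature on execution payload header")
	}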
@@ -1,4 +1,4 @@
|
||||
package electra
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -6,9 +6,11 @@ import (
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -62,11 +64,11 @@ func ProcessOperations(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attester slashing")
|
||||
}
|
||||
st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
|
||||
st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process altair attestation")
|
||||
}
|
||||
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
|
||||
if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
|
||||
return nil, errors.Wrap(err, "could not process altair deposit")
|
||||
}
|
||||
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits())
|
||||
@@ -77,20 +79,27 @@ func ProcessOperations(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process bls-to-execution changes")
|
||||
}
|
||||
// new in ePBS
|
||||
if block.Version() >= version.EPBS {
|
||||
if err := ProcessPayloadAttestations(st, bb); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
// new in electra
|
||||
requests, err := bb.ExecutionRequests()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution requests")
|
||||
}
|
||||
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
st, err = electra.ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit receipts")
|
||||
return nil, errors.Wrap(err, "could not process deposit requests")
|
||||
}
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
st, err = electra.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution layer withdrawal requests")
|
||||
return nil, errors.Wrap(err, "could not process withdrawal requests")
|
||||
}
|
||||
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
if err := electra.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
|
||||
}
|
||||
return st, nil
|
||||
125
beacon-chain/core/epbs/payload_attestation.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package epbs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
)
|
||||
|
||||
func ProcessPayloadAttestations(state state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
atts, err := body.PayloadAttestations()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(atts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 12*time.Second)
|
||||
defer cancel()
|
||||
lbh := state.LatestBlockHeader()
|
||||
proposerIndex := lbh.ProposerIndex
|
||||
var participation []byte
|
||||
if state.Slot()%params.BeaconConfig().SlotsPerEpoch == 0 {
|
||||
participation, err = state.PreviousEpochParticipation()
|
||||
} else {
|
||||
participation, err = state.CurrentEpochParticipation()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
totalBalance, err := helpers.TotalActiveBalance(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
baseReward, err := altair.BaseRewardWithTotalBalance(state, proposerIndex, totalBalance)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lfs, err := state.LatestFullSlot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
sourceFlagIndex := cfg.TimelySourceFlagIndex
|
||||
targetFlagIndex := cfg.TimelyTargetFlagIndex
|
||||
headFlagIndex := cfg.TimelyHeadFlagIndex
|
||||
penaltyNumerator := uint64(0)
|
||||
rewardNumerator := uint64(0)
|
||||
rewardDenominator := (cfg.WeightDenominator - cfg.ProposerWeight) * cfg.WeightDenominator / cfg.ProposerWeight
|
||||
|
||||
for _, att := range atts {
|
||||
data := att.Data
|
||||
if !bytes.Equal(data.BeaconBlockRoot, lbh.ParentRoot) {
|
||||
return errors.New("invalid beacon block root in payload attestation data")
|
||||
}
|
||||
if data.Slot+1 != state.Slot() {
|
||||
return errors.New("invalid data slot")
|
||||
}
|
||||
indexed, err := helpers.GetIndexedPayloadAttestation(ctx, state, data.Slot, att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
valid, err := helpers.IsValidIndexedPayloadAttestation(state, indexed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !valid {
|
||||
return errors.New("invalid payload attestation")
|
||||
}
|
||||
payloadWasPresent := data.Slot == lfs
|
||||
votedPresent := data.PayloadStatus == primitives.PAYLOAD_PRESENT
|
||||
if votedPresent != payloadWasPresent {
|
||||
for _, idx := range indexed.GetAttestingIndices() {
|
||||
flags := participation[idx]
|
||||
has, err := altair.HasValidatorFlag(flags, targetFlagIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if has {
|
||||
penaltyNumerator += baseReward * cfg.TimelyTargetWeight
|
||||
}
|
||||
has, err = altair.HasValidatorFlag(flags, sourceFlagIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if has {
|
||||
penaltyNumerator += baseReward * cfg.TimelySourceWeight
|
||||
}
|
||||
has, err = altair.HasValidatorFlag(flags, headFlagIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if has {
|
||||
penaltyNumerator += baseReward * cfg.TimelyHeadWeight
|
||||
}
|
||||
participation[idx] = 0
|
||||
}
|
||||
} else {
|
||||
for _, idx := range indexed.GetAttestingIndices() {
|
||||
participation[idx] = (1 << headFlagIndex) | (1 << sourceFlagIndex) | (1 << targetFlagIndex)
|
||||
rewardNumerator += baseReward * (cfg.TimelyHeadWeight + cfg.TimelySourceWeight + cfg.TimelyTargetWeight)
|
||||
}
|
||||
}
|
||||
}
|
||||
if penaltyNumerator > 0 {
|
||||
if err := helpers.DecreaseBalance(state, proposerIndex, penaltyNumerator/rewardDenominator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if rewardNumerator > 0 {
|
||||
if err := helpers.IncreaseBalance(state, proposerIndex, rewardNumerator/rewardDenominator); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
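For orientation, with mainnet weights (PROPOSER_WEIGHT = 8, WEIGHT_DENOMINATOR = 64, TIMELY_SOURCE_WEIGHT = TIMELY_HEAD_WEIGHT = 14, TIMELY_TARGET_WEIGHT = 26) the proposer reward denominator used above works out as follows; this is only a worked example of the arithmetic already in the function.

	// rewardDenominator = (64 - 8) * 64 / 8 = 448
	// A PTC vote that matches the actual payload status credits the proposer
	//   baseReward * (14 + 26 + 14) / 448 = baseReward * 54 / 448
	// per attesting index, while a mismatching vote penalizes the proposer by the
	// same weights for whichever participation flags the attester already had set.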
@@ -31,6 +31,8 @@ const (
|
||||
LightClientFinalityUpdate
|
||||
// LightClientOptimisticUpdate event
|
||||
LightClientOptimisticUpdate
|
||||
// PayloadAttributes events are fired upon a missed slot or new head.
|
||||
PayloadAttributes
|
||||
)
|
||||
|
||||
// BlockProcessedData is the data sent with BlockProcessed events.
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"block.go",
|
||||
"genesis.go",
|
||||
"metrics.go",
|
||||
"payload_attestation.go",
|
||||
"randao.go",
|
||||
"rewards_penalties.go",
|
||||
"shuffle.go",
|
||||
@@ -20,11 +21,13 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/epbs:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
@@ -53,6 +56,8 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"beacon_committee_test.go",
|
||||
"block_test.go",
|
||||
"exports_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"private_access_fuzz_noop_test.go", # keep
|
||||
"private_access_test.go",
|
||||
"randao_test.go",
|
||||
@@ -69,21 +74,27 @@ go_test(
|
||||
tags = ["CI_race_detection"],
|
||||
deps = [
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/epbs:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//crypto/rand:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//testing/util/random:go_default_library",
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
|
||||
@@ -23,11 +23,8 @@ var (
|
||||
// Access to these nil fields will result in run time panic,
|
||||
// it is recommended to run these checks as first line of defense.
|
||||
func ValidateNilAttestation(attestation ethpb.Att) error {
|
||||
if attestation == nil {
|
||||
return errors.New("attestation can't be nil")
|
||||
}
|
||||
if attestation.GetData() == nil {
|
||||
return errors.New("attestation's data can't be nil")
|
||||
if attestation == nil || attestation.IsNil() {
|
||||
return errors.New("attestation is nil")
|
||||
}
|
||||
if attestation.GetData().Source == nil {
|
||||
return errors.New("attestation's source can't be nil")
|
||||
|
||||
@@ -260,12 +260,12 @@ func TestValidateNilAttestation(t *testing.T) {
|
||||
{
|
||||
name: "nil attestation",
|
||||
attestation: nil,
|
||||
errString: "attestation can't be nil",
|
||||
errString: "attestation is nil",
|
||||
},
|
||||
{
|
||||
name: "nil attestation data",
|
||||
attestation: &ethpb.Attestation{},
|
||||
errString: "attestation's data can't be nil",
|
||||
errString: "attestation is nil",
|
||||
},
|
||||
{
|
||||
name: "nil attestation source",
|
||||
|
||||
@@ -213,6 +213,7 @@ type CommitteeAssignment struct {
|
||||
Committee []primitives.ValidatorIndex
|
||||
AttesterSlot primitives.Slot
|
||||
CommitteeIndex primitives.CommitteeIndex
|
||||
PtcSlot primitives.Slot
|
||||
}
|
||||
|
||||
// verifyAssignmentEpoch verifies if the given epoch is valid for assignment based on the provided state.
|
||||
@@ -294,7 +295,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
if err := verifyAssignmentEpoch(epoch, state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
startSlot, err := slots.EpochStart(epoch)
|
||||
slot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -303,14 +304,17 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
vals[v] = struct{}{}
|
||||
}
|
||||
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
|
||||
|
||||
committees, err := BeaconCommittees(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute beacon committees")
|
||||
}
|
||||
ptcPerSlot, ptcMembersPerCommittee := PtcAllocation(len(committees))
|
||||
// Compute committee assignments for each slot in the epoch.
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
committees, err := BeaconCommittees(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute beacon committees")
|
||||
}
|
||||
endSlot := slot + params.BeaconConfig().SlotsPerEpoch
|
||||
for {
|
||||
for j, committee := range committees {
|
||||
for _, vIndex := range committee {
|
||||
for i, vIndex := range committee {
|
||||
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
|
||||
continue
|
||||
}
|
||||
@@ -320,8 +324,19 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
|
||||
assignments[vIndex].Committee = committee
|
||||
assignments[vIndex].AttesterSlot = slot
|
||||
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
|
||||
if uint64(j) < ptcPerSlot && uint64(i) < ptcMembersPerCommittee {
|
||||
assignments[vIndex].PtcSlot = slot
|
||||
}
|
||||
}
|
||||
}
|
||||
slot++
|
||||
if slot == endSlot {
|
||||
break
|
||||
}
|
||||
committees, err = BeaconCommittees(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not compute beacon committees")
|
||||
}
|
||||
}
|
||||
return assignments, nil
|
||||
}
|
||||
|
||||
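A hedged sketch of how a caller might consume the new PtcSlot field; the duty-fetching code is not part of this hunk, so `st`, `epoch` and `myIndex` are illustrative only.

	assignments, err := helpers.CommitteeAssignments(ctx, st, epoch, []primitives.ValidatorIndex{myIndex})
	if err != nil {
		return err
	}
	if a, ok := assignments[myIndex]; ok && a.PtcSlot != 0 {
		// This validator sits in the payload timeliness committee at a.PtcSlot.
		// A zero-valued PtcSlot is indistinguishable from "no PTC duty", which is
		// why TestCommitteeAssignments_PTC below skips slot 0 when verifying the map.
	}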
@@ -3,6 +3,7 @@ package helpers_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
@@ -10,6 +11,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
@@ -722,15 +724,26 @@ func TestCommitteeIndices(t *testing.T) {
|
||||
assert.DeepEqual(t, []primitives.CommitteeIndex{0, 1, 3}, indices)
|
||||
}
|
||||
|
||||
func TestAttestationCommittees(t *testing.T) {
|
||||
validators := make([]*ethpb.Validator, params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().TargetCommitteeSize))
|
||||
func TestCommitteeAssignments_PTC(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
// Create 10 committees. Total 40960 validators.
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
validatorIndices := make([]primitives.ValidatorIndex, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = &ethpb.Validator{
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
PublicKey: k,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
validatorIndices[i] = primitives.ValidatorIndex(i)
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
|
||||
state, err := state_native.InitializeFromProtoEpbs(&ethpb.BeaconStateEPBS{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
@@ -754,6 +767,31 @@ func TestAttestationCommittees(t *testing.T) {
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[0])))
|
||||
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(committees[1])))
|
||||
})
|
||||
as, err := helpers.CommitteeAssignments(context.Background(), state, 1, validatorIndices)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Capture, per slot, the validator indices that belong to a PTC; these are verified against GetPayloadTimelinessCommittee below.
|
||||
slotValidatorMap := make(map[primitives.Slot][]primitives.ValidatorIndex)
|
||||
for i, a := range as {
|
||||
slotValidatorMap[a.PtcSlot] = append(slotValidatorMap[a.PtcSlot], i)
|
||||
}
|
||||
|
||||
// Verify that all the slots have the correct number of PTC.
|
||||
for s, v := range slotValidatorMap {
|
||||
if s == 0 {
|
||||
continue
|
||||
}
|
||||
// Make sure all the PTC are the correct size from the map.
|
||||
require.Equal(t, len(v), field_params.PTCSize)
|
||||
|
||||
// Get the actual PTC from the beacon state using the helper function
|
||||
ptc, err := helpers.GetPayloadTimelinessCommittee(context.Background(), state, s)
|
||||
require.NoError(t, err)
|
||||
for _, index := range ptc {
|
||||
i := slices.Index(v, index)
|
||||
require.NotEqual(t, -1, i) // Every PTC member must appear in the assignment map.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBeaconCommittees(t *testing.T) {
|
||||
|
||||
12
beacon-chain/core/helpers/exports_test.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package helpers
|
||||
|
||||
var (
|
||||
ErrNilMessage = errNilMessage
|
||||
ErrNilData = errNilData
|
||||
ErrNilBeaconBlockRoot = errNilBeaconBlockRoot
|
||||
ErrNilPayloadAttestation = errNilPayloadAttestation
|
||||
ErrNilSignature = errNilSignature
|
||||
ErrNilAggregationBits = errNilAggregationBits
|
||||
ErrPreEPBSState = errPreEPBSState
|
||||
ErrCommitteeOverflow = errCommitteeOverflow
|
||||
)
|
||||
296
beacon-chain/core/helpers/payload_attestation.go
Normal file
@@ -0,0 +1,296 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/epbs"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
var (
|
||||
errNilMessage = errors.New("nil PayloadAttestationMessage")
|
||||
errNilData = errors.New("nil PayloadAttestationData")
|
||||
errNilBeaconBlockRoot = errors.New("nil BeaconBlockRoot")
|
||||
errNilPayloadAttestation = errors.New("nil PayloadAttestation")
|
||||
errNilSignature = errors.New("nil Signature")
|
||||
errNilAggregationBits = errors.New("nil AggregationBits")
|
||||
errPreEPBSState = errors.New("beacon state pre ePBS fork")
|
||||
errCommitteeOverflow = errors.New("beacon committee of insufficient size")
|
||||
)
|
||||
|
||||
// ValidateNilPayloadAttestationData checks if any composite field of the
|
||||
// payload attestation data is nil
|
||||
func ValidateNilPayloadAttestationData(data *eth.PayloadAttestationData) error {
|
||||
if data == nil {
|
||||
return errNilData
|
||||
}
|
||||
if data.BeaconBlockRoot == nil {
|
||||
return errNilBeaconBlockRoot
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValidateNilPayloadAttestationMessage checks if any composite field of the
|
||||
// payload attestation message is nil
|
||||
func ValidateNilPayloadAttestationMessage(att *eth.PayloadAttestationMessage) error {
|
||||
if att == nil {
|
||||
return errNilMessage
|
||||
}
|
||||
if att.Signature == nil {
|
||||
return errNilSignature
|
||||
}
|
||||
return ValidateNilPayloadAttestationData(att.Data)
|
||||
}
|
||||
|
||||
// ValidateNilPayloadAttestation checks if any composite field of the
|
||||
// payload attestation is nil
|
||||
func ValidateNilPayloadAttestation(att *eth.PayloadAttestation) error {
|
||||
if att == nil {
|
||||
return errNilPayloadAttestation
|
||||
}
|
||||
if att.AggregationBits == nil {
|
||||
return errNilAggregationBits
|
||||
}
|
||||
if att.Signature == nil {
|
||||
return errNilSignature
|
||||
}
|
||||
return ValidateNilPayloadAttestationData(att.Data)
|
||||
}
|
||||
|
||||
// InPayloadTimelinessCommittee returns whether the given index belongs to the
|
||||
// PTC computed from the passed state.
|
||||
func InPayloadTimelinessCommittee(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot, idx primitives.ValidatorIndex) (bool, error) {
|
||||
ptc, err := GetPayloadTimelinessCommittee(ctx, state, slot)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, i := range ptc {
|
||||
if i == idx {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// GetPayloadTimelinessCommittee returns the PTC for the given slot, computed from the passed state as in the
|
||||
// spec function `get_ptc`.
|
||||
func GetPayloadTimelinessCommittee(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot) (indices []primitives.ValidatorIndex, err error) {
|
||||
if state.Version() < version.EPBS {
|
||||
return nil, errPreEPBSState
|
||||
}
|
||||
committees, err := BeaconCommittees(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get beacon committees")
|
||||
}
|
||||
committeesPerSlot, membersPerCommittee := PtcAllocation(len(committees))
|
||||
for i, committee := range committees {
|
||||
if uint64(i) >= committeesPerSlot {
|
||||
return
|
||||
}
|
||||
if uint64(len(committee)) < membersPerCommittee {
|
||||
return nil, errCommitteeOverflow
|
||||
}
|
||||
indices = append(indices, committee[:membersPerCommittee]...)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PtcAllocation returns:
|
||||
// 1. The number of beacon committees that PTC will borrow from in a slot.
|
||||
// 2. The number of validators that PTC will borrow from in a beacon committee.
|
||||
func PtcAllocation(slotCommittees int) (committeesPerSlot, membersPerCommittee uint64) {
|
||||
committeesPerSlot = math.LargestPowerOfTwo(math.Min(uint64(slotCommittees), fieldparams.PTCSize))
|
||||
membersPerCommittee = fieldparams.PTCSize / committeesPerSlot
|
||||
return
|
||||
}
|
||||
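A worked example of the allocation above, using the PTC size of 512 from fieldparams (the same numbers appear in Test_PtcAllocation and TestGetPayloadTimelinessCommittee below):

	// With 10 beacon committees in the slot:
	//   committeesPerSlot   = LargestPowerOfTwo(min(10, 512)) = 8
	//   membersPerCommittee = 512 / 8                         = 64
	// so the PTC is the first 64 members of each of the first 8 committees.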
|
||||
// GetPayloadAttestingIndices returns the set of attester indices corresponding to the given PayloadAttestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_payload_attesting_indices(state: BeaconState, slot: Slot,
|
||||
// payload_attestation: PayloadAttestation) -> Set[ValidatorIndex]:
|
||||
// """
|
||||
// Return the set of attesting indices corresponding to ``payload_attestation``.
|
||||
// """
|
||||
// ptc = get_ptc(state, slot)
|
||||
// return set(index for i, index in enumerate(ptc) if payload_attestation.aggregation_bits[i])
|
||||
func GetPayloadAttestingIndices(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot, att *eth.PayloadAttestation) (indices []primitives.ValidatorIndex, err error) {
|
||||
if state.Version() < version.EPBS {
|
||||
return nil, errPreEPBSState
|
||||
}
|
||||
|
||||
ptc, err := GetPayloadTimelinessCommittee(ctx, state, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, validatorIndex := range ptc {
|
||||
if att.AggregationBits.BitAt(uint64(i)) {
|
||||
indices = append(indices, validatorIndex)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
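A small illustration of the bit-to-index mapping implemented above; the validator indices are made up.

	// If the PTC for the slot is [1034, 2077, 3120, ...] and aggregation bits 0 and 2
	// are set, GetPayloadAttestingIndices returns [1034, 3120]: bit i selects ptc[i].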
|
||||
// GetIndexedPayloadAttestation replaces a PayloadAttestation's AggregationBits with sorted AttestingIndices and returns an IndexedPayloadAttestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def get_indexed_payload_attestation(state: BeaconState, slot: Slot,
|
||||
// payload_attestation: PayloadAttestation) -> IndexedPayloadAttestation:
|
||||
// """
|
||||
// Return the indexed payload attestation corresponding to ``payload_attestation``.
|
||||
// """
|
||||
// attesting_indices = get_payload_attesting_indices(state, slot, payload_attestation)
|
||||
//
|
||||
// return IndexedPayloadAttestation(
|
||||
// attesting_indices=sorted(attesting_indices),
|
||||
// data=payload_attestation.data,
|
||||
// signature=payload_attestation.signature,
|
||||
// )
|
||||
func GetIndexedPayloadAttestation(ctx context.Context, state state.ReadOnlyBeaconState, slot primitives.Slot, att *eth.PayloadAttestation) (*epbs.IndexedPayloadAttestation, error) {
|
||||
if state.Version() < version.EPBS {
|
||||
return nil, errPreEPBSState
|
||||
}
|
||||
|
||||
attestingIndices, err := GetPayloadAttestingIndices(ctx, state, slot, att)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slices.Sort(attestingIndices)
|
||||
|
||||
return &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: attestingIndices,
|
||||
Data: att.Data,
|
||||
Signature: att.Signature,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IsValidIndexedPayloadAttestation validates the given IndexedPayloadAttestation.
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def is_valid_indexed_payload_attestation(
|
||||
// state: BeaconState,
|
||||
// indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
|
||||
// """
|
||||
// Check if ``indexed_payload_attestation`` is not empty, has sorted and unique indices and has
|
||||
// a valid aggregate signature.
|
||||
// """
|
||||
// # Verify the data is valid
|
||||
// if indexed_payload_attestation.data.payload_status >= PAYLOAD_INVALID_STATUS:
|
||||
// return False
|
||||
//
|
||||
// # Verify indices are sorted and unique
|
||||
// indices = indexed_payload_attestation.attesting_indices
|
||||
// if len(indices) == 0 or not indices == sorted(set(indices)):
|
||||
// return False
|
||||
//
|
||||
// # Verify aggregate signature
|
||||
// pubkeys = [state.validators[i].pubkey for i in indices]
|
||||
// domain = get_domain(state, DOMAIN_PTC_ATTESTER, None)
|
||||
// signing_root = compute_signing_root(indexed_payload_attestation.data, domain)
|
||||
// return bls.FastAggregateVerify(pubkeys, signing_root, indexed_payload_attestation.signature)
|
||||
func IsValidIndexedPayloadAttestation(state state.ReadOnlyBeaconState, att *epbs.IndexedPayloadAttestation) (bool, error) {
|
||||
if state.Version() < version.EPBS {
|
||||
return false, errPreEPBSState
|
||||
}
|
||||
|
||||
// Verify the data is valid.
|
||||
if att.Data.PayloadStatus >= primitives.PAYLOAD_INVALID_STATUS {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Verify indices are sorted and unique.
|
||||
indices := att.AttestingIndices
sorted := slices.Clone(indices) // sort and dedupe a copy so the original ordering is actually checked
slices.Sort(sorted)
sorted = slices.Compact(sorted)
if len(indices) == 0 || !slices.Equal(indices, sorted) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Verify aggregate signature.
|
||||
publicKeys := make([]bls.PublicKey, len(indices))
|
||||
for i, index := range indices {
|
||||
validator, err := state.ValidatorAtIndexReadOnly(index)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
publicKeyBytes := validator.PublicKey()
|
||||
publicKey, err := bls.PublicKeyFromBytes(publicKeyBytes[:])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
publicKeys[i] = publicKey
|
||||
}
|
||||
|
||||
domain, err := signing.Domain(
|
||||
state.Fork(),
|
||||
slots.ToEpoch(state.Slot()),
|
||||
params.BeaconConfig().DomainPTCAttester,
|
||||
state.GenesisValidatorsRoot(),
|
||||
)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
signingRoot, err := signing.ComputeSigningRoot(att.Data, domain)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
signature, err := bls.SignatureFromBytes(att.Signature)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return signature.FastAggregateVerify(publicKeys, signingRoot), nil
|
||||
}
|
||||
|
||||
// ValidatePayloadAttestationMessageSignature verifies the signature of a
|
||||
// payload attestation message.
|
||||
func ValidatePayloadAttestationMessageSignature(ctx context.Context, st state.ReadOnlyBeaconState, msg *eth.PayloadAttestationMessage) error {
|
||||
if err := ValidateNilPayloadAttestationMessage(msg); err != nil {
|
||||
return err
|
||||
}
|
||||
val, err := st.ValidatorAtIndex(msg.ValidatorIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pub, err := bls.PublicKeyFromBytes(val.PublicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sig, err := bls.SignatureFromBytes(msg.Signature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
domain, err := signing.Domain(st.Fork(), currentEpoch, params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := signing.ComputeSigningRoot(msg.Data, domain)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !sig.Verify(pub, root[:]) {
|
||||
return signing.ErrSigFailedToVerify
|
||||
}
|
||||
return nil
|
||||
}
|
||||
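A hedged sketch of producing and then checking a PayloadAttestationMessage; only the verification helper above is part of this diff, so `data`, `myIndex` and `secretKey` are assumed inputs, and the signing side simply mirrors the helper's own domain computation.

	domain, err := signing.Domain(st.Fork(), slots.ToEpoch(st.Slot()), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
	if err != nil {
		return err
	}
	root, err := signing.ComputeSigningRoot(data, domain)
	if err != nil {
		return err
	}
	msg := &eth.PayloadAttestationMessage{
		ValidatorIndex: myIndex,
		Data:           data,
		Signature:      secretKey.Sign(root[:]).Marshal(),
	}
	if err := helpers.ValidatePayloadAttestationMessageSignature(ctx, st, msg); err != nil {
		return err
	}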
363
beacon-chain/core/helpers/payload_attestation_test.go
Normal file
@@ -0,0 +1,363 @@
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/epbs"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/rand"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestValidateNilPayloadAttestation(t *testing.T) {
|
||||
require.ErrorIs(t, helpers.ErrNilData, helpers.ValidateNilPayloadAttestationData(nil))
|
||||
data := &eth.PayloadAttestationData{}
|
||||
require.ErrorIs(t, helpers.ErrNilBeaconBlockRoot, helpers.ValidateNilPayloadAttestationData(data))
|
||||
data.BeaconBlockRoot = make([]byte, 32)
|
||||
require.NoError(t, helpers.ValidateNilPayloadAttestationData(data))
|
||||
|
||||
require.ErrorIs(t, helpers.ErrNilMessage, helpers.ValidateNilPayloadAttestationMessage(nil))
|
||||
message := &eth.PayloadAttestationMessage{}
|
||||
require.ErrorIs(t, helpers.ErrNilSignature, helpers.ValidateNilPayloadAttestationMessage(message))
|
||||
message.Signature = make([]byte, 96)
|
||||
require.ErrorIs(t, helpers.ErrNilData, helpers.ValidateNilPayloadAttestationMessage(message))
|
||||
message.Data = data
|
||||
require.NoError(t, helpers.ValidateNilPayloadAttestationMessage(message))
|
||||
|
||||
require.ErrorIs(t, helpers.ErrNilPayloadAttestation, helpers.ValidateNilPayloadAttestation(nil))
|
||||
att := &eth.PayloadAttestation{}
|
||||
require.ErrorIs(t, helpers.ErrNilAggregationBits, helpers.ValidateNilPayloadAttestation(att))
|
||||
att.AggregationBits = bitfield.NewBitvector512()
|
||||
require.ErrorIs(t, helpers.ErrNilSignature, helpers.ValidateNilPayloadAttestation(att))
|
||||
att.Signature = message.Signature
|
||||
require.ErrorIs(t, helpers.ErrNilData, helpers.ValidateNilPayloadAttestation(att))
|
||||
att.Data = data
|
||||
require.NoError(t, helpers.ValidateNilPayloadAttestation(att))
|
||||
}
|
||||
|
||||
func TestGetPayloadTimelinessCommittee(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create 10 committees
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
k := make([]byte, 48)
|
||||
copy(k, strconv.Itoa(i))
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: k,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
state, err := state_native.InitializeFromProtoEpbs(random.BeaconState(t))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, state.SetValidators(validators))
|
||||
require.NoError(t, state.SetSlot(200))
|
||||
|
||||
ctx := context.Background()
|
||||
indices, err := helpers.BeaconCommitteeFromState(ctx, state, state.Slot(), 1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 128, len(indices))
|
||||
|
||||
epoch := slots.ToEpoch(state.Slot())
|
||||
activeCount, err := helpers.ActiveValidatorCount(ctx, state, epoch)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(40960), activeCount)
|
||||
|
||||
computedCommitteeCount := helpers.SlotCommitteeCount(activeCount)
|
||||
require.Equal(t, committeeCount, computedCommitteeCount)
|
||||
committeesPerSlot := math.LargestPowerOfTwo(math.Min(committeeCount, fieldparams.PTCSize))
|
||||
require.Equal(t, uint64(8), committeesPerSlot)
|
||||
|
||||
ptc, err := helpers.GetPayloadTimelinessCommittee(ctx, state, state.Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fieldparams.PTCSize, len(ptc))
|
||||
|
||||
committee1, err := helpers.BeaconCommitteeFromState(ctx, state, state.Slot(), 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, committee1[:64], ptc[:64])
|
||||
}
|
||||
|
||||
func Test_PtcAllocation(t *testing.T) {
|
||||
tests := []struct {
|
||||
committeeCount int
|
||||
memberPerCommittee uint64
|
||||
committeesPerSlot uint64
|
||||
}{
|
||||
{1, 512, 1},
|
||||
{4, 128, 4},
|
||||
{128, 4, 128},
|
||||
{512, 1, 512},
|
||||
{1024, 1, 512},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
committeesPerSlot, memberPerCommittee := helpers.PtcAllocation(test.committeeCount)
|
||||
if memberPerCommittee != test.memberPerCommittee {
|
||||
t.Errorf("memberPerCommittee(%d) = %d; expected %d", test.committeeCount, memberPerCommittee, test.memberPerCommittee)
|
||||
}
|
||||
if committeesPerSlot != test.committeesPerSlot {
|
||||
t.Errorf("committeesPerSlot(%d) = %d; expected %d", test.committeeCount, committeesPerSlot, test.committeesPerSlot)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPayloadAttestingIndices(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create 10 committees. Total 40960 validators.
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
pubkey := make([]byte, 48)
|
||||
copy(pubkey, strconv.Itoa(i))
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: pubkey,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Create a beacon state.
|
||||
state, err := state_native.InitializeFromProtoEpbs(&ethpb.BeaconStateEPBS{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get PTC.
|
||||
ptc, err := helpers.GetPayloadTimelinessCommittee(context.Background(), state, state.Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fieldparams.PTCSize, len(ptc))
|
||||
|
||||
// Generate random indices. PTC members at the corresponding indices are considered attested.
|
||||
randGen := rand.NewDeterministicGenerator()
|
||||
attesterCount := randGen.Intn(fieldparams.PTCSize) + 1
|
||||
indices := randGen.Perm(fieldparams.PTCSize)[:attesterCount]
|
||||
slices.Sort(indices)
|
||||
require.Equal(t, attesterCount, len(indices))
|
||||
|
||||
// Create a PayloadAttestation with AggregationBits set true at the indices.
|
||||
aggregationBits := bitfield.NewBitvector512()
|
||||
for _, index := range indices {
|
||||
aggregationBits.SetBitAt(uint64(index), true)
|
||||
}
|
||||
|
||||
payloadAttestation := &eth.PayloadAttestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
// Get attesting indices.
|
||||
attesters, err := helpers.GetPayloadAttestingIndices(context.Background(), state, state.Slot(), payloadAttestation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(indices), len(attesters))
|
||||
|
||||
// Check if each attester equals to the PTC member at the corresponding index.
|
||||
for i, index := range indices {
|
||||
require.Equal(t, attesters[i], ptc[index])
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetIndexedPayloadAttestation(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create 10 committees. Total 40960 validators.
|
||||
committeeCount := uint64(10)
|
||||
validatorCount := committeeCount * params.BeaconConfig().TargetCommitteeSize * uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
publicKey := make([]byte, 48)
|
||||
copy(publicKey, strconv.Itoa(i))
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: publicKey,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Create a beacon state.
|
||||
state, err := state_native.InitializeFromProtoEpbs(&ethpb.BeaconStateEPBS{
|
||||
Validators: validators,
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Get PTC.
|
||||
ptc, err := helpers.GetPayloadTimelinessCommittee(context.Background(), state, state.Slot())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fieldparams.PTCSize, len(ptc))
|
||||
|
||||
// Generate random indices. PTC members at the corresponding indices are considered attested.
|
||||
randGen := rand.NewDeterministicGenerator()
|
||||
attesterCount := randGen.Intn(fieldparams.PTCSize) + 1
|
||||
indices := randGen.Perm(fieldparams.PTCSize)[:attesterCount]
|
||||
slices.Sort(indices)
|
||||
require.Equal(t, attesterCount, len(indices))
|
||||
|
||||
// Create a PayloadAttestation with AggregationBits set true at the indices.
|
||||
aggregationBits := bitfield.NewBitvector512()
|
||||
for _, index := range indices {
|
||||
aggregationBits.SetBitAt(uint64(index), true)
|
||||
}
|
||||
|
||||
payloadAttestation := &eth.PayloadAttestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, 32),
|
||||
},
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
|
||||
// Get attesting indices.
|
||||
ctx := context.Background()
|
||||
attesters, err := helpers.GetPayloadAttestingIndices(ctx, state, state.Slot(), payloadAttestation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(indices), len(attesters))
|
||||
|
||||
// Get an IndexedPayloadAttestation.
|
||||
indexedPayloadAttestation, err := helpers.GetIndexedPayloadAttestation(ctx, state, state.Slot(), payloadAttestation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(indices), len(indexedPayloadAttestation.AttestingIndices))
|
||||
require.DeepEqual(t, payloadAttestation.Data, indexedPayloadAttestation.Data)
|
||||
require.DeepEqual(t, payloadAttestation.Signature, indexedPayloadAttestation.Signature)
|
||||
|
||||
// Check if the attesting indices are the same.
|
||||
slices.Sort(attesters) // GetIndexedPayloadAttestation sorts attesting indices.
|
||||
require.DeepEqual(t, attesters, indexedPayloadAttestation.AttestingIndices)
|
||||
}
|
||||
|
||||
func TestIsValidIndexedPayloadAttestation(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
// Create validators.
|
||||
validatorCount := uint64(350)
|
||||
validators := make([]*ethpb.Validator, validatorCount)
|
||||
_, secretKeys, err := util.DeterministicDepositsAndKeys(validatorCount)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(validators); i++ {
|
||||
validators[i] = &ethpb.Validator{
|
||||
PublicKey: secretKeys[i].PublicKey().Marshal(),
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
|
||||
}
|
||||
}
|
||||
|
||||
// Create a beacon state.
|
||||
state, err := state_native.InitializeFromProtoEpbs(&ethpb.BeaconStateEPBS{
|
||||
Validators: validators,
|
||||
Fork: &ethpb.Fork{
|
||||
Epoch: 0,
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Define test cases.
|
||||
tests := []struct {
|
||||
attestation *epbs.IndexedPayloadAttestation
|
||||
}{
|
||||
{
|
||||
attestation: &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: []primitives.ValidatorIndex{1},
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
attestation: &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: []primitives.ValidatorIndex{13, 19},
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
attestation: &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: []primitives.ValidatorIndex{123, 234, 345},
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
attestation: &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: []primitives.ValidatorIndex{38, 46, 54, 62, 70, 78, 86, 194},
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
{
|
||||
attestation: &epbs.IndexedPayloadAttestation{
|
||||
AttestingIndices: []primitives.ValidatorIndex{5},
|
||||
Data: &eth.PayloadAttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Run test cases.
|
||||
for _, test := range tests {
|
||||
signatures := make([]bls.Signature, len(test.attestation.AttestingIndices))
|
||||
for i, index := range test.attestation.AttestingIndices {
|
||||
signedBytes, err := signing.ComputeDomainAndSign(
|
||||
state,
|
||||
slots.ToEpoch(test.attestation.Data.Slot),
|
||||
test.attestation.Data,
|
||||
params.BeaconConfig().DomainPTCAttester,
|
||||
secretKeys[index],
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := bls.SignatureFromBytes(signedBytes)
|
||||
require.NoError(t, err)
|
||||
|
||||
signatures[i] = signature
|
||||
}
|
||||
|
||||
aggregatedSignature := bls.AggregateSignatures(signatures)
|
||||
test.attestation.Signature = aggregatedSignature.Marshal()
|
||||
|
||||
isValid, err := helpers.IsValidIndexedPayloadAttestation(state, test.attestation)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isValid)
|
||||
}
|
||||
}
|
||||
@@ -69,15 +69,16 @@ func IsNextPeriodSyncCommittee(
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(findSubCommitteeIndices(val.PublicKey, committee.Pubkeys)) > 0, nil
|
||||
return len(findSubCommitteeIndices(pk[:], committee.Pubkeys)) > 0, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -96,10 +97,11 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
indices, err := syncCommitteeCache.CurrentPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -112,7 +114,7 @@ func CurrentPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
}()
|
||||
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -130,15 +132,16 @@ func NextPeriodSyncSubcommitteeIndices(
|
||||
}
|
||||
indices, err := syncCommitteeCache.NextPeriodIndexPosition(root, valIdx)
|
||||
if errors.Is(err, cache.ErrNonExistingSyncCommitteeKey) {
|
||||
val, err := st.ValidatorAtIndex(valIdx)
|
||||
val, err := st.ValidatorAtIndexReadOnly(valIdx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pk := val.PublicKey()
|
||||
committee, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return findSubCommitteeIndices(val.PublicKey, committee.Pubkeys), nil
|
||||
return findSubCommitteeIndices(pk[:], committee.Pubkeys), nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -584,23 +584,23 @@ func IsSameWithdrawalCredentials(a, b *ethpb.Validator) bool {
|
||||
// and validator.withdrawable_epoch <= epoch
|
||||
// and balance > 0
|
||||
// )
|
||||
func IsFullyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil || balance <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if fork >= version.Electra {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch <= epoch
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch <= epoch
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
// partial withdrawal. This function assumes that the caller has a lock on the state.
|
||||
// This method conditionally calls the fork appropriate implementation based on the epoch argument.
|
||||
func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch, fork int) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
@@ -630,9 +630,9 @@ func IsPartiallyWithdrawableValidator(val *ethpb.Validator, balance uint64, epoc
|
||||
// and has_max_effective_balance
|
||||
// and has_excess_balance
|
||||
// )
|
||||
func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
maxEB := ValidatorMaxEffectiveBalance(val)
|
||||
hasMaxBalance := val.EffectiveBalance == maxEB
|
||||
hasMaxBalance := val.EffectiveBalance() == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
@@ -652,8 +652,8 @@ func isPartiallyWithdrawableValidatorElectra(val *ethpb.Validator, balance uint6
|
||||
// has_max_effective_balance = validator.effective_balance == MAX_EFFECTIVE_BALANCE
|
||||
// has_excess_balance = balance > MAX_EFFECTIVE_BALANCE
|
||||
// return has_eth1_withdrawal_credential(validator) and has_max_effective_balance and has_excess_balance
|
||||
func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance == params.BeaconConfig().MaxEffectiveBalance
|
||||
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
@@ -670,7 +670,7 @@ func isPartiallyWithdrawableValidatorCapella(val *ethpb.Validator, balance uint6
|
||||
// return MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val *ethpb.Validator) uint64 {
|
||||
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
@@ -974,13 +974,6 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: &ethpb.Validator{
|
||||
@@ -1036,7 +1029,9 @@ func TestIsFullyWithdrawableValidator(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.IsFullyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1050,13 +1045,6 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
fork int
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "Handles nil case",
|
||||
validator: nil,
|
||||
balance: 0,
|
||||
epoch: 0,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "No ETH1 prefix",
|
||||
validator: &ethpb.Validator{
|
||||
@@ -1113,7 +1101,9 @@ func TestIsPartiallyWithdrawableValidator(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(tt.validator, tt.balance, tt.epoch, tt.fork))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.IsPartiallyWithdrawableValidator(v, tt.balance, tt.epoch, tt.fork))
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1167,15 +1157,12 @@ func TestValidatorMaxEffectiveBalance(t *testing.T) {
|
||||
validator: &ethpb.Validator{WithdrawalCredentials: []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}},
|
||||
want: params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
{
|
||||
"Handles nil case",
|
||||
nil,
|
||||
params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(tt.validator))
|
||||
v, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, helpers.ValidatorMaxEffectiveBalance(v))
|
||||
})
|
||||
}
|
||||
// Sanity check that MinActivationBalance equals (pre-electra) MaxEffectiveBalance
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
"state-bellatrix.go",
|
||||
"trailing_slot_state_cache.go",
|
||||
"transition.go",
|
||||
"transition_epbs.go",
|
||||
"transition_no_verify_sig.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition",
|
||||
@@ -20,6 +21,7 @@ go_library(
|
||||
"//beacon-chain/core/capella:go_default_library",
|
||||
"//beacon-chain/core/deneb:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/epbs:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/execution:go_default_library",
|
||||
@@ -45,6 +47,7 @@ go_library(
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
|
||||
109
beacon-chain/core/transition/transition_epbs.go
Normal file
@@ -0,0 +1,109 @@
package transition

import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

func processExecution(state state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (err error) {
    if body.Version() >= version.EPBS {
        state, err = blocks.ProcessWithdrawals(state, nil)
        if err != nil {
            return errors.Wrap(err, "could not process withdrawals")
        }
        return processExecutionPayloadHeader(state, body)
    }
    enabled, err := blocks.IsExecutionEnabled(state, body)
    if err != nil {
        return errors.Wrap(err, "could not check if execution is enabled")
    }
    if !enabled {
        return nil
    }
    executionData, err := body.Execution()
    if err != nil {
        return err
    }
    if state.Version() >= version.Capella {
        state, err = blocks.ProcessWithdrawals(state, executionData)
        if err != nil {
            return errors.Wrap(err, "could not process withdrawals")
        }
    }
    if err := blocks.ProcessPayload(state, body); err != nil {
        return errors.Wrap(err, "could not process execution data")
    }
    return nil
}

// This function verifies the signature as it is not necessarily signed by the
// proposer
func processExecutionPayloadHeader(state state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) (err error) {
    sh, err := body.SignedExecutionPayloadHeader()
    if err != nil {
        return err
    }
    header, err := sh.Header()
    if err != nil {
        return err
    }
    if err := epbs.ValidatePayloadHeaderSignature(state, sh); err != nil {
        return err
    }
    builderIndex := header.BuilderIndex()
    builder, err := state.ValidatorAtIndex(builderIndex)
    if err != nil {
        return err
    }
    epoch := slots.ToEpoch(state.Slot())
    if builder.ActivationEpoch > epoch || epoch >= builder.ExitEpoch {
        return errors.New("builder is not active")
    }
    if builder.Slashed {
        return errors.New("builder is slashed")
    }
    amount := header.Value()
    builderBalance, err := state.BalanceAtIndex(builderIndex)
    if err != nil {
        return err
    }
    if amount > primitives.Gwei(builderBalance) {
        return errors.New("builder has insufficient balance")
    }
    // state.Slot == block.Slot because of process_slot
    if header.Slot() != state.Slot() {
        return errors.New("incorrect header slot")
    }
    // the state latest block header has the parent root because of
    // process_block_header
    blockHeader := state.LatestBlockHeader()
    if header.ParentBlockRoot() != [32]byte(blockHeader.ParentRoot) {
        return errors.New("incorrect parent block root")
    }
    lbh, err := state.LatestBlockHash()
    if err != nil {
        return err
    }
    if header.ParentBlockHash() != [32]byte(lbh) {
        return errors.New("incorrect latest block hash")
    }
    if err := state.UpdateBalancesAtIndex(builderIndex, builderBalance-uint64(amount)); err != nil {
        return err
    }
    if err := helpers.IncreaseBalance(state, blockHeader.ProposerIndex, uint64(amount)); err != nil {
        return err
    }
    headerEPBS, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderEPBS)
    if !ok {
        return errors.New("not an ePBS execution payload header")
    }
    return state.SetLatestExecutionPayloadHeaderEPBS(headerEPBS)
}
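The builder-payment step in processExecutionPayloadHeader debits the builder's balance by the bid value and credits the block proposer before the header is stored. Below is a minimal, self-contained sketch of that accounting only; the Gwei type, the balances slice, and the payBuilderBid helper are illustrative stand-ins, not Prysm's real state API.

package main

import (
    "errors"
    "fmt"
)

// Gwei is a stand-in for primitives.Gwei.
type Gwei uint64

// payBuilderBid moves the bid amount from the builder to the proposer,
// mirroring the balance checks performed before the transfer in the diff above.
func payBuilderBid(balances []uint64, builder, proposer int, amount Gwei) error {
    if builder >= len(balances) || proposer >= len(balances) {
        return errors.New("unknown validator index")
    }
    if uint64(amount) > balances[builder] {
        return errors.New("builder has insufficient balance")
    }
    balances[builder] -= uint64(amount)
    balances[proposer] += uint64(amount)
    return nil
}

func main() {
    balances := []uint64{32_000_000_000, 32_000_000_000} // builder, proposer (gwei)
    if err := payBuilderBid(balances, 0, 1, Gwei(1_000_000_000)); err != nil {
        fmt.Println("payment failed:", err)
        return
    }
    fmt.Println("balances after payment:", balances)
}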
@@ -8,7 +8,7 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/altair"
    b "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition/interop"
    v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
@@ -272,7 +272,7 @@ func ProcessOperationsNoVerifyAttsSigs(
            return nil, err
        }
    } else {
        state, err = electra.ProcessOperations(ctx, state, beaconBlock)
        state, err = epbs.ProcessOperations(ctx, state, beaconBlock)
        if err != nil {
            return nil, err
        }
@@ -318,27 +318,9 @@ func ProcessBlockForStateRoot(
        return nil, errors.Wrap(err, "could not process block header")
    }

    enabled, err := b.IsExecutionEnabled(state, blk.Body())
    if err != nil {
        return nil, errors.Wrap(err, "could not check if execution is enabled")
    if err := processExecution(state, blk.Body()); err != nil {
        return nil, errors.Wrap(err, "could not process execution")
    }
    if enabled {
        executionData, err := blk.Body().Execution()
        if err != nil {
            return nil, err
        }
        if state.Version() >= version.Capella {
            state, err = b.ProcessWithdrawals(state, executionData)
            if err != nil {
                return nil, errors.Wrap(err, "could not process withdrawals")
            }
        }
        state, err = b.ProcessPayload(state, blk.Body())
        if err != nil {
            return nil, errors.Wrap(err, "could not process execution data")
        }
    }

    randaoReveal := signed.Block().Body().RandaoReveal()
    state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
    if err != nil {
@@ -698,3 +698,45 @@ func TestProcessSlotsConditionally(t *testing.T) {
        assert.Equal(t, primitives.Slot(6), s.Slot())
    })
}

func BenchmarkProcessSlots_Capella(b *testing.B) {
    st, _ := util.DeterministicGenesisStateCapella(b, params.BeaconConfig().MaxValidatorsPerCommittee)

    var err error

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
        if err != nil {
            b.Fatalf("Failed to process slot %v", err)
        }
    }
}

func BenchmarkProcessSlots_Deneb(b *testing.B) {
    st, _ := util.DeterministicGenesisStateDeneb(b, params.BeaconConfig().MaxValidatorsPerCommittee)

    var err error

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
        if err != nil {
            b.Fatalf("Failed to process slot %v", err)
        }
    }
}

func BenchmarkProcessSlots_Electra(b *testing.B) {
    st, _ := util.DeterministicGenesisStateElectra(b, params.BeaconConfig().MaxValidatorsPerCommittee)

    var err error

    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        st, err = transition.ProcessSlots(context.Background(), st, st.Slot()+1)
        if err != nil {
            b.Fatalf("Failed to process slot %v", err)
        }
    }
}
@@ -22,7 +22,7 @@ func SlashingParamsPerVersion(v int) (slashingQuotient, proposerRewardQuotient,
        slashingQuotient = cfg.MinSlashingPenaltyQuotientBellatrix
        proposerRewardQuotient = cfg.ProposerRewardQuotient
        whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotient
    case version.Electra:
    case version.Electra, version.EPBS:
        slashingQuotient = cfg.MinSlashingPenaltyQuotientElectra
        proposerRewardQuotient = cfg.ProposerRewardQuotient
        whistleblowerRewardQuotient = cfg.WhistleBlowerRewardQuotientElectra
@@ -241,76 +241,35 @@ func SlashedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validat
    return slashed
}

// ExitedValidatorIndices determines the indices exited during the current epoch.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
// ExitedValidatorIndices returns the indices of validators who exited during the specified epoch.
//
// A validator is considered to have exited during an epoch if their ExitEpoch equals the epoch and
// excludes validators that have been ejected.
// This function simplifies the exit determination by directly checking the validator's ExitEpoch,
// avoiding the complexities and potential inaccuracies of calculating withdrawable epochs.
func ExitedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
    exited := make([]primitives.ValidatorIndex, 0)
    exitEpochs := make([]primitives.Epoch, 0)
    for i := 0; i < len(validators); i++ {
        val := validators[i]
        if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
            exitEpochs = append(exitEpochs, val.ExitEpoch)
        }
    }
    exitQueueEpoch := primitives.Epoch(0)
    for _, i := range exitEpochs {
        if exitQueueEpoch < i {
            exitQueueEpoch = i
        }
    }

    // We use the exit queue churn to determine if we have passed a churn limit.
    exitQueueChurn := uint64(0)
    for _, val := range validators {
        if val.ExitEpoch == exitQueueEpoch {
            exitQueueChurn++
        }
    }
    churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
    if churn < exitQueueChurn {
        exitQueueEpoch++
    }
    withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
    for i, val := range validators {
        if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
            val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
        if val.ExitEpoch == epoch && val.EffectiveBalance > params.BeaconConfig().EjectionBalance {
            exited = append(exited, primitives.ValidatorIndex(i))
        }
    }
    return exited, nil
}

// EjectedValidatorIndices determines the indices ejected during the given epoch.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator, activeValidatorCount uint64) ([]primitives.ValidatorIndex, error) {
// EjectedValidatorIndices returns the indices of validators who were ejected during the specified epoch.
//
// A validator is considered ejected during an epoch if:
// - Their ExitEpoch equals the epoch.
// - Their EffectiveBalance is less than or equal to the EjectionBalance threshold.
//
// This function simplifies the ejection determination by directly checking the validator's ExitEpoch
// and EffectiveBalance, avoiding the complexities and potential inaccuracies of calculating
// withdrawable epochs.
func EjectedValidatorIndices(epoch primitives.Epoch, validators []*ethpb.Validator) ([]primitives.ValidatorIndex, error) {
    ejected := make([]primitives.ValidatorIndex, 0)
    exitEpochs := make([]primitives.Epoch, 0)
    for i := 0; i < len(validators); i++ {
        val := validators[i]
        if val.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
            exitEpochs = append(exitEpochs, val.ExitEpoch)
        }
    }
    exitQueueEpoch := primitives.Epoch(0)
    for _, i := range exitEpochs {
        if exitQueueEpoch < i {
            exitQueueEpoch = i
        }
    }

    // We use the exit queue churn to determine if we have passed a churn limit.
    exitQueueChurn := uint64(0)
    for _, val := range validators {
        if val.ExitEpoch == exitQueueEpoch {
            exitQueueChurn++
        }
    }
    churn := helpers.ValidatorExitChurnLimit(activeValidatorCount)
    if churn < exitQueueChurn {
        exitQueueEpoch++
    }
    withdrawableEpoch := exitQueueEpoch + params.BeaconConfig().MinValidatorWithdrawabilityDelay
    for i, val := range validators {
        if val.ExitEpoch == epoch && val.WithdrawableEpoch == withdrawableEpoch &&
            val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
        if val.ExitEpoch == epoch && val.EffectiveBalance <= params.BeaconConfig().EjectionBalance {
            ejected = append(ejected, primitives.ValidatorIndex(i))
        }
    }
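The rewritten helpers drop the churn and withdrawable-epoch bookkeeping and select validators on ExitEpoch alone, splitting exits from ejections by the EjectionBalance threshold. A self-contained sketch of that selection logic follows; the validator struct and the ejectionBalance constant are simplified stand-ins for Prysm's protobuf type and config, not the real definitions.

package main

import "fmt"

// validator is a simplified stand-in for ethpb.Validator.
type validator struct {
    ExitEpoch        uint64
    EffectiveBalance uint64
}

// ejectionBalance is an illustrative stand-in for params.BeaconConfig().EjectionBalance (gwei).
const ejectionBalance = 16_000_000_000

// exitedIndices returns indices whose ExitEpoch matches the epoch and whose
// balance is above the ejection threshold (exits, not ejections).
func exitedIndices(epoch uint64, vals []validator) []int {
    out := make([]int, 0)
    for i, v := range vals {
        if v.ExitEpoch == epoch && v.EffectiveBalance > ejectionBalance {
            out = append(out, i)
        }
    }
    return out
}

// ejectedIndices returns indices whose ExitEpoch matches the epoch and whose
// balance fell to or below the ejection threshold.
func ejectedIndices(epoch uint64, vals []validator) []int {
    out := make([]int, 0)
    for i, v := range vals {
        if v.ExitEpoch == epoch && v.EffectiveBalance <= ejectionBalance {
            out = append(out, i)
        }
    }
    return out
}

func main() {
    vals := []validator{
        {ExitEpoch: 5, EffectiveBalance: 32_000_000_000},
        {ExitEpoch: 5, EffectiveBalance: 15_000_000_000},
        {ExitEpoch: 9, EffectiveBalance: 32_000_000_000},
    }
    fmt.Println("exited at epoch 5:", exitedIndices(5, vals))   // [0]
    fmt.Println("ejected at epoch 5:", ejectedIndices(5, vals)) // [1]
}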
@@ -389,19 +389,16 @@ func TestExitedValidatorIndices(t *testing.T) {
            state: &ethpb.BeaconState{
                Validators: []*ethpb.Validator{
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                        EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:        0,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: 10,
                        EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:        10,
                    },
                    {
                        EffectiveBalance:  params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:         0,
                        WithdrawableEpoch: params.BeaconConfig().MinValidatorWithdrawabilityDelay,
                        EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
                        ExitEpoch:        0,
                    },
                },
            },
@@ -433,11 +430,7 @@ func TestExitedValidatorIndices(t *testing.T) {
        },
    }
    for _, tt := range tests {
        s, err := state_native.InitializeFromProtoPhase0(tt.state)
        require.NoError(t, err)
        activeCount, err := helpers.ActiveValidatorCount(context.Background(), s, time.PrevEpoch(s))
        require.NoError(t, err)
        exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators, activeCount)
        exitedIndices, err := validators.ExitedValidatorIndices(0, tt.state.Validators)
        require.NoError(t, err)
        assert.DeepEqual(t, tt.wanted, exitedIndices)
    }
@@ -18,6 +18,7 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//monitoring/backup:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",

@@ -18,6 +18,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/monitoring/backup"
    "github.com/prysmaticlabs/prysm/v5/proto/dbval"
    engine "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)

@@ -25,11 +26,13 @@ import (
type ReadOnlyDatabase interface {
    // Block related methods.
    Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error)
    SignedExecutionPayloadHeader(ctx context.Context, blockRoot [32]byte) (interfaces.ROSignedExecutionPayloadHeader, error)
    Blocks(ctx context.Context, f *filters.QueryFilter) ([]interfaces.ReadOnlySignedBeaconBlock, [][32]byte, error)
    BlockRoots(ctx context.Context, f *filters.QueryFilter) ([][32]byte, error)
    BlocksBySlot(ctx context.Context, slot primitives.Slot) ([]interfaces.ReadOnlySignedBeaconBlock, error)
    BlockRootsBySlot(ctx context.Context, slot primitives.Slot) (bool, [][32]byte, error)
    HasBlock(ctx context.Context, blockRoot [32]byte) bool
    HasBlindPayloadEnvelope(ctx context.Context, hash [32]byte) bool
    GenesisBlock(ctx context.Context) (interfaces.ReadOnlySignedBeaconBlock, error)
    GenesisBlockRoot(ctx context.Context) ([32]byte, error)
    IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
@@ -55,6 +58,7 @@ type ReadOnlyDatabase interface {
    DepositContractAddress(ctx context.Context) ([]byte, error)
    // ExecutionChainData operations.
    ExecutionChainData(ctx context.Context) (*ethpb.ETH1ChainData, error)
    SignedBlindPayloadEnvelope(ctx context.Context, blockHash []byte) (*engine.SignedBlindPayloadEnvelope, error)
    // Fee recipients operations.
    FeeRecipientByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (common.Address, error)
    RegistrationByValidatorID(ctx context.Context, id primitives.ValidatorIndex) (*ethpb.ValidatorRegistrationV1, error)
@@ -92,6 +96,7 @@ type NoHeadAccessDatabase interface {
    SaveDepositContractAddress(ctx context.Context, addr common.Address) error
    // SaveExecutionChainData operations.
    SaveExecutionChainData(ctx context.Context, data *ethpb.ETH1ChainData) error
    SaveBlindPayloadEnvelope(ctx context.Context, envelope interfaces.ROSignedExecutionPayloadEnvelope) error
    // Run any required database migrations.
    RunMigrations(ctx context.Context) error
    // Fee recipients operations.
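The new interface methods key blind payload envelopes by execution block hash rather than beacon block root. A hypothetical consumer might check existence first and then fetch, as in this self-contained sketch; the envelopeStore interface, memStore type, and the string envelope placeholder are all illustrative and not part of Prysm's API.

package main

import (
    "context"
    "errors"
    "fmt"
)

// envelopeStore mirrors the shape of the two new database methods.
type envelopeStore interface {
    HasBlindPayloadEnvelope(ctx context.Context, hash [32]byte) bool
    SignedBlindPayloadEnvelope(ctx context.Context, blockHash []byte) (string, error)
}

// memStore is a toy in-memory implementation used only for this example.
type memStore struct {
    envelopes map[[32]byte]string
}

func (m *memStore) HasBlindPayloadEnvelope(_ context.Context, hash [32]byte) bool {
    _, ok := m.envelopes[hash]
    return ok
}

func (m *memStore) SignedBlindPayloadEnvelope(_ context.Context, blockHash []byte) (string, error) {
    var key [32]byte
    copy(key[:], blockHash)
    env, ok := m.envelopes[key]
    if !ok {
        return "", errors.New("not found")
    }
    return env, nil
}

func main() {
    hash := [32]byte{0xaa}
    var store envelopeStore = &memStore{envelopes: map[[32]byte]string{hash: "envelope-for-0xaa"}}
    ctx := context.Background()
    if store.HasBlindPayloadEnvelope(ctx, hash) {
        env, err := store.SignedBlindPayloadEnvelope(ctx, hash[:])
        fmt.Println(env, err)
    }
}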
@@ -6,10 +6,12 @@ go_library(
        "archived_point.go",
        "backfill.go",
        "backup.go",
        "blind_payload_envelope.go",
        "blocks.go",
        "checkpoint.go",
        "deposit_contract.go",
        "encoding.go",
        "epbs.go",
        "error.go",
        "execution_chain.go",
        "finalized_block_roots.go",
@@ -53,6 +55,7 @@ go_library(
        "//monitoring/tracing:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/dbval:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/eth/v2:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
@@ -70,6 +73,7 @@ go_library(
        "@com_github_schollz_progressbar_v3//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_etcd_go_bbolt//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -80,10 +84,12 @@ go_test(
        "archived_point_test.go",
        "backfill_test.go",
        "backup_test.go",
        "blind_payload_envelope_test.go",
        "blocks_test.go",
        "checkpoint_test.go",
        "deposit_contract_test.go",
        "encoding_test.go",
        "epbs_test.go",
        "execution_chain_test.go",
        "finalized_block_roots_test.go",
        "genesis_test.go",
@@ -124,6 +130,7 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//testing/util/random:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_golang_snappy//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
beacon-chain/db/kv/blind_payload_envelope.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package kv

import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    engine "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    bolt "go.etcd.io/bbolt"
    "go.opencensus.io/trace"
)

// SaveBlindPayloadEnvelope saves a signed execution payload envelope blind in the database.
func (s *Store) SaveBlindPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope) error {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlindPayloadEnvelope")
    defer span.End()

    pb := signed.Proto()
    if pb == nil {
        return errors.New("nil payload envelope")
    }
    env, ok := pb.(*engine.SignedExecutionPayloadEnvelope)
    if !ok {
        return errors.New("invalid payload envelope")
    }

    r := env.Message.Payload.BlockHash
    blind := env.Blind()
    if blind == nil {
        return errors.New("nil blind payload envelope")
    }
    enc, err := encode(ctx, blind)
    if err != nil {
        return err
    }
    err = s.db.Update(func(tx *bolt.Tx) error {
        bucket := tx.Bucket(executionPayloadEnvelopeBucket)
        return bucket.Put(r, enc)
    })

    return err
}

// SignedBlindPayloadEnvelope retrieves a signed execution payload envelope blind from the database.
func (s *Store) SignedBlindPayloadEnvelope(ctx context.Context, blockHash []byte) (*engine.SignedBlindPayloadEnvelope, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.SignedBlindPayloadEnvelope")
    defer span.End()

    env := &engine.SignedBlindPayloadEnvelope{}
    err := s.db.View(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(executionPayloadEnvelopeBucket)
        enc := bkt.Get(blockHash)
        if enc == nil {
            return ErrNotFound
        }
        return decode(ctx, enc, env)
    })
    return env, err
}

func (s *Store) HasBlindPayloadEnvelope(ctx context.Context, hash [32]byte) bool {
    _, span := trace.StartSpan(ctx, "BeaconDB.HasBlock")
    defer span.End()
    if v, ok := s.blockCache.Get(string(hash[:])); v != nil && ok {
        return true
    }
    exists := false
    if err := s.db.View(func(tx *bolt.Tx) error {
        bkt := tx.Bucket(executionPayloadEnvelopeBucket)
        exists = bkt.Get(hash[:]) != nil
        return nil
    }); err != nil { // This view never returns an error, but we'll handle anyway for sanity.
        panic(err)
    }
    return exists
}
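Both store methods above follow the usual bbolt pattern: encode the value, then Put or Get it inside a transaction against a dedicated bucket, keyed by the execution block hash. Here is a minimal standalone roundtrip with the same shape; the bucket name, key bytes, and payload are illustrative placeholders, and the value is stored as raw bytes rather than Prysm's real encoding.

package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"

    bolt "go.etcd.io/bbolt"
)

// payloadEnvelopeBucket is an illustrative bucket name, not Prysm's.
var payloadEnvelopeBucket = []byte("execution-payload-envelopes")

func main() {
    dir, err := os.MkdirTemp("", "envelope-kv")
    if err != nil {
        log.Fatal(err)
    }
    defer os.RemoveAll(dir)

    db, err := bolt.Open(filepath.Join(dir, "kv.db"), 0o600, nil)
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    blockHash := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for the execution block hash key
    encoded := []byte("encoded-blind-envelope") // stand-in for the encoded envelope

    // Save: create the bucket and write the envelope keyed by block hash.
    if err := db.Update(func(tx *bolt.Tx) error {
        bkt, err := tx.CreateBucketIfNotExists(payloadEnvelopeBucket)
        if err != nil {
            return err
        }
        return bkt.Put(blockHash, encoded)
    }); err != nil {
        log.Fatal(err)
    }

    // Load: read it back inside a read-only transaction.
    if err := db.View(func(tx *bolt.Tx) error {
        got := tx.Bucket(payloadEnvelopeBucket).Get(blockHash)
        fmt.Printf("loaded %q\n", got)
        return nil
    }); err != nil {
        log.Fatal(err)
    }
}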
beacon-chain/db/kv/blind_payload_envelope_test.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package kv

import (
    "context"
    "testing"

    "github.com/prysmaticlabs/prysm/v5/testing/require"
    "github.com/prysmaticlabs/prysm/v5/testing/util/random"
)

func TestStore_SignedBlindPayloadEnvelope(t *testing.T) {
    db := setupDB(t)
    ctx := context.Background()
    _, err := db.SignedBlindPayloadEnvelope(ctx, []byte("test"))
    require.ErrorIs(t, err, ErrNotFound)

    env := random.SignedBlindPayloadEnvelope(t)
    err = db.SaveBlindPayloadEnvelope(ctx, env)
    require.NoError(t, err)
    got, err := db.SignedBlindPayloadEnvelope(ctx, env.Message.BeaconBlockRoot)
    require.NoError(t, err)
    require.DeepEqual(t, got, env)
}
@@ -23,10 +23,10 @@ import (
    bolt "go.etcd.io/bbolt"
)

// used to represent errors for inconsistent slot ranges.
// Used to represent errors for inconsistent slot ranges.
var errInvalidSlotRange = errors.New("invalid end slot and start slot provided")

// Block retrieval by root.
// Block retrieval by root. Return nil if block is not found.
func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (interfaces.ReadOnlySignedBeaconBlock, error) {
    ctx, span := trace.StartSpan(ctx, "BeaconDB.Block")
    defer span.End()
@@ -823,6 +823,11 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
        if err := rawBlock.UnmarshalSSZ(enc[len(electraBlindKey):]); err != nil {
            return nil, errors.Wrap(err, "could not unmarshal blinded Electra block")
        }
    case hasEpbsKey(enc):
        rawBlock = &ethpb.SignedBeaconBlockEpbs{}
        if err := rawBlock.UnmarshalSSZ(enc[len(epbsKey):]); err != nil {
            return nil, errors.Wrap(err, "could not unmarshal EPBS block")
        }
    default:
        // Marshal block bytes to phase 0 beacon block.
        rawBlock = &ethpb.SignedBeaconBlock{}
@@ -852,6 +857,8 @@ func encodeBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {

func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
    switch blk.Version() {
    case version.EPBS:
        return epbsKey, nil
    case version.Electra:
        if blk.IsBlinded() {
            return electraBlindKey, nil
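keyForBlock and unmarshalBlock pair up through a version-specific key prefix: the encoder prepends the fork's key bytes to the serialized block, and the decoder dispatches on whichever prefix it finds. Below is a self-contained sketch of that pattern only; the prefix values and the toy payload are made up for illustration and differ from Prysm's actual keys and SSZ/snappy encoding.

package main

import (
    "bytes"
    "errors"
    "fmt"
)

// Illustrative per-fork key prefixes; Prysm defines its own values.
var (
    electraKey = []byte("electra-")
    epbsKey    = []byte("epbs-")
)

// encodeBlock prefixes the serialized block with its fork key.
func encodeBlock(key, sszBytes []byte) []byte {
    return append(append([]byte{}, key...), sszBytes...)
}

// decodeBlock inspects the prefix to decide how to interpret the remainder.
func decodeBlock(enc []byte) (fork string, payload []byte, err error) {
    switch {
    case bytes.HasPrefix(enc, epbsKey):
        return "epbs", enc[len(epbsKey):], nil
    case bytes.HasPrefix(enc, electraKey):
        return "electra", enc[len(electraKey):], nil
    default:
        return "", nil, errors.New("unknown block encoding")
    }
}

func main() {
    enc := encodeBlock(epbsKey, []byte("ssz-bytes"))
    fork, payload, err := decodeBlock(enc)
    fmt.Println(fork, string(payload), err) // epbs ssz-bytes <nil>
}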
Some files were not shown because too many files have changed in this diff.