Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-11 14:28:09 -05:00)

Compare commits: `execution-...` → `fix-order` (166 commits)
Commit SHAs in this comparison:

```text
eebc2c8d20 7a47c03b60 bfd33b64ce 92cf0bc0ab 9a79f49514 0d7d9bd5fc a6052efefb fa5d2a88ce
ff02661229 09309ab1f2 cb9621702e efba931610 4a1c627f6f 0c2464c497 2cfc204e9a 877d9ee948
785fefa3f1 0c22d91a55 fb60456116 be56711892 96f1ebf706 bdb12c7d2f 83cf0f8658 762594a368
c1fc812a38 340935af9c 17d0082c5c e998b5ec97 5d0eb3168c e0c2aa71d4 70c31949ba e38fdb09a4
a50e981c74 8be205cf3d 1b65e00096 e3fb4e86ec 70aaad1904 e42611ec72 a3e61275a3 e82f9ccca3
38a6a7a4ea 1295c987e8 6a27c41aad 98b13ea144 c735ed2e32 bd17779231 e08ed0d823 2b4d8a09ff
21e1f7883b bfa24606c3 d7628bab37 8e2c9313e9 fea441d889 2351064e8d d2699761ed c73473b59d
2aa52fb56a 16d5abd21b 08bfaca42d 179cedd4a0 0f39857653 645328bb9e 9d2273c514 34429368fe
629568c796 5c24978702 4e44999207 15ae71c0da 1caea86152 7cef3b0491 15462844f9 863eee7b40
6d89373583 9a421a2feb 4e41d5c610 0b6bea43a8 f89afb0fbd 3cd2973c92 d3e5710a63 f40b4f16c2
7fd4f746d6 2362d9f3c2 6b84f8c6b1 997a9112d1 d46ca97680 417bbf8a9e a7b016c954 6015493de9
c718bdbe2b 0a8f947169 d7efccf6a5 334920bc9e 6e00db433c c6344e7c3e 2131254722 b6d1866deb
e56f489d06 bf62afb27c 8369056027 09499a732f 2ee015452c ffc1bf8bbe 014dbd5c3a 9bceaa59d2
832ebb3f39 8345c271cc 56208aa84d b866a2c744 a77234e637 e0e7354708 0f86a16915 972c22b02f
93c27340e4 c3edb32558 3baaa732df 8ceb7e76ea 4d5dddd302 55efccb07f 961d8e1481 d396a9931e
e3f8f121f4 80f29e9eda 8995d8133a 31044206b8 3a1702e56f 501ec74a48 c248fe0bb3 215fbcb2e4
e39f44b529 9eff6ae476 3eec5a5cb6 66878deb2c 0b6e1711e4 15025837bb 0229a2055e eb9af15c7a
0584746815 8c4ea850ba 4b43f13e65 26d35474e9 9fbe3564df bed5547890 47922fe7d8 dcd25d1d97
81a2a17c5f 6b3f1de19d 7c17af2a41 ecf5a368d7 557c5be433 49405c3afd 3439122629 f6e5da6723
842f241cb9 41daac1b04 2a7fc84044 44ff0b1a14 91cdd318a8 3dc00816fb e331d5b371 8d5090ce54
25244d906d aa445713ac 177769a1ce 967e9255a2 910609a75f f9c202190a
```
**.github/actions/gomodtidy/Dockerfile** (2 changes, vendored)

```diff
@@ -1,4 +1,4 @@
-FROM golang:1.23-alpine
+FROM golang:1.24-alpine
 
 COPY entrypoint.sh /entrypoint.sh
```
**.github/workflows/changelog.yml** (4 changes, vendored)

```diff
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout source code
-        uses: actions/checkout@v3
+        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
 
       - name: Download unclog binary
         uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
@@ -23,7 +23,7 @@ jobs:
 
       - name: Get new changelog files
         id: new-changelog-files
-        uses: tj-actions/changed-files@v45
+        uses: OffchainLabs/gh-action-changed-files@9200e69727eb73eb060652b19946b8a2fdfb654b # v4.0.8
         with:
           files: |
             changelog/**.md
```
**.github/workflows/go.yml** (16 changes, vendored)

```diff
@@ -28,15 +28,15 @@ jobs:
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Set up Go 1.23
+      - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
-          go install github.com/securego/gosec/v2/cmd/gosec@v2.19.0
-          gosec -exclude-generated -exclude=G307 -exclude-dir=crypto/bls/herumi ./...
+          go install github.com/securego/gosec/v2/cmd/gosec@v2.22.1
+          gosec -exclude-generated -exclude=G307,G115 -exclude-dir=crypto/bls/herumi ./...
 
   lint:
     name: Lint
@@ -45,16 +45,16 @@
       - name: Checkout
         uses: actions/checkout@v4
 
-      - name: Set up Go 1.23
+      - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
         id: go
 
       - name: Golangci-lint
         uses: golangci/golangci-lint-action@v5
         with:
-          version: v1.63.4
+          version: v1.64.5
           args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
 
   build:
@@ -64,7 +64,7 @@
       - name: Set up Go 1.x
         uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.24.0'
         id: go
 
       - name: Check out code into the Go module directory
```
Additional hunks in the same change update the golangci-lint linter list and the Bazel `nogo` static-analysis configuration (the added entries marked below, the `usetesting` linter and the `nopanic` analyzer, are inferred from the hunk sizes and the changelog):

```diff
@@ -75,6 +75,7 @@ linters:
   - tagliatelle
   - thelper
   - unparam
+  - usetesting
   - varnamelen
   - wrapcheck
   - wsl
@@ -165,7 +165,7 @@ STATICCHECK_ANALYZERS = [
     "sa6006",
     "sa9001",
     "sa9002",
-    #"sa9003", # Doesn't build. See https://github.com/dominikh/go-tools/pull/1483
+    "sa9003",
     "sa9004",
     "sa9005",
     "sa9006",
@@ -197,6 +197,7 @@ nogo(
         "//tools/analyzers/logruswitherror:go_default_library",
         "//tools/analyzers/maligned:go_default_library",
         "//tools/analyzers/nop:go_default_library",
+        "//tools/analyzers/nopanic:go_default_library",
         "//tools/analyzers/properpermissions:go_default_library",
         "//tools/analyzers/recursivelock:go_default_library",
         "//tools/analyzers/shadowpredecl:go_default_library",
```
**CHANGELOG.md** (268 changes)

`@@ -4,6 +4,274 @@` (the following release notes are added near the top of the file)

All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v5.3.2](https://github.com/prysmaticlabs/prysm/compare/v5.3.1...v5.3.2) - 2025-03-25

This release introduces support for the `Hoodi` testnet.

Release highlights:

- Ability to run the node on the `Hoodi` testnet. See https://blog.ethereum.org/2025/03/18/hoodi-holesky for more information about `Hoodi`.
- A new feature that allows treating certain blocks as invalid. This is especially useful when the network is split, allowing the node to stop following unwanted forks.

Testnet operators are required to update to this release; without it you will be unable to run the node on the `Hoodi` testnet.

Mainnet operators are recommended to update to this release at their regular cadence.

### Added

- Enable SSZ for builder API calls. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14976)
- Add Hoodi testnet flag `--hoodi` to specify the Hoodi testnet config and bootnodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15057)
- Add `block_gossip` topic support to the beacon API event stream. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15038)
- Added a static analyzer to discourage use of panic() in Prysm. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15075)
- Add a feature flag `--blacklist-roots` to allow the node to specify blocks that will be treated as invalid. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15030)

### Changed

- Changed the request object for `POST /eth/v1/beacon/states/head/validators` to omit the field if empty, for compatibility with other clients. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15031)
- Update spec tests to v1.5.0-beta.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15050)
- Update gossip and RPC message limits to comply with the spec. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14799)
- Return 404 instead of 500 from the API when a blob for a requested index is not found. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14845)
- Save Electra orphaned attestations into the attestation pool's block attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15060)
- Removed a redundant string conversion in `BeaconDbStater.State` to improve code clarity and maintainability. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15081)

### Fixed

- Update the seen unaggregated attestation cache to properly handle Electra attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15034)
- Cosmetic fix for calling `/prysm/validators/performance` when connecting to non-Prysm beacon nodes, removing the 404 error log. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15062)
- Tracked validator cache: make sure not to lose the reference. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15077)
- Fixed proposing at genesis when starting post-Bellatrix. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15084)
## [v5.3.1](https://github.com/prysmaticlabs/prysm/compare/v5.3.0...v5.3.1) - 2025-03-13

This release is packed with critical fixes for **Electra** and some important fixes for mainnet too.

The release highlights include:

- Ensure that deleting a block from the database clears its entry in the slot->root db index. This issue was causing some operators to end up with a bricked database, requiring a full resync. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15011)
- Updated Go to go1.24.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Added a feature flag to sync from an arbitrary beacon block root at startup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15000)
- Updated the default gas limit from 30M to 36M. Override this with `--suggested-gas-limit=` in the validator client. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14858)

Known issues in **Electra**:

- Duplicate attestations are needlessly processed. This is being addressed in [[PR]](https://github.com/prysmaticlabs/prysm/pull/15034).

Testnet operators are strongly encouraged to update to this release. There are many fixes and improvements from the Holesky upgrade incident.

Mainnet operators are recommended to update to this release at their regular cadence.

### Added

- Enable E2E for minimal and mainnet tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14842)
- Enable web3signer E2E for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14936)
- Enable multiclient E2E for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14946)
- Enable scenario E2E tests with Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14946)
- Add an endpoint for getting pending deposits. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14941)
- Add request hash to header for builder: executable data to block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14955)
- Log execution requests in each block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14956)
- Add an endpoint for getting pending partial withdrawals. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14949)
- Tracked validators cache: added the `ItemCount` method. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14957)
- Tracked validators cache: added the `Indices` method. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14957)
- Added deposit request testing for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14964)
- Added support for an otel tracing transport in Prysm's HTTP clients. This allows tracing headers to be sent with HTTP requests so that spans between the validator and beacon chain can be connected in the tracing graph. This change does nothing without `--enable-tracing`. (A minimal sketch of the technique appears after this list.) [[PR]](https://github.com/prysmaticlabs/prysm/pull/14972)
- Add SSZ support to the light client finality and optimistic APIs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14836)
- Add a log of the committee index when committee bits are not the expected length of 1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14993)
- Add acceptable address types for static peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14886)
- Added a feature flag to sync from an arbitrary beacon block root at startup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15000)
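As referenced in the tracing-transport item above, here is a minimal, hedged sketch of the general technique: wrapping an HTTP client with the `otelhttp` transport so that outgoing requests carry trace context. This illustrates the pattern only and is not Prysm's exact wiring.

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newTracedClient returns an *http.Client whose transport records a span per
// outgoing request and injects trace-context headers via the configured
// propagator, so the receiving service (e.g. a beacon node) can join the trace.
func newTracedClient() *http.Client {
	return &http.Client{
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}
}

func main() {
	_ = newTracedClient()
}
```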
### Changed

- Updates geth to 1.15.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14842)
- Updates blst to v3.14.0 and fixes the references in our deps.bzl file. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14921)
- Updated the tracing exporter from Jaeger to otelhttp. This should not be a breaking change; Jaeger supports the otel format, however you may need to update your URL, as the default otel-collector port is 4318. See the [OpenTelemetry Protocol Exporter docs](https://opentelemetry.io/docs/specs/otel/protocol/exporter/) for more details. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14928)
- Don't use MaxCover for Electra on-chain attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14925)
- Tracked validators cache: remove validators from the cache if not seen after 1 hour. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14957)
- Execution request errors on SSZ length have been improved. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14962)
- Deprecate beacon API endpoints for Electra, based on the [3.0.0 release](https://github.com/ethereum/beacon-APIs/pull/506). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14967)
- Use go-cmp for printing better diffs for assertions.DeepEqual. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14978)
- Reorganized beacon chain flags in `--help` text into logical sections. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14959)
- `--validators-registration-batch-size`: change the default value from `0` to `200`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14981)
- Updated Go to go1.24.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Updated gosec to v2.22.1 and golangci to v1.64.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Updated github.com/trailofbits/go-mutexasserts. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Updated rules_go to cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9 to support go1.24.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Validate blob sidecar: re-order signature and bad parent block checks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15013)
- Updated the default gas limit from 30M to 36M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14858)
- Ignore errors from `hasSeenBit` and don't pack unaggregated attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15018)

### Removed

- Remove Fulu state and block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14905)
- Removed the log summarizing all started services. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14958)

### Fixed

- Fixed max and target blobs per block, from static to dynamic values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14911)
- Refactored the publish block and block SSZ functions to fix gocognit. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14913)
- Refactored publish blinded block and blinded block SSZ to correctly deal with version headers and sent blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14913)
- Only check for Electra-related engine methods if Electra is active. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14924)
- Fixed a bug that breaks the new blob storage layout code on Windows, caused by accidental use of a platform-dependent path parsing package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14931)
- Fix the E2E Process Deposit Evaluator for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14933)
- Fixed the `bazel run //:gazelle` command in `DEPENDENCIES.md`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14934)
- Fix the E2E Deposit Activation Evaluator for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14938)
- Dedicated processing of `SingleAttestation` in the monitor service. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14965)
- Added content-type and accept headers for the builder API call on registration. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14961)
- Fixed gocognit in block conversions between JSON and proto types. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14953)
- Lint: fix violations of S1009: should omit nil check; len() for nil slices is defined as zero. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14973)
- Lint: fix violations of non-constant format string in call. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14974)
- Fixed violations of gosec G301. This check requires created directories and files to have permissions 0750 and 0600, respectively. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14980)
- Check for the correct attester slashing type during gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14985)
- Cosmetic fix so that post-Electra validator logs display attestation committee information correctly. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14992)
- Fix inserting the wrong committee index into the seen cache for Electra attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14998)
- Allow any block type to be unmarshaled, rather than only phase0 blocks, in `slotByBlockRoot`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15008)
- Fixed the pruner to not block while pruning a large database, by introducing a batchSize. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14929)
- Decompose Electra block attestations to prevent redundant packing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14896)
- Fixed use of deprecated rand.Seed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Fixed a build issue with SszGen where the go binary was not present in the $PATH. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14969)
- Fixed `/eth/v1/config/spec` to display BLOB_SIDECAR_SUBNET_COUNT and BLOB_SIDECAR_SUBNET_COUNT_ELECTRA. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15016)
- Ensure that deleting a block from the database clears its entry in the slot->root db index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15011)
- Broadcasting BLS-to-execution changes should not use the request context in a goroutine; use context.Background() for the broadcasting goroutine. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15019)
- `/eth/v1/validator/sync_committee_contribution` should check for optimistic status and return a 503 if the node is optimistic. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15022)
- Fixes printing of a superfluous response.WriteHeader call from an error in the builder. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15025)
- Fixes E2E runs with the builder having a wrong gas limit header due to it not being set on eth1 nodes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15025)
- Fixed a bug in the event stream handler when processing payload attribute events where the timestamp and slot of the event would be based on the head rather than the current slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14963)
- Handle unaggregated attestations when decomposing Electra block attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15027)
## [v5.3.0](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...v5.3.0) - 2025-02-12

This release includes support for Pectra activation in the [Holesky](https://github.com/eth-clients/holesky) and [Sepolia](https://github.com/eth-clients/sepolia) testnets! The release contains many fixes for Electra that have been found through rigorous testing on devnets over the last few months.

For mainnet, we have a few nice features for you to try:

- [PR #14023](https://github.com/prysmaticlabs/prysm/pull/14023) introduces a new file layout structure for storing blobs. Rather than storing all blob root directories in one parent directory, blob root directories are organized in subdirectories by epoch. This should vastly decrease the blob cache warm-up time when Prysm is starting. Try this feature with `--blob-storage-layout=by-epoch`. (An illustrative sketch of this layout idea follows below.)

Updating to this release is **required** for Holesky and Sepolia operators, and it is **recommended** for mainnet users, as there are a few bug fixes that apply to Deneb logic.
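As a rough illustration of the by-epoch layout idea mentioned in the highlight above, grouping each block's blob directory under its epoch so startup scans do not walk one huge flat directory, here is a hedged sketch; the directory names and path scheme are assumptions for illustration, not Prysm's actual on-disk layout.

```go
package main

import (
	"fmt"
	"path/filepath"
)

const slotsPerEpoch = 32 // mainnet SLOTS_PER_EPOCH

// blobDir buckets a block's blob directory under its epoch. The path
// components used here are illustrative only.
func blobDir(base string, slot uint64, blockRoot string) string {
	epoch := slot / slotsPerEpoch
	return filepath.Join(base, "by-epoch", fmt.Sprintf("%d", epoch), blockRoot)
}

func main() {
	// e.g. .../by-epoch/351080/0xabc1 for a block at slot 11234567
	fmt.Println(blobDir("/data/prysm/blobs", 11234567, "0xabc1"))
}
```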
### Added

- Added an error field to the log `Finished building block`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14696)
- Implemented a new `EmptyExecutionPayloadHeader` function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Added a proper gas limit check for headers from the builder. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14707)
- `Finished building block`: display the error only if not nil. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14722)
- Added a light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Added support to update the target and max blob count to different values per hard fork config. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14678)
- Log before the blob filesystem cache warm-up. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- New design for the attestation pool. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14324)
- Add a field param placeholder for Electra blob target and max to pass spec tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14733)
- Light client: add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Add EIP-7691: blob throughput increase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14750)
- Trace IDONTWANT messages in pubsub. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14778)
- Add Fulu fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14771)
- DB optimization for saving light client bootstraps (save unique sync committees only). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14782)
- Separate type for unaggregated network attestations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14659)
- Remote signer Electra fork support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14477)
- Add an Electra test case to the rewards API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14816)
- Update `proto_test.go` to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14817)
- Update the slasher service to Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14812)
- Builder API endpoint support for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14344)
- Added protoc toolchains with version v25.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Add test cases for the eth_lightclient_bootstrap API SSZ support. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14824)
- Handle `AttesterSlashingElectra` everywhere in the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14823)
- Add a Beacon DB pruning service to prune historical data older than MIN_EPOCHS_FOR_BLOCK_REQUESTS (roughly equivalent to the weak subjectivity period). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14687)
- Nil consolidation request check for core processing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14851)
- Updated the blob sidecar API endpoint for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Slashing pool service to convert slashings from Phase0 to Electra at the fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14844)
- Check to stop eth1 voting after Electra, when eth1 deposits stop. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14835)
- WARN log message on node startup advising of the upcoming deprecation of the `--enable-historical-state-representation` feature flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14856)
- Beacon API event support for `SingleAttestation` and `SignedAggregateAttestationAndProofElectra`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14855)
- Added Electra tests for `TestLightClient_NewLightClientOptimisticUpdateFromBeaconState` and `TestLightClient_NewLightClientFinalityUpdateFromBeaconState`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14783)
- New option to select an alternate blob storage layout. Rather than a flat directory with a subdir for each block root, a multi-level scheme is used to organize blobs by epoch/slot/root, enabling leaner syscalls, indexing and pruning. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14023)
- Send the pending attestation queue's attestations through the notification feed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14862)
- Prune all pending deposits and proofs post-Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14829)
- Add Pectra testnet dates (Sepolia and Holesky). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14884)
### Changed

- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14713)
- Refactor subnet subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14711)
- Refactor RPC handler subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14732)
- Go deps upgrade, from `ioutil` to `io`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14737)
- Move the "successfully registered validator(s) on builder" log to debug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14735)
- Update some test files to use `crypto/rand` instead of `math/rand`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14747)
- Re-organize the content of the `*.proto` files (no functional change). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14755)
- SSZ files generation: remove the `// Hash: ...` header. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14760)
- Updated the Electra spec definition for `process_epoch`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14768)
- Update our `go-libp2p-pubsub` dependency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14770)
- Re-organize the content of files to ease the creation of a new fork boilerplate. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14761)
- Updated the Electra spec definition for `process_registry_updates`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14767)
- Fixed Metadata errors for peers connected via QUIC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14776)
- Updated spec definitions for `process_slashings` in godocs. Simplified the `ProcessSlashings` API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14766)
- Update spec tests to v1.5.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14788)
- Process light client finality updates only for new finalized epochs instead of doing it for every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14718)
- Update blobs by RPC topics from V2 to V1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14785)
- Updated geth to 1.14~. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- E2E tests start from Bellatrix. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Version pinning of unclog after making some UX improvements. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14802)
- Remove helpers to check for execution/compounding withdrawal credentials and expose them as methods. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14808)
- Refactor `2006-01-02 15:04:05` to `time.DateTime`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14792)
- Updated Prysm to Go v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated Bazel version to v7.4.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated rules_go to v0.46.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- Updated golang.org/x/tools to be compatible with v1.23.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14818)
- CI now requires proto files to be properly formatted with clang-format. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14831)
- Improved test coverage of beacon-chain/core/electra/churn.go. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14837)
- Update Electra spec tests to beta1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14841)
- Move the deposit request nil check to apply to all. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14849)
- Do not mark blocks as invalid on context deadlines during state transition. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14838)
- Update Electra core processing to not mark a block bad on an execution request error. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14826)
- Dependency: updated go-ethereum to v1.14.13. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14872)
- Improving readability of the proposer settings loader. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14868)
- Removes the existing validator.processSlot span and adds a validator.processSlot span to slotCtx. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14874)
- DownloadFinalizedData has moved from the api/client package to beacon-chain/sync/checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14871)
- Updated Blob-Batch-Limit to increase to 192 for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated Blob-Batch-Limit-Burst-Factor to increase to 3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Changed the derived batch limit when serving blobs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14883)
- Updated go-libp2p-pubsub to v0.13.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14890)
- Rename the light client flag from `enable-lightclient` to `enable-light-client`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14887)
- Update Electra spec tests to beta2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14901)
### Removed

- Cleanup ProcessSlashings method to remove unnecessary argument. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14762)
- Remove `/proto/eth/v2` directory. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14765)
- Remove `/memsize/` pprof endpoint as it will no longer be supported in go 1.23. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14351)
- Clean `TestCanUpgrade*` tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14791)
- Remove `Copy()` from the `ReadOnlyBeaconBlock` interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14811)
- Removed a tracing span on signature requests. These requests usually took less than 5 nanoseconds and are generally not worth tracing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14864)
### Fixed

- Added a check to prevent a nil pointer dereference or out-of-bounds array access when validating the BLSToExecutionChange on an impossibly nil validator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14705)
- EIP-7691: ensure new blob subnets are subscribed one epoch in advance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14759)
- Fix the kzg commitment inclusion proof depth minimal value. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14787)
- Replace exampleIP with `96.7.129.13`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14795)
- Fixed a p2p test to reliably return a static IP through DNS resolution. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14800)
- `ToBlinded`: use the Fulu struct for Fulu (instead of Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14797)
- Fix panic with type cast on pbgenericblock(). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14801)
- Prysmctl generate genesis state: fix truncation of ExtraData to 32 bytes to satisfy SSZ marshaling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14803)
- Added conditional evaluators to fix scenario E2E tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14798)
- Use `SingleAttestation` for Fulu in the p2p attestation map. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14809)
- `UpgradeToFulu`: respect the specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14821)
- `nodeFilter`: implement `filterPeerForBlobSubnet` to avoid error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14822)
- Fixed deposit packing for post-Electra: early return if EIP-6110 is applied. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14697)
- Fix batch-processing of new pending deposits by getting validators from state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14827)
- Fix handling of an unfound block at slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14852)
- Fixed an incorrect attester slashing length check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14833)
- Fix the monitor service for Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14853)
- Add more nil checks on ToConsensus functions for added safety. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14867)
- Fix the Electra state to safely share references on pending fields when appending. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14895)
- Add missing config values from the spec. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14903)
- We remove the unused `rebuildTrie` assignments for fields which do not use them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14906)
- Fix the block API endpoint to handle blocks with the same structure but on different forks (i.e. Fulu and Electra). [[PR]](https://github.com/prysmaticlabs/prysm/pull/14897)
- We change how we track blob indexes during their reconstruction from the EL. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14909)
- We now use the correct maximum value when serving blobs for Electra blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14910)
### Security

- Go version upgrade to 1.22.10 for CVE-2024-34156. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14729)
- Update golang.org/x/crypto to v0.31.0 to address CVE-2024-45337. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14777)
- Update golang.org/x/net to v0.33.0 to address CVE-2024-45338. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14780)

## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)

Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.
A further hunk in the same change updates the `bazel run //:gazelle` dependency-update instructions (the fix referenced in the changelog entry about `DEPENDENCIES.md`):

```diff
@@ -55,7 +55,7 @@ bazel build //beacon-chain --config=release
 ## Adding / updating dependencies
 
 1. Add your dependency as you would with go modules. I.e. `go get ...`
-1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod` to update the bazel managed dependencies.
+1. Run `bazel run //:gazelle -- update-repos -from_file=go.mod -to_macro=deps.bzl%prysm_deps -prune=true` to update the bazel managed dependencies.
 
 Example:
```
**README.md** (63 changes)

```diff
@@ -1,5 +1,7 @@
-# Prysm: An Ethereum Consensus Implementation Written in Go
+<h1 align="left">Prysm: An Ethereum Consensus Implementation Written in Go</h1>
+
+<div align="left">
 
 [](https://buildkite.com/prysmatic-labs/prysm)
 [](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
 [](https://github.com/ethereum/consensus-specs/tree/v1.4.0)
@@ -7,31 +9,60 @@
 [](https://discord.gg/prysmaticlabs)
 [](https://www.gitpoap.io/gh/prysmaticlabs/prysm)
 
-This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com). See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
+</div>
 
-### Getting Started
+---
 
-A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/prysmaticlabs).
+## 📖 Overview
 
-### Staking on Mainnet
+This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
 
-To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
+See the [Changelog](https://github.com/prysmaticlabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
+
+---
+
+## 🚀 Getting Started
+
+A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.
+
+💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysmaticlabs)** for support.
+
+---
+
+## 🏆 Staking on Mainnet
+
+To participate in staking, you can join the **[official Ethereum launchpad](https://launchpad.ethereum.org)**. The launchpad is the **only recommended** way to become a validator on mainnet.
+
+🔍 Explore validator rewards/penalties:
+
+- **[beaconcha.in](https://beaconcha.in)**
+- **[beaconscan](https://beaconscan.com)**
+
+---
+
+## 🤝 Contributing
+
+### 🔥 Branches
 
-## Contributing
-### Branches
 Prysm maintains two permanent branches:
 
-* [master](https://github.com/prysmaticlabs/prysm/tree/master): This points to the latest stable release. It is ideal for most users.
-* [develop](https://github.com/prysmaticlabs/prysm/tree/develop): This is used for development, it contains the latest PRs. Developers should base their PRs on this branch.
+- **[`master`](https://github.com/prysmaticlabs/prysm/tree/master)** - This points to the latest stable release. It is ideal for most users.
+- **[`develop`](https://github.com/prysmaticlabs/prysm/tree/develop)** - This is used for development and contains the latest PRs. Developers should base their PRs on this branch.
 
-### Guide
-Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
+### 🛠 Contribution Guide
 
-## License
+Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!
 
-[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
+---
 
-## Legal Disclaimer
+## 📜 License
 
-[Terms of Use](/TERMS_OF_SERVICE.md)
+[](https://www.gnu.org/licenses/gpl-3.0.en.html)
+
+This project is licensed under the **GNU General Public License v3.0**.
+
+---
+
+## ⚖️ Legal Disclaimer
+
+📜 [Terms of Use](/TERMS_OF_SERVICE.md)
```
**WORKSPACE** (48 changes)

```diff
@@ -160,15 +160,15 @@ oci_register_toolchains(
 
 http_archive(
     name = "io_bazel_rules_go",
-    integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
     patch_args = ["-p1"],
     patches = [
         # Expose internals of go_test for custom build transitions.
         "//third_party:io_bazel_rules_go_test.patch",
     ],
+    sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
+    strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
     urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
+        "https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
     ],
 )
@@ -210,7 +210,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
 go_rules_dependencies()
 
 go_register_toolchains(
-    go_version = "1.23.5",
+    go_version = "1.24.0",
     nogo = "@//:nogo",
 )
@@ -255,7 +255,7 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )
 
-consensus_spec_version = "v1.5.0-beta.1"
+consensus_spec_version = "v1.5.0-beta.4"
 
 bls_test_version = "v0.1.1"
@@ -271,7 +271,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
+    integrity = "sha256-QG0NUqaCvP5lKaKKwF/fmeICZVjONMlb7EE+MtYl0C0=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
 )
@@ -287,7 +287,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
+    integrity = "sha256-8NQngTSSqzW/j3tOUi3r5h+94ChRbLNWTt7BOGqr4+E=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
 )
@@ -303,7 +303,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
+    integrity = "sha256-gFqxbaBnJ7dtdoj0zFbVrtlHv/bLNuWjrTHkyCAjFjI=",
     url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
 )
@@ -318,7 +318,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
+    integrity = "sha256-9paalF0POULpP2ga+4ouHSETKYrWNCUCZoJHPuFw06E=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -365,9 +365,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-b7ZTT+olF+VXEJYNTV5jggNtCkt9dOejm1i2VE+zy+0=",
-    strip_prefix = "holesky-874c199423ccd180607320c38cbaca05d9a1573a",
-    url = "https://github.com/eth-clients/holesky/archive/874c199423ccd180607320c38cbaca05d9a1573a.tar.gz", # 2024-06-18
+    integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
+    strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
+    url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz", # 2025-03-17
 )
 
 http_archive(
@@ -381,9 +381,25 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-cY/UgpCcYEhQf7JefD65FI8tn/A+rAvKhcm2/qiVdqY=",
-    strip_prefix = "sepolia-f2c219a93c4491cee3d90c18f2f8e82aed850eab",
-    url = "https://github.com/eth-clients/sepolia/archive/f2c219a93c4491cee3d90c18f2f8e82aed850eab.tar.gz", # 2024-09-19
+    integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
+    strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
+    url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz", # 2025-03-17
+)
+
+http_archive(
+    name = "hoodi_testnet",
+    build_file_content = """
+filegroup(
+    name = "configs",
+    srcs = [
+        "metadata/config.yaml",
+    ],
+    visibility = ["//visibility:public"],
+)
+""",
+    integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
+    strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
+    url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
 )
 
 http_archive(
@@ -431,7 +447,7 @@ gometalinter_dependencies()
 
 load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
 
-gazelle_dependencies()
+gazelle_dependencies(go_sdk = "go_sdk")
 
 load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
```
**api/apiutil/BUILD.bazel** (new file, 19 lines)

```python
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["common.go"],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/apiutil",
    visibility = ["//visibility:public"],
    deps = ["//consensus-types/primitives:go_default_library"],
)

go_test(
    name = "go_default_test",
    srcs = ["common_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//consensus-types/primitives:go_default_library",
        "//testing/assert:go_default_library",
    ],
)
```
**api/apiutil/common.go** (new file, 23 lines)

```go
package apiutil

import (
	"fmt"
	neturl "net/url"
	"strconv"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// Uint64ToString is a util function that will convert uints to string
func Uint64ToString[T uint64 | primitives.Slot | primitives.ValidatorIndex | primitives.CommitteeIndex | primitives.Epoch](val T) string {
	return strconv.FormatUint(uint64(val), 10)
}

// BuildURL is a util function that assists with adding query parameters to the url
func BuildURL(path string, queryParams ...neturl.Values) string {
	if len(queryParams) == 0 {
		return path
	}

	return fmt.Sprintf("%s?%s", path, queryParams[0].Encode())
}
```
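A small usage sketch of these helpers follows; the endpoint path and query parameter names are made up for illustration and do not correspond to a specific Prysm API.

```go
package apiutil_test

import (
	"fmt"
	"net/url"

	"github.com/prysmaticlabs/prysm/v5/api/apiutil"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// ExampleBuildURL composes a query URL from typed primitives using the helpers above.
func ExampleBuildURL() {
	params := url.Values{}
	params.Add("slot", apiutil.Uint64ToString(primitives.Slot(12345)))
	params.Add("index", apiutil.Uint64ToString(primitives.CommitteeIndex(3)))

	// url.Values.Encode sorts keys, so "index" is emitted before "slot".
	fmt.Println(apiutil.BuildURL("/example/endpoint", params))
	// Output: /example/endpoint?index=3&slot=12345
}
```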
**api/apiutil/common_test.go** (new file, 37 lines)

```go
package apiutil

import (
	"net/url"
	"testing"

	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/testing/assert"
)

func TestBeaconApiHelpers_TestUint64ToString(t *testing.T) {
	const expectedResult = "1234"
	const val = uint64(1234)

	assert.Equal(t, expectedResult, Uint64ToString(val))
	assert.Equal(t, expectedResult, Uint64ToString(primitives.Slot(val)))
	assert.Equal(t, expectedResult, Uint64ToString(primitives.ValidatorIndex(val)))
	assert.Equal(t, expectedResult, Uint64ToString(primitives.CommitteeIndex(val)))
	assert.Equal(t, expectedResult, Uint64ToString(primitives.Epoch(val)))
}

func TestBuildURL_NoParams(t *testing.T) {
	wanted := "/aaa/bbb/ccc"
	actual := BuildURL("/aaa/bbb/ccc")
	assert.Equal(t, wanted, actual)
}

func TestBuildURL_WithParams(t *testing.T) {
	params := url.Values{}
	params.Add("xxxx", "1")
	params.Add("yyyy", "2")
	params.Add("zzzz", "3")

	wanted := "/aaa/bbb/ccc?xxxx=1&yyyy=2&zzzz=3"
	actual := BuildURL("/aaa/bbb/ccc", params)
	assert.Equal(t, wanted, actual)
}
```
A related hunk reduces the Bazel `go_library`/`go_test` targets for the `api/client/beacon` package (per its `importpath`) from 62 lines to 33; old and new lines are shown together here, without change markers:

```
@@ -3,62 +3,33 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "checkpoint.go",
        "client.go",
        "client_epbs.go",
        "doc.go",
        "health.go",
        "log.go",
        "template.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon",
    visibility = ["//visibility:public"],
    deps = [
        "//api/client:go_default_library",
        "//api/client/beacon/iface:go_default_library",
        "//api/server:go_default_library",
        "//api/server/structs:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//io/file:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_x_mod//semver:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "checkpoint_test.go",
        "client_test.go",
        "health_test.go",
    ],
    srcs = ["client_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//api/client:go_default_library",
        "//api/client/beacon/testing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/blocks/testing:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//encoding/ssz/detect:go_default_library",
        "//network/forks:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@org_uber_go_mock//gomock:go_default_library",
    ],
)
```
@@ -1,276 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
base "github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/ssz/detect"
|
||||
"github.com/prysmaticlabs/prysm/v5/io/file"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
var errCheckpointBlockMismatch = errors.New("mismatch between checkpoint sync state and block")
|
||||
|
||||
// OriginData represents the BeaconState and ReadOnlySignedBeaconBlock necessary to start an empty Beacon Node
|
||||
// using Checkpoint Sync.
|
||||
type OriginData struct {
|
||||
sb []byte
|
||||
bb []byte
|
||||
st state.BeaconState
|
||||
b interfaces.ReadOnlySignedBeaconBlock
|
||||
vu *detect.VersionedUnmarshaler
|
||||
br [32]byte
|
||||
sr [32]byte
|
||||
}
|
||||
|
||||
// SaveBlock saves the downloaded block to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (o *OriginData) SaveBlock(dir string) (string, error) {
|
||||
blockPath := path.Join(dir, fname("block", o.vu, o.b.Block().Slot(), o.br))
|
||||
return blockPath, file.WriteFile(blockPath, o.BlockBytes())
|
||||
}
|
||||
|
||||
// SaveState saves the downloaded state to a unique file in the given path.
|
||||
// For readability and collision avoidance, the file name includes: type, config name, slot and root
|
||||
func (o *OriginData) SaveState(dir string) (string, error) {
|
||||
statePath := path.Join(dir, fname("state", o.vu, o.st.Slot(), o.sr))
|
||||
return statePath, file.WriteFile(statePath, o.StateBytes())
|
||||
}
|
||||
|
||||
// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
|
||||
func (o *OriginData) StateBytes() []byte {
|
||||
return o.sb
|
||||
}
|
||||
|
||||
// BlockBytes returns the ssz-encoded bytes of the downloaded ReadOnlySignedBeaconBlock value.
|
||||
func (o *OriginData) BlockBytes() []byte {
|
||||
return o.bb
|
||||
}
|
||||
|
||||
func fname(prefix string, vu *detect.VersionedUnmarshaler, slot primitives.Slot, root [32]byte) string {
|
||||
return fmt.Sprintf("%s_%s_%s_%d-%#x.ssz", prefix, vu.Config.ConfigName, version.String(vu.Fork), slot, root)
|
||||
}

// DownloadFinalizedData downloads the most recently finalized state, and the block most recently applied to that state.
// This pair can be used to initialize a new beacon node via checkpoint sync.
func DownloadFinalizedData(ctx context.Context, client *Client) (*OriginData, error) {
	sb, err := client.GetState(ctx, IdFinalized)
	if err != nil {
		return nil, err
	}
	vu, err := detect.FromState(sb)
	if err != nil {
		return nil, errors.Wrap(err, "error detecting chain config for finalized state")
	}

	log.WithFields(logrus.Fields{
		"name": vu.Config.ConfigName,
		"fork": version.String(vu.Fork),
	}).Info("Detected supported config in remote finalized state")

	s, err := vu.UnmarshalBeaconState(sb)
	if err != nil {
		return nil, errors.Wrap(err, "error unmarshaling finalized state to correct version")
	}

	slot := s.LatestBlockHeader().Slot
	bb, err := client.GetBlock(ctx, IdFromSlot(slot))
	if err != nil {
		return nil, errors.Wrapf(err, "error requesting block by slot = %d", slot)
	}
	b, err := vu.UnmarshalBeaconBlock(bb)
	if err != nil {
		return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
	}
	br, err := b.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block")
	}
	bodyRoot, err := b.Block().Body().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "error computing hash_tree_root of retrieved block body")
	}

	sbr := bytesutil.ToBytes32(s.LatestBlockHeader().BodyRoot)
	if sbr != bodyRoot {
		return nil, errors.Wrapf(errCheckpointBlockMismatch, "state body root = %#x, block body root = %#x", sbr, bodyRoot)
	}
	sr, err := s.HashTreeRoot(ctx)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to compute htr for finalized state at slot=%d", s.Slot())
	}

	log.
		WithField("blockSlot", b.Block().Slot()).
		WithField("stateSlot", s.Slot()).
		WithField("stateRoot", hexutil.Encode(sr[:])).
		WithField("blockRoot", hexutil.Encode(br[:])).
		Info("Downloaded checkpoint sync state and block.")
	return &OriginData{
		st: s,
		b:  b,
		sb: sb,
		bb: bb,
		vu: vu,
		br: br,
		sr: sr,
	}, nil
}
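
// Example (a minimal sketch, not part of this package's API): a caller could use
// DownloadFinalizedData together with SaveState/SaveBlock to persist a checkpoint
// sync origin. "dataDir" is a hypothetical directory supplied by the caller.
//
//	func saveCheckpointOrigin(ctx context.Context, c *Client, dataDir string) error {
//		od, err := DownloadFinalizedData(ctx, c)
//		if err != nil {
//			return errors.Wrap(err, "could not download finalized state and block")
//		}
//		if _, err := od.SaveState(dataDir); err != nil {
//			return errors.Wrap(err, "could not save origin state")
//		}
//		_, err = od.SaveBlock(dataDir)
//		return errors.Wrap(err, "could not save origin block")
//	}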

// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
// a weak subjectivity checkpoint beacon node flag to be used for validation.
type WeakSubjectivityData struct {
	BlockRoot [32]byte
	StateRoot [32]byte
	Epoch     primitives.Epoch
}

// CheckpointString returns the standard string representation of a Checkpoint.
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
func (wsd *WeakSubjectivityData) CheckpointString() string {
	return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
}
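
// Example (sketch): the CheckpointString output has the "<blockRoot>:<epoch>" shape shown
// above and can be passed to a beacon node's weak subjectivity checkpoint flag. The root
// and epoch below are hypothetical.
//
//	wsd := &WeakSubjectivityData{BlockRoot: root, Epoch: 74888} // root is a [32]byte value
//	flagValue := wsd.CheckpointString()                         // "0x<64 hex chars>:74888"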

// ComputeWeakSubjectivityCheckpoint attempts to use the prysm weak_subjectivity api
// to obtain the current weak_subjectivity checkpoint.
// For non-prysm nodes, the same computation will be performed with extra steps,
// using the head state downloaded from the beacon node api.
func ComputeWeakSubjectivityCheckpoint(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
	ws, err := client.GetWeakSubjectivity(ctx)
	if err != nil {
		// a 404/405 is expected if querying an endpoint that doesn't support the weak subjectivity checkpoint api
		if !errors.Is(err, base.ErrNotOK) {
			return nil, errors.Wrap(err, "unexpected API response for prysm-only weak subjectivity checkpoint API")
		}
		// fall back to vanilla Beacon Node API method
		return computeBackwardsCompatible(ctx, client)
	}
	log.Printf("server weak subjectivity checkpoint response - epoch=%d, block_root=%#x, state_root=%#x", ws.Epoch, ws.BlockRoot, ws.StateRoot)
	return ws, nil
}
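
// Example (a minimal sketch; the host URL is hypothetical):
//
//	c, err := NewClient("http://localhost:3500")
//	if err != nil {
//		return err
//	}
//	wsd, err := ComputeWeakSubjectivityCheckpoint(ctx, c)
//	if err != nil {
//		return err
//	}
//	log.Printf("weak subjectivity checkpoint = %s", wsd.CheckpointString())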

const (
	prysmMinimumVersion     = "v2.0.7"
	prysmImplementationName = "Prysm"
)

// errUnsupportedPrysmCheckpointVersion indicates remote beacon node can't be used for checkpoint retrieval.
var errUnsupportedPrysmCheckpointVersion = errors.New("node does not meet minimum version requirements for checkpoint retrieval")

// for older endpoints or clients that do not support the weak_subjectivity api method
// we gather the necessary data for a checkpoint sync by:
// - inspecting the remote server's head state and computing the weak subjectivity epoch locally
// - requesting the state at the first slot of the epoch
// - using hash_tree_root(state.latest_block_header) to compute the root of the block the state embeds
// - requesting that block by its root
func computeBackwardsCompatible(ctx context.Context, client *Client) (*WeakSubjectivityData, error) {
	log.Print("falling back to generic checkpoint derivation, weak_subjectivity API not supported by server")
	nv, err := client.GetNodeVersion(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "unable to proceed with fallback method without confirming node version")
	}
	if nv.implementation == prysmImplementationName && semver.Compare(nv.semver, prysmMinimumVersion) < 0 {
		return nil, errors.Wrapf(errUnsupportedPrysmCheckpointVersion, "%s < minimum (%s)", nv.semver, prysmMinimumVersion)
	}
	epoch, err := getWeakSubjectivityEpochFromHead(ctx, client)
	if err != nil {
		return nil, errors.Wrap(err, "error computing weak subjectivity epoch via head state inspection")
	}

	// use first slot of the epoch for the state slot
	slot, err := slots.EpochStart(epoch)
	if err != nil {
		return nil, errors.Wrapf(err, "error computing first slot of epoch=%d", epoch)
	}

	log.Printf("requesting checkpoint state at slot %d", slot)
	// get the state at the first slot of the epoch
	sb, err := client.GetState(ctx, IdFromSlot(slot))
	if err != nil {
		return nil, errors.Wrapf(err, "failed to request state by slot from api, slot=%d", slot)
	}

	// ConfigFork is used to unmarshal the BeaconState so we can read the block root in latest_block_header
	vu, err := detect.FromState(sb)
	if err != nil {
		return nil, errors.Wrap(err, "error detecting chain config for beacon state")
	}
	log.Printf("detected supported config in checkpoint state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))

	s, err := vu.UnmarshalBeaconState(sb)
	if err != nil {
		return nil, errors.Wrap(err, "error using detected config fork to unmarshal state bytes")
	}

	// compute state and block roots
	sr, err := s.HashTreeRoot(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "error computing hash_tree_root of state")
	}

	h := s.LatestBlockHeader()
	h.StateRoot = sr[:]
	br, err := h.HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "error while computing block root using state data")
	}

	bb, err := client.GetBlock(ctx, IdFromRoot(br))
	if err != nil {
		return nil, errors.Wrapf(err, "error requesting block by root = %#x", br)
	}
	b, err := vu.UnmarshalBeaconBlock(bb)
	if err != nil {
		return nil, errors.Wrap(err, "unable to unmarshal block to a supported type using the detected fork schedule")
	}
	br, err = b.Block().HashTreeRoot()
	if err != nil {
		return nil, errors.Wrap(err, "error computing hash_tree_root for block obtained via root")
	}

	return &WeakSubjectivityData{
		Epoch:     epoch,
		BlockRoot: br,
		StateRoot: sr,
	}, nil
}

// this method downloads the head state, which can be used to find the correct chain config
// and use prysm's helper methods to compute the latest weak subjectivity epoch.
func getWeakSubjectivityEpochFromHead(ctx context.Context, client *Client) (primitives.Epoch, error) {
	headBytes, err := client.GetState(ctx, IdHead)
	if err != nil {
		return 0, err
	}
	vu, err := detect.FromState(headBytes)
	if err != nil {
		return 0, errors.Wrap(err, "error detecting chain config for beacon state")
	}
	log.Printf("detected supported config in remote head state, name=%s, fork=%s", vu.Config.ConfigName, version.String(vu.Fork))
	headState, err := vu.UnmarshalBeaconState(headBytes)
	if err != nil {
		return 0, errors.Wrap(err, "error unmarshaling state to correct version")
	}

	epoch, err := helpers.LatestWeakSubjectivityEpoch(ctx, headState, vu.Config)
	if err != nil {
		return 0, errors.Wrap(err, "error computing the weak subjectivity epoch from head state")
	}

	log.Printf("(computed client-side) weak subjectivity epoch = %d", epoch)
	return epoch, nil
}
@@ -11,7 +11,6 @@ import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"text/template"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
@@ -29,13 +28,13 @@ const (
|
||||
getSignedBlockPath = "/eth/v2/beacon/blocks"
|
||||
getBlockRootPath = "/eth/v1/beacon/blocks/{{.Id}}/root"
|
||||
getForkForStatePath = "/eth/v1/beacon/states/{{.Id}}/fork"
|
||||
getWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
|
||||
getForkSchedulePath = "/eth/v1/config/fork_schedule"
|
||||
getConfigSpecPath = "/eth/v1/config/spec"
|
||||
getStatePath = "/eth/v2/debug/beacon/states"
|
||||
getNodeVersionPath = "/eth/v1/node/version"
|
||||
changeBLStoExecutionPath = "/eth/v1/beacon/pool/bls_to_execution_changes"
|
||||
getPayloadEnvelopePath = "/eth/v1/beacon/execution_payload"
|
||||
|
||||
GetNodeVersionPath = "/eth/v1/node/version"
|
||||
GetWeakSubjectivityPath = "/prysm/v1/beacon/weak_subjectivity"
|
||||
)
|
||||
|
||||
// StateOrBlockId represents the block_id / state_id parameters that several of the Eth Beacon API methods accept.
|
||||
@@ -64,24 +63,8 @@ func IdFromSlot(s primitives.Slot) StateOrBlockId {
|
||||
return StateOrBlockId(strconv.FormatUint(uint64(s), 10))
|
||||
}
|
||||
|
||||
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
|
||||
func idTemplate(ts string) func(StateOrBlockId) string {
|
||||
t := template.Must(template.New("").Parse(ts))
|
||||
f := func(id StateOrBlockId) string {
|
||||
b := bytes.NewBuffer(nil)
|
||||
err := t.Execute(b, struct{ Id string }{Id: string(id)})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
// run the template to ensure that it is valid
|
||||
// this should happen load time (using package scoped vars) to ensure runtime errors aren't possible
|
||||
_ = f(IdGenesis)
|
||||
return f
|
||||
}
|
||||
|
||||
func renderGetBlockPath(id StateOrBlockId) string {
|
||||
// RenderGetBlockPath formats a block id into a path for the GetBlock API endpoint.
|
||||
func RenderGetBlockPath(id StateOrBlockId) string {
|
||||
return path.Join(getSignedBlockPath, string(id))
|
||||
}
|
||||
|
||||
@@ -105,7 +88,7 @@ func NewClient(host string, opts ...client.ClientOpt) (*Client, error) {
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
|
||||
blockPath := renderGetBlockPath(blockId)
|
||||
blockPath := RenderGetBlockPath(blockId)
|
||||
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting state by id = %s", blockId)
|
||||
@@ -113,8 +96,6 @@ func (c *Client) GetBlock(ctx context.Context, blockId StateOrBlockId) ([]byte,
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var getBlockRootTpl = idTemplate(getBlockRootPath)
|
||||
|
||||
// GetBlockRoot retrieves the hash_tree_root of the BeaconBlock for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
@@ -137,8 +118,6 @@ func (c *Client) GetBlockRoot(ctx context.Context, blockId StateOrBlockId) ([32]
|
||||
return bytesutil.ToBytes32(rs), nil
|
||||
}
|
||||
|
||||
var getForkTpl = idTemplate(getForkForStatePath)
|
||||
|
||||
// GetFork queries the Beacon Node API for the Fork from the state identified by stateId.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
@@ -196,6 +175,10 @@ type NodeVersion struct {
|
||||
systemInfo string
|
||||
}
|
||||
|
||||
func (nv *NodeVersion) SetImplementation(impl string) {
|
||||
nv.implementation = impl
|
||||
}
|
||||
|
||||
var versionRE = regexp.MustCompile(`^(\w+)/(v\d+\.\d+\.\d+[-a-zA-Z0-9]*)\s*/?(.*)$`)
|
||||
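// Illustration (hedged, not from the original source): given a version string such as
// "Prysm/v2.0.7 (linux amd64)", versionRE is expected to split it as follows:
//
//	nv, err := parseNodeVersion("Prysm/v2.0.7 (linux amd64)")
//	// nv.implementation == "Prysm", nv.semver == "v2.0.7",
//	// and nv.systemInfo holds the remainder of the string.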
|
||||
func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
@@ -213,7 +196,7 @@ func parseNodeVersion(v string) (*NodeVersion, error) {
|
||||
// GetNodeVersion requests that the beacon node identify information about its implementation in a format
|
||||
// similar to an HTTP User-Agent field. ex: Lighthouse/v0.1.5 (Linux x86_64)
|
||||
func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
|
||||
b, err := c.Get(ctx, getNodeVersionPath)
|
||||
b, err := c.Get(ctx, GetNodeVersionPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error requesting node version")
|
||||
}
|
||||
@@ -229,7 +212,8 @@ func (c *Client) GetNodeVersion(ctx context.Context) (*NodeVersion, error) {
|
||||
return parseNodeVersion(d.Data.Version)
|
||||
}
|
||||
|
||||
func renderGetStatePath(id StateOrBlockId) string {
|
||||
// RenderGetStatePath formats a state id into a path for the GetState API endpoint.
|
||||
func RenderGetStatePath(id StateOrBlockId) string {
|
||||
return path.Join(getStatePath, string(id))
|
||||
}
|
||||
|
||||
@@ -247,13 +231,29 @@ func (c *Client) GetState(ctx context.Context, stateId StateOrBlockId) ([]byte,
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// WeakSubjectivityData represents the state root, block root and epoch of the BeaconState + ReadOnlySignedBeaconBlock
|
||||
// that falls at the beginning of the current weak subjectivity period. These values can be used to construct
|
||||
// a weak subjectivity checkpoint beacon node flag to be used for validation.
|
||||
type WeakSubjectivityData struct {
|
||||
BlockRoot [32]byte
|
||||
StateRoot [32]byte
|
||||
Epoch primitives.Epoch
|
||||
}
|
||||
|
||||
// CheckpointString returns the standard string representation of a Checkpoint.
|
||||
// The format is a hex-encoded block root, followed by the epoch of the block, separated by a colon. For example:
|
||||
// "0x1c35540cac127315fabb6bf29181f2ae0de1a3fc909d2e76ba771e61312cc49a:74888"
|
||||
func (wsd *WeakSubjectivityData) CheckpointString() string {
|
||||
return fmt.Sprintf("%#x:%d", wsd.BlockRoot, wsd.Epoch)
|
||||
}
|
||||
|
||||
// GetWeakSubjectivity calls a proposed API endpoint that is unique to prysm
|
||||
// This api method does the following:
|
||||
// - computes weak subjectivity epoch
|
||||
// - finds the highest non-skipped block preceding the epoch
|
||||
// - returns the hash_tree_root of that block, along with the state_root value from the block
|
||||
func (c *Client) GetWeakSubjectivity(ctx context.Context) (*WeakSubjectivityData, error) {
|
||||
body, err := c.Get(ctx, getWeakSubjectivityPath)
|
||||
body, err := c.Get(ctx, GetWeakSubjectivityPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
)
|
||||
|
||||
// GetExecutionPayload retrieves the SignedExecutionPayloadEnvelope for the given block id.
|
||||
// Block identifier can be one of: "head" (canonical head in node's view), "genesis", "finalized",
|
||||
// <slot>, <hex encoded blockRoot with 0x prefix>. Variables of type StateOrBlockId are exported by this package
|
||||
// for the named identifiers.
|
||||
// The return value contains the ssz-encoded bytes.
|
||||
func (c *Client) GetExecutionPayload(ctx context.Context, blockId StateOrBlockId) ([]byte, error) {
|
||||
blockPath := renderGetBlockPath(blockId)
|
||||
b, err := c.Get(ctx, blockPath, client.WithSSZEncoding())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error requesting execuction payload by id = %s", blockId)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
@@ -97,31 +97,31 @@ func TestValidHostname(t *testing.T) {
|
||||
{
|
||||
name: "hostname with port",
|
||||
hostArg: "mydomain.org:3500",
|
||||
path: getNodeVersionPath,
|
||||
path: GetNodeVersionPath,
|
||||
joined: "http://mydomain.org:3500/eth/v1/node/version",
|
||||
},
|
||||
{
|
||||
name: "https scheme, hostname with port",
|
||||
hostArg: "https://mydomain.org:3500",
|
||||
path: getNodeVersionPath,
|
||||
path: GetNodeVersionPath,
|
||||
joined: "https://mydomain.org:3500/eth/v1/node/version",
|
||||
},
|
||||
{
|
||||
name: "http scheme, hostname without port",
|
||||
hostArg: "http://mydomain.org",
|
||||
path: getNodeVersionPath,
|
||||
path: GetNodeVersionPath,
|
||||
joined: "http://mydomain.org/eth/v1/node/version",
|
||||
},
|
||||
{
|
||||
name: "http scheme, trailing slash, hostname without port",
|
||||
hostArg: "http://mydomain.org/",
|
||||
path: getNodeVersionPath,
|
||||
path: GetNodeVersionPath,
|
||||
joined: "http://mydomain.org/eth/v1/node/version",
|
||||
},
|
||||
{
|
||||
name: "http scheme, hostname with basic auth creds and no port",
|
||||
hostArg: "http://username:pass@mydomain.org/",
|
||||
path: getNodeVersionPath,
|
||||
path: GetNodeVersionPath,
|
||||
joined: "http://username:pass@mydomain.org/eth/v1/node/version",
|
||||
},
|
||||
}
|
||||
|
||||
20
api/client/beacon/health/BUILD.bazel
Normal file
@@ -0,0 +1,20 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"health.go",
|
||||
"interfaces.go",
|
||||
"mock.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/health",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["health_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = ["@org_uber_go_mock//gomock:go_default_library"],
|
||||
)
|
||||
@@ -1,20 +1,18 @@
|
||||
package beacon
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
|
||||
)
|
||||
|
||||
type NodeHealthTracker struct {
|
||||
isHealthy *bool
|
||||
healthChan chan bool
|
||||
node iface.HealthNode
|
||||
node Node
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func NewNodeHealthTracker(node iface.HealthNode) *NodeHealthTracker {
|
||||
func NewTracker(node Node) Tracker {
|
||||
return &NodeHealthTracker{
|
||||
node: node,
|
||||
healthChan: make(chan bool, 1),
|
||||
@@ -26,7 +24,7 @@ func (n *NodeHealthTracker) HealthUpdates() <-chan bool {
|
||||
return n.healthChan
|
||||
}
|
||||
|
||||
func (n *NodeHealthTracker) IsHealthy() bool {
|
||||
func (n *NodeHealthTracker) IsHealthy(_ context.Context) bool {
|
||||
n.RLock()
|
||||
defer n.RUnlock()
|
||||
if n.isHealthy == nil {
|
||||
@@ -1,11 +1,10 @@
|
||||
package beacon
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
healthTesting "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
@@ -24,7 +23,7 @@ func TestNodeHealth_IsHealthy(t *testing.T) {
|
||||
isHealthy: &tt.isHealthy,
|
||||
healthChan: make(chan bool, 1),
|
||||
}
|
||||
if got := n.IsHealthy(); got != tt.want {
|
||||
if got := n.IsHealthy(context.Background()); got != tt.want {
|
||||
t.Errorf("IsHealthy() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
@@ -47,7 +46,7 @@ func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := healthTesting.NewMockHealthClient(ctrl)
|
||||
client := NewMockHealthClient(ctrl)
|
||||
client.EXPECT().IsHealthy(gomock.Any()).Return(tt.newStatus)
|
||||
n := &NodeHealthTracker{
|
||||
isHealthy: &tt.initial,
|
||||
@@ -80,8 +79,8 @@ func TestNodeHealth_UpdateNodeHealth(t *testing.T) {
|
||||
func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
client := healthTesting.NewMockHealthClient(ctrl)
|
||||
n := NewNodeHealthTracker(client)
|
||||
client := NewMockHealthClient(ctrl)
|
||||
n := NewTracker(client)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Number of goroutines to spawn for both reading and writing
|
||||
@@ -104,7 +103,7 @@ func TestNodeHealth_Concurrency(t *testing.T) {
|
||||
for i := 0; i < numGoroutines; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
_ = n.IsHealthy() // Just read the value
|
||||
_ = n.IsHealthy(context.Background()) // Just read the value
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
package iface
|
||||
package health
|
||||
|
||||
import "context"
|
||||
|
||||
type HealthTracker interface {
|
||||
type Tracker interface {
|
||||
HealthUpdates() <-chan bool
|
||||
IsHealthy() bool
|
||||
CheckHealth(ctx context.Context) bool
|
||||
Node
|
||||
}
|
||||
|
||||
type HealthNode interface {
|
||||
type Node interface {
|
||||
IsHealthy(ctx context.Context) bool
|
||||
}
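// Example (a minimal sketch of intended use; "node" is assumed to be a beacon node
// client that implements Node): consume health transitions from a Tracker.
//
//	tr := NewTracker(node)
//	go func() {
//		for healthy := range tr.HealthUpdates() {
//			_ = healthy // react to health transitions, e.g. fail over to another host
//		}
//	}()
//	up := tr.IsHealthy(ctx) // point-in-time check under the tracker's read lock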
|
||||
@@ -1,16 +1,15 @@
|
||||
package testing
|
||||
package health
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface"
|
||||
"go.uber.org/mock/gomock"
|
||||
)
|
||||
|
||||
var (
|
||||
_ = iface.HealthNode(&MockHealthClient{})
|
||||
_ = Node(&MockHealthClient{})
|
||||
)
|
||||
|
||||
// MockHealthClient is a mock of HealthClient interface.
|
||||
@@ -1,8 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["health.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/iface",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
34
api/client/beacon/template.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package beacon
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
type templateFn func(StateOrBlockId) string
|
||||
|
||||
var getBlockRootTpl templateFn
|
||||
var getForkTpl templateFn
|
||||
|
||||
func init() {
|
||||
// idTemplate is used to create template functions that can interpolate StateOrBlockId values.
|
||||
idTemplate := func(ts string) func(StateOrBlockId) string {
|
||||
t := template.Must(template.New("").Parse(ts))
|
||||
f := func(id StateOrBlockId) string {
|
||||
b := bytes.NewBuffer(nil)
|
||||
err := t.Execute(b, struct{ Id string }{Id: string(id)})
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("invalid idTemplate: %s", ts))
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
// run the template to ensure that it is valid
|
||||
// this should happen load time (using package scoped vars) to ensure runtime errors aren't possible
|
||||
_ = f(IdGenesis)
|
||||
return f
|
||||
}
|
||||
|
||||
getBlockRootTpl = idTemplate(getBlockRootPath)
|
||||
getForkTpl = idTemplate(getForkForStatePath)
|
||||
}
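// Example (sketch): once init has run, the package-scoped template functions render
// concrete API paths by interpolating a StateOrBlockId, e.g. (slot value hypothetical):
//
//	getBlockRootTpl(IdGenesis)    // "/eth/v1/beacon/blocks/genesis/root"
//	getForkTpl(IdFromSlot(12345)) // "/eth/v1/beacon/states/12345/fork"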
|
||||
@@ -1,12 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["mock.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/api/client/beacon/testing",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api/client/beacon/iface:go_default_library",
|
||||
"@org_uber_go_mock//gomock:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -33,6 +33,7 @@ go_library(
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opentelemetry_go_contrib_instrumentation_net_http_otelhttp//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -46,6 +47,7 @@ go_test(
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
|
||||
@@ -16,6 +16,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/client"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -25,6 +26,7 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -34,9 +36,14 @@ const (
|
||||
postRegisterValidatorPath = "/eth/v1/builder/validators"
|
||||
)
|
||||
|
||||
var errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
var errMalformedRequest = errors.New("required request data are missing")
|
||||
var errNotBlinded = errors.New("submitted block is not blinded")
|
||||
var (
|
||||
vrExample = ðpb.SignedValidatorRegistrationV1{}
|
||||
vrSize = vrExample.SizeSSZ()
|
||||
errMalformedHostname = errors.New("hostname must include port, separated by one colon, like example.com:3500")
|
||||
errMalformedRequest = errors.New("required request data are missing")
|
||||
errNotBlinded = errors.New("submitted block is not blinded")
|
||||
errVersionUnsupported = errors.New("version is not supported")
|
||||
)
|
||||
|
||||
// ClientOpt is a functional option for the Client type (http.Client wrapper)
|
||||
type ClientOpt func(*Client)
|
||||
@@ -51,6 +58,12 @@ func WithObserver(m observer) ClientOpt {
|
||||
}
|
||||
}
|
||||
|
||||
func WithSSZ() ClientOpt {
|
||||
return func(c *Client) {
|
||||
c.sszEnabled = true
|
||||
}
|
||||
}
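// Example (a minimal sketch; the relay URL is hypothetical): construct a builder client
// that negotiates SSZ request/response bodies instead of JSON.
//
//	c, err := NewClient("http://localhost:18550", WithSSZ())
//	if err != nil {
//		return err
//	}
//	// With sszEnabled set, GetHeader, RegisterValidator and SubmitBlindedBlock use
//	// octet-stream media types for their request/response bodies.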
|
||||
|
||||
type requestLogger struct{}
|
||||
|
||||
func (*requestLogger) observe(r *http.Request) (e error) {
|
||||
@@ -94,9 +107,10 @@ type BuilderClient interface {
|
||||
|
||||
// Client provides a collection of helper methods for calling Builder API endpoints.
|
||||
type Client struct {
|
||||
hc *http.Client
|
||||
baseURL *url.URL
|
||||
obvs []observer
|
||||
hc *http.Client
|
||||
baseURL *url.URL
|
||||
obvs []observer
|
||||
sszEnabled bool
|
||||
}
|
||||
|
||||
// NewClient constructs a new client with the provided options (ex WithTimeout).
|
||||
@@ -108,7 +122,7 @@ func NewClient(host string, opts ...ClientOpt) (*Client, error) {
|
||||
return nil, err
|
||||
}
|
||||
c := &Client{
|
||||
hc: &http.Client{},
|
||||
hc: &http.Client{Transport: otelhttp.NewTransport(http.DefaultTransport)},
|
||||
baseURL: u,
|
||||
}
|
||||
for _, o := range opts {
|
||||
@@ -138,7 +152,7 @@ func (c *Client) NodeURL() string {
|
||||
type reqOption func(*http.Request)
|
||||
|
||||
// do is a generic, opinionated request function to reduce boilerplate amongst the methods in this package api/client/builder.
|
||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, err error) {
|
||||
func (c *Client) do(ctx context.Context, method string, path string, body io.Reader, opts ...reqOption) (res []byte, header http.Header, err error) {
|
||||
ctx, span := trace.StartSpan(ctx, "builder.client.do")
|
||||
defer func() {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -182,6 +196,7 @@ func (c *Client) do(ctx context.Context, method string, path string, body io.Rea
|
||||
err = errors.Wrap(err, "error reading http response body from builder server")
|
||||
return
|
||||
}
|
||||
header = r.Header
|
||||
return
|
||||
}
|
||||
|
||||
@@ -211,64 +226,145 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hb, err := c.do(ctx, http.MethodGet, path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var getOpts reqOption
|
||||
if c.sszEnabled {
|
||||
getOpts = func(r *http.Request) {
|
||||
r.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
}
|
||||
} else {
|
||||
getOpts = func(r *http.Request) {
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
}
|
||||
v := &VersionResponse{}
|
||||
if err := json.Unmarshal(hb, v); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
data, header, err := c.do(ctx, http.MethodGet, path, nil, getOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error getting header from builder server")
|
||||
}
|
||||
|
||||
ver, err := version.FromString(strings.ToLower(v.Version))
|
||||
bid, err := c.parseHeaderResponse(data, header)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", strings.ToLower(v.Version)))
|
||||
return nil, errors.Wrapf(
|
||||
err,
|
||||
"error rendering exec header template with slot=%d, parentHash=%#x, pubkey=%#x",
|
||||
slot,
|
||||
parentHash,
|
||||
pubkey,
|
||||
)
|
||||
}
|
||||
return bid, nil
|
||||
}
|
||||
|
||||
func (c *Client) parseHeaderResponse(data []byte, header http.Header) (SignedBid, error) {
|
||||
var versionHeader string
|
||||
if c.sszEnabled || header.Get(api.VersionHeader) != "" {
|
||||
versionHeader = header.Get(api.VersionHeader)
|
||||
} else {
|
||||
// If we don't have a version header, attempt to parse JSON for version
|
||||
v := &VersionResponse{}
|
||||
if err := json.Unmarshal(data, v); err != nil {
|
||||
return nil, errors.Wrap(
|
||||
err,
|
||||
"error unmarshaling builder GetHeader response",
|
||||
)
|
||||
}
|
||||
versionHeader = strings.ToLower(v.Version)
|
||||
}
|
||||
|
||||
ver, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", versionHeader))
|
||||
}
|
||||
|
||||
if ver >= version.Electra {
|
||||
hr := &ExecHeaderResponseElectra{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidElectra(p)
|
||||
return c.parseHeaderElectra(data)
|
||||
}
|
||||
if ver >= version.Deneb {
|
||||
hr := &ExecHeaderResponseDeneb{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidDeneb(p)
|
||||
return c.parseHeaderDeneb(data)
|
||||
}
|
||||
if ver >= version.Capella {
|
||||
hr := &ExecHeaderResponseCapella{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidCapella(p)
|
||||
return c.parseHeaderCapella(data)
|
||||
}
|
||||
if ver >= version.Bellatrix {
|
||||
hr := &ExecHeaderResponse{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBid(p)
|
||||
return c.parseHeaderBellatrix(data)
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
|
||||
|
||||
return nil, fmt.Errorf("unsupported header version %s", versionHeader)
|
||||
}
|
||||
|
||||
func (c *Client) parseHeaderElectra(data []byte) (SignedBid, error) {
|
||||
if c.sszEnabled {
|
||||
sb := ðpb.SignedBuilderBidElectra{}
|
||||
if err := sb.UnmarshalSSZ(data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal SignedBuilderBidElectra SSZ")
|
||||
}
|
||||
return WrappedSignedBuilderBidElectra(sb)
|
||||
}
|
||||
hr := &ExecHeaderResponseElectra{}
|
||||
if err := json.Unmarshal(data, hr); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal ExecHeaderResponseElectra JSON")
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert ExecHeaderResponseElectra to proto")
|
||||
}
|
||||
return WrappedSignedBuilderBidElectra(p)
|
||||
}
|
||||
|
||||
func (c *Client) parseHeaderDeneb(data []byte) (SignedBid, error) {
|
||||
if c.sszEnabled {
|
||||
sb := ðpb.SignedBuilderBidDeneb{}
|
||||
if err := sb.UnmarshalSSZ(data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal SignedBuilderBidDeneb SSZ")
|
||||
}
|
||||
return WrappedSignedBuilderBidDeneb(sb)
|
||||
}
|
||||
hr := &ExecHeaderResponseDeneb{}
|
||||
if err := json.Unmarshal(data, hr); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal ExecHeaderResponseDeneb JSON")
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert ExecHeaderResponseDeneb to proto")
|
||||
}
|
||||
return WrappedSignedBuilderBidDeneb(p)
|
||||
}
|
||||
|
||||
func (c *Client) parseHeaderCapella(data []byte) (SignedBid, error) {
|
||||
if c.sszEnabled {
|
||||
sb := ðpb.SignedBuilderBidCapella{}
|
||||
if err := sb.UnmarshalSSZ(data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal SignedBuilderBidCapella SSZ")
|
||||
}
|
||||
return WrappedSignedBuilderBidCapella(sb)
|
||||
}
|
||||
hr := &ExecHeaderResponseCapella{}
|
||||
if err := json.Unmarshal(data, hr); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal ExecHeaderResponseCapella JSON")
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert ExecHeaderResponseCapella to proto")
|
||||
}
|
||||
return WrappedSignedBuilderBidCapella(p)
|
||||
}
|
||||
|
||||
func (c *Client) parseHeaderBellatrix(data []byte) (SignedBid, error) {
|
||||
if c.sszEnabled {
|
||||
sb := ðpb.SignedBuilderBid{}
|
||||
if err := sb.UnmarshalSSZ(data); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal SignedBuilderBid SSZ")
|
||||
}
|
||||
return WrappedSignedBuilderBid(sb)
|
||||
}
|
||||
hr := &ExecHeaderResponse{}
|
||||
if err := json.Unmarshal(data, hr); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal ExecHeaderResponse JSON")
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert ExecHeaderResponse to proto")
|
||||
}
|
||||
return WrappedSignedBuilderBid(p)
|
||||
}
|
||||
|
||||
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
|
||||
@@ -283,70 +379,243 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
body []byte
|
||||
err error
|
||||
postOpts reqOption
|
||||
)
|
||||
if c.sszEnabled {
|
||||
postOpts = func(r *http.Request) {
|
||||
r.Header.Set("Content-Type", api.OctetStreamMediaType)
|
||||
r.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
}
|
||||
body, err = sszValidatorRegisterRequest(svr)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "error ssz encoding the SignedValidatorRegistration value body in RegisterValidator")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
postOpts = func(r *http.Request) {
|
||||
r.Header.Set("Content-Type", api.JsonMediaType)
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
body, err = jsonValidatorRegisterRequest(svr)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "error json encoding the SignedValidatorRegistration value body in RegisterValidator")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, _, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body), postOpts); err != nil {
|
||||
return errors.Wrap(err, "do")
|
||||
}
|
||||
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
|
||||
return nil
|
||||
}
|
||||
|
||||
func jsonValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
|
||||
vs := make([]*structs.SignedValidatorRegistration, len(svr))
|
||||
for i := 0; i < len(svr); i++ {
|
||||
vs[i] = structs.SignedValidatorRegistrationFromConsensus(svr[i])
|
||||
}
|
||||
body, err := json.Marshal(vs)
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "error encoding the SignedValidatorRegistration value body in RegisterValidator")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
_, err = c.do(ctx, http.MethodPost, postRegisterValidatorPath, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return err
|
||||
func sszValidatorRegisterRequest(svr []*ethpb.SignedValidatorRegistrationV1) ([]byte, error) {
|
||||
if uint64(len(svr)) > params.BeaconConfig().ValidatorRegistryLimit {
|
||||
return nil, errors.Wrap(errMalformedRequest, "validator registry limit exceeded")
|
||||
}
|
||||
log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
|
||||
return nil
|
||||
ssz := make([]byte, vrSize*len(svr))
|
||||
for i, vr := range svr {
|
||||
sszrep, err := vr.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to marshal validator registry ssz")
|
||||
}
|
||||
copy(ssz[i*vrSize:(i+1)*vrSize], sszrep)
|
||||
}
|
||||
return ssz, nil
|
||||
}
|
||||
|
||||
var errResponseVersionMismatch = errors.New("builder API response uses a different version than requested in " + api.VersionHeader + " header")
|
||||
|
||||
func getVersionsBlockToPayload(blockVersion int) (int, error) {
|
||||
if blockVersion >= version.Deneb {
|
||||
return version.Deneb, nil
|
||||
}
|
||||
if blockVersion == version.Capella {
|
||||
return version.Capella, nil
|
||||
}
|
||||
if blockVersion == version.Bellatrix {
|
||||
return version.Bellatrix, nil
|
||||
}
|
||||
return 0, errors.Wrapf(errVersionUnsupported, "block version %d", blockVersion)
|
||||
}
|
||||
|
||||
// SubmitBlindedBlock calls the builder API endpoint that binds the validator to the builder and submits the block.
|
||||
// The response is the full execution payload used to create the blinded block.
|
||||
func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlySignedBeaconBlock) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
body, postOpts, err := c.buildBlindedBlockRequest(sb)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
|
||||
// blobs bundle if it is post deneb.
|
||||
data, header, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
|
||||
}
|
||||
|
||||
ver, err := c.checkBlockVersion(data, header)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
expectedPayloadVer, err := getVersionsBlockToPayload(sb.Version())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
gotPayloadVer, err := getVersionsBlockToPayload(ver)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if expectedPayloadVer != gotPayloadVer {
|
||||
return nil, nil, errors.Wrapf(errResponseVersionMismatch, "expected payload version %d, got %d", expectedPayloadVer, gotPayloadVer)
|
||||
}
|
||||
|
||||
ed, blobs, err := c.parseBlindedBlockResponse(data, ver)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return ed, blobs, nil
|
||||
}
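// Example (sketch): typical use after a blinded block has been signed; "sb" is assumed to
// be a blinded interfaces.ReadOnlySignedBeaconBlock produced by the validator client.
//
//	payload, blobsBundle, err := c.SubmitBlindedBlock(ctx, sb)
//	if err != nil {
//		return errors.Wrap(err, "builder failed to return the unblinded payload")
//	}
//	// payload satisfies interfaces.ExecutionData; blobsBundle is only non-nil for
//	// Deneb and later responses.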
|
||||
|
||||
func (c *Client) checkBlockVersion(respBytes []byte, header http.Header) (int, error) {
|
||||
var versionHeader string
|
||||
if c.sszEnabled {
|
||||
versionHeader = strings.ToLower(header.Get(api.VersionHeader))
|
||||
} else {
|
||||
// fallback to JSON-based version extraction
|
||||
v := &VersionResponse{}
|
||||
if err := json.Unmarshal(respBytes, v); err != nil {
|
||||
return 0, errors.Wrapf(err, "error unmarshaling JSON version fallback")
|
||||
}
|
||||
versionHeader = strings.ToLower(v.Version)
|
||||
}
|
||||
|
||||
ver, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "unsupported header version %s", versionHeader)
|
||||
}
|
||||
|
||||
return ver, nil
|
||||
}
|
||||
|
||||
// Helper: build request body for SubmitBlindedBlock
|
||||
func (c *Client) buildBlindedBlockRequest(sb interfaces.ReadOnlySignedBeaconBlock) ([]byte, reqOption, error) {
|
||||
if !sb.IsBlinded() {
|
||||
return nil, nil, errNotBlinded
|
||||
}
|
||||
|
||||
// massage the proto struct type data into the api response type.
|
||||
if c.sszEnabled {
|
||||
body, err := sb.MarshalSSZ()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not marshal SSZ for blinded block")
|
||||
}
|
||||
opt := func(r *http.Request) {
|
||||
r.Header.Set(api.VersionHeader, version.String(sb.Version()))
|
||||
r.Header.Set("Content-Type", api.OctetStreamMediaType)
|
||||
r.Header.Set("Accept", api.OctetStreamMediaType)
|
||||
}
|
||||
return body, opt, nil
|
||||
}
|
||||
|
||||
mj, err := structs.SignedBeaconBlockMessageJsoner(sb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error generating blinded beacon block post request")
|
||||
}
|
||||
|
||||
body, err := json.Marshal(mj)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error marshaling blinded block post request to json")
|
||||
return nil, nil, errors.Wrap(err, "error marshaling blinded block to JSON")
|
||||
}
|
||||
postOpts := func(r *http.Request) {
|
||||
r.Header.Add("Eth-Consensus-Version", version.String(sb.Version()))
|
||||
opt := func(r *http.Request) {
|
||||
r.Header.Set(api.VersionHeader, version.String(sb.Version()))
|
||||
r.Header.Set("Content-Type", api.JsonMediaType)
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
// post the blinded block - the execution payload response should contain the unblinded payload, along with the
|
||||
// blobs bundle if it is post deneb.
|
||||
rb, err := c.do(ctx, http.MethodPost, postBlindedBeaconBlockPath, bytes.NewBuffer(body), postOpts)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error posting the blinded block to the builder api")
|
||||
return body, opt, nil
|
||||
}
|
||||
|
||||
// Helper: parse the response returned by SubmitBlindedBlock
|
||||
func (c *Client) parseBlindedBlockResponse(
|
||||
respBytes []byte,
|
||||
forkVersion int,
|
||||
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
if c.sszEnabled {
|
||||
return c.parseBlindedBlockResponseSSZ(respBytes, forkVersion)
|
||||
}
|
||||
// ExecutionPayloadResponse parses just the outer container and the Version key, enabling it to use the .Version
// key to determine which underlying data type to use to finish the unmarshaling.
|
||||
return c.parseBlindedBlockResponseJSON(respBytes, forkVersion)
|
||||
}
|
||||
|
||||
func (c *Client) parseBlindedBlockResponseSSZ(
|
||||
respBytes []byte,
|
||||
forkVersion int,
|
||||
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
if forkVersion >= version.Deneb {
|
||||
payloadAndBlobs := &v1.ExecutionPayloadDenebAndBlobsBundle{}
|
||||
if err := payloadAndBlobs.UnmarshalSSZ(respBytes); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadDenebAndBlobsBundle SSZ")
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(payloadAndBlobs.Payload)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
|
||||
}
|
||||
return ed, payloadAndBlobs.BlobsBundle, nil
|
||||
} else if forkVersion >= version.Capella {
|
||||
payload := &v1.ExecutionPayloadCapella{}
|
||||
if err := payload.UnmarshalSSZ(respBytes); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayloadCapella SSZ")
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(payload)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
|
||||
}
|
||||
return ed, nil, nil
|
||||
} else if forkVersion >= version.Bellatrix {
|
||||
payload := &v1.ExecutionPayload{}
|
||||
if err := payload.UnmarshalSSZ(respBytes); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "unable to unmarshal ExecutionPayload SSZ")
|
||||
}
|
||||
ed, err := blocks.NewWrappedExecutionData(payload)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "unable to wrap execution data for %s", version.String(forkVersion))
|
||||
}
|
||||
return ed, nil, nil
|
||||
} else {
|
||||
return nil, nil, fmt.Errorf("unsupported header version %s", version.String(forkVersion))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) parseBlindedBlockResponseJSON(
|
||||
respBytes []byte,
|
||||
forkVersion int,
|
||||
) (interfaces.ExecutionData, *v1.BlobsBundle, error) {
|
||||
ep := &ExecutionPayloadResponse{}
|
||||
if err := json.Unmarshal(rb, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling the builder ExecutionPayloadResponse")
|
||||
if err := json.Unmarshal(respBytes, ep); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "error unmarshaling ExecutionPayloadResponse")
|
||||
}
|
||||
if strings.ToLower(ep.Version) != version.String(sb.Version()) {
|
||||
return nil, nil, errors.Wrapf(errResponseVersionMismatch, "req=%s, recv=%s", strings.ToLower(ep.Version), version.String(sb.Version()))
|
||||
}
|
||||
// This parses the rest of the response and returns the inner data field.
|
||||
pp, err := ep.ParsePayload()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse execution payload from builder with version=%s", ep.Version)
|
||||
return nil, nil, errors.Wrapf(err, "failed to parse payload with version=%s", ep.Version)
|
||||
}
|
||||
// Get the payload as a proto.Message so it can be wrapped as an execution payload interface.
|
||||
pb, err := pp.PayloadProto()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -355,11 +624,13 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Check if it contains blobs
|
||||
bb, ok := pp.(BlobBundler)
|
||||
if ok {
|
||||
bbpb, err := bb.BundleProto()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from builder response with version=%s", ep.Version)
|
||||
return nil, nil, errors.Wrapf(err, "failed to extract blobs bundle from version=%s", ep.Version)
|
||||
}
|
||||
return ed, bbpb, nil
|
||||
}
|
||||
@@ -370,7 +641,10 @@ func (c *Client) SubmitBlindedBlock(ctx context.Context, sb interfaces.ReadOnlyS
|
||||
// response, and an error response may have an error message. This method will return a nil value for error in the
|
||||
// happy path, and an error with information about the server response body for a non-200 response.
|
||||
func (c *Client) Status(ctx context.Context) error {
|
||||
_, err := c.do(ctx, http.MethodGet, getStatus, nil)
|
||||
getOpts := func(r *http.Request) {
|
||||
r.Header.Set("Accept", api.JsonMediaType)
|
||||
}
|
||||
_, _, err := c.do(ctx, http.MethodGet, getStatus, nil, getOpts)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -385,6 +659,18 @@ func non200Err(response *http.Response) error {
|
||||
}
|
||||
msg := fmt.Sprintf("code=%d, url=%s, body=%s", response.StatusCode, response.Request.URL, body)
|
||||
switch response.StatusCode {
|
||||
case http.StatusUnsupportedMediaType:
|
||||
log.WithError(ErrUnsupportedMediaType).Debug(msg)
|
||||
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
|
||||
return errors.Wrap(jsonErr, "unable to read response body")
|
||||
}
|
||||
return errors.Wrap(ErrUnsupportedMediaType, errMessage.Message)
|
||||
case http.StatusNotAcceptable:
|
||||
log.WithError(ErrNotAcceptable).Debug(msg)
|
||||
if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
|
||||
return errors.Wrap(jsonErr, "unable to read response body")
|
||||
}
|
||||
return errors.Wrap(ErrNotAcceptable, errMessage.Message)
|
||||
case http.StatusNoContent:
|
||||
log.WithError(ErrNoContent).Debug(msg)
|
||||
return ErrNoContent
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -87,37 +87,84 @@ func TestClient_RegisterValidator(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedBody := `[{"message":{"fee_recipient":"0x0000000000000000000000000000000000000000","gas_limit":"23","timestamp":"42","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"},"signature":"0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"}]`
|
||||
expectedPath := "/eth/v1/builder/validators"
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
body, err := io.ReadAll(r.Body)
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedBody, string(body))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
reg := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
|
||||
GasLimit: 23,
|
||||
Timestamp: 42,
|
||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
|
||||
t.Run("JSON success", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
body, err := io.ReadAll(r.Body)
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedBody, string(body))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
reg := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
|
||||
GasLimit: 23,
|
||||
Timestamp: 42,
|
||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
|
||||
})
|
||||
t.Run("SSZ success", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
body, err := io.ReadAll(r.Body)
|
||||
defer func() {
|
||||
require.NoError(t, r.Body.Close())
|
||||
}()
|
||||
require.NoError(t, err)
|
||||
request := ð.SignedValidatorRegistrationV1{}
|
||||
itemBytes := body[:request.SizeSSZ()]
|
||||
require.NoError(t, request.UnmarshalSSZ(itemBytes))
|
||||
jsRequest := structs.SignedValidatorRegistrationFromConsensus(request)
|
||||
js, err := json.Marshal([]*structs.SignedValidatorRegistration{jsRequest})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, expectedBody, string(js))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, http.MethodPost, r.Method)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBuffer(nil)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
reg := ð.SignedValidatorRegistrationV1{
|
||||
Message: ð.ValidatorRegistrationV1{
|
||||
FeeRecipient: ezDecode(t, params.BeaconConfig().EthBurnAddressHex),
|
||||
GasLimit: 23,
|
||||
Timestamp: 42,
|
||||
Pubkey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
require.NoError(t, c.RegisterValidator(ctx, []*eth.SignedValidatorRegistrationV1{reg}))
|
||||
})
|
||||
}
|
||||
|
||||
func TestClient_GetHeader(t *testing.T) {
|
||||
@@ -173,6 +220,7 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponse)),
|
||||
@@ -204,9 +252,56 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("bellatrix ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
epr := &ExecHeaderResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponse), epr))
|
||||
pro, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "bellatrix")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedSig := ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505")
|
||||
require.Equal(t, true, bytes.Equal(expectedSig, h.Signature()))
|
||||
expectedTxRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedTxRoot, withdrawalsRoot))
|
||||
require.Equal(t, uint64(1), bidHeader.GasUsed())
|
||||
// this matches the value in the testExampleHeaderResponse
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
@@ -235,9 +330,52 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("capella ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
epr := &ExecHeaderResponseCapella{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseCapella), epr))
|
||||
pro, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "capella")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
@@ -274,6 +412,56 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
})
|
||||
t.Run("deneb ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
epr := &ExecHeaderResponseDeneb{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseDeneb), epr))
|
||||
pro, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "deneb")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
dbid, ok := bid.(builderBidDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := dbid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
})
|
||||
t.Run("deneb, too many kzg commitments", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -290,11 +478,12 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
|
||||
require.ErrorContains(t, "could not convert ExecHeaderResponseDeneb to proto: too many blob commitments: 7", err)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
@@ -335,6 +524,61 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.Equal(t, 1, len(requests.Withdrawals))
|
||||
require.Equal(t, 1, len(requests.Consolidations))
|
||||
|
||||
})
|
||||
t.Run("electra ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
epr := &ExecHeaderResponseElectra{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
|
||||
pro, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "electra")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
ebid, ok := bid.(builderBidElectra)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := ebid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
requests := ebid.ExecutionRequests()
|
||||
require.Equal(t, 1, len(requests.Deposits))
|
||||
require.Equal(t, 1, len(requests.Withdrawals))
|
||||
require.Equal(t, 1, len(requests.Consolidations))
|
||||
|
||||
})
|
||||
t.Run("unsupported version", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
@@ -364,8 +608,8 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayload)),
|
||||
@@ -387,13 +631,53 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
|
||||
require.Equal(t, uint64(1), ep.GasLimit())
|
||||
})
|
||||
t.Run("bellatrix ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "bellatrix", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
epr := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayload), epr))
|
||||
ep := &ExecutionPayload{}
|
||||
require.NoError(t, json.Unmarshal(epr.Data, ep))
|
||||
pro, err := ep.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "bellatrix")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
sbbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockBellatrix(t))
|
||||
require.NoError(t, err)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbbb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"), ep.ParentHash()))
|
||||
bfpg, err := stringToUint256("452312848583266388373324160190187140051835877600158453279131187530910662656")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, fmt.Sprintf("%#x", bfpg.SSZBytes()), fmt.Sprintf("%#x", ep.BaseFeePerGas()))
|
||||
require.Equal(t, uint64(1), ep.GasLimit())
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "capella", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
require.Equal(t, "capella", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadCapella)),
|
||||
@@ -417,14 +701,56 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
t.Run("capella ssz", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "capella", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
epr := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadCapella), epr))
|
||||
ep := &ExecutionPayloadCapella{}
|
||||
require.NoError(t, json.Unmarshal(epr.Data, ep))
|
||||
pro, err := ep.ToProto()
|
||||
require.NoError(t, err)
|
||||
ssz, err := pro.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "capella")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(testSignedBlindedBeaconBlockCapella(t))
|
||||
require.NoError(t, err)
|
||||
ep, _, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
test := testSignedBlindedBeaconBlockDeneb(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get("Eth-Consensus-Version"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Content-Type"))
|
||||
require.Equal(t, "application/json", r.Header.Get("Accept"))
|
||||
require.Equal(t, "deneb", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
var req structs.SignedBlindedBeaconBlockDeneb
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
require.NoError(t, err)
|
||||
@@ -457,6 +783,140 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
})
|
||||
t.Run("deneb ssz", func(t *testing.T) {
|
||||
test := testSignedBlindedBeaconBlockDeneb(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "deneb", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
epr := &ExecPayloadResponseDeneb{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
|
||||
pro, blob, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
combined := &v1.ExecutionPayloadDenebAndBlobsBundle{
|
||||
Payload: pro,
|
||||
BlobsBundle: blob,
|
||||
}
|
||||
ssz, err := combined.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "deneb")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
test := testSignedBlindedBeaconBlockElectra(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "electra", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.JsonMediaType, r.Header.Get("Accept"))
|
||||
var req structs.SignedBlindedBeaconBlockElectra
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
require.NoError(t, err)
|
||||
block, err := req.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, block, test)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleExecutionPayloadDeneb)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
})
|
||||
t.Run("electra ssz", func(t *testing.T) {
|
||||
test := testSignedBlindedBeaconBlockElectra(t)
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, postBlindedBeaconBlockPath, r.URL.Path)
|
||||
require.Equal(t, "electra", r.Header.Get(api.VersionHeader))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Content-Type"))
|
||||
require.Equal(t, api.OctetStreamMediaType, r.Header.Get("Accept"))
|
||||
epr := &ExecPayloadResponseDeneb{}
|
||||
require.NoError(t, json.Unmarshal([]byte(testExampleExecutionPayloadDeneb), epr))
|
||||
pro, blob, err := epr.ToProto()
|
||||
require.NoError(t, err)
|
||||
combined := &v1.ExecutionPayloadDenebAndBlobsBundle{
|
||||
Payload: pro,
|
||||
BlobsBundle: blob,
|
||||
}
|
||||
ssz, err := combined.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
header := http.Header{}
|
||||
header.Set(api.VersionHeader, "electra")
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: header,
|
||||
Body: io.NopCloser(bytes.NewBuffer(ssz)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
sszEnabled: true,
|
||||
}
|
||||
sbb, err := blocks.NewSignedBeaconBlock(test)
|
||||
require.NoError(t, err)
|
||||
|
||||
ep, blobBundle, err := c.SubmitBlindedBlock(ctx, sbb)
|
||||
require.NoError(t, err)
|
||||
withdrawals, err := ep.Withdrawals()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
})
|
||||
t.Run("mismatched versions, expected bellatrix got capella", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -583,7 +1043,13 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
-Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
+Proof: func() [][]byte {
+b := make([][]byte, 33)
+for i := range b {
+b[i] = ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
+}
+return b
+}(),
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -602,8 +1068,8 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
-SyncCommitteeSignature: make([]byte, 48),
-SyncCommitteeBits: bitfield.Bitvector512{0x01},
+SyncCommitteeSignature: make([]byte, 96),
+SyncCommitteeBits: make(bitfield.Bitvector512, 64),
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeader{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -617,7 +1083,7 @@ func testSignedBlindedBeaconBlockBellatrix(t *testing.T) *eth.SignedBlindedBeaco
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
-BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
+BaseFeePerGas: ezDecode(t, "0x4523128485832663883733241601901871400518358776001584532791311875"),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
@@ -725,7 +1191,13 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
-Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
+Proof: func() [][]byte {
+b := make([][]byte, 33)
+for i := range b {
+b[i] = ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
+}
+return b
+}(),
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -744,8 +1216,8 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
-SyncCommitteeSignature: make([]byte, 48),
-SyncCommitteeBits: bitfield.Bitvector512{0x01},
+SyncCommitteeSignature: make([]byte, 96),
+SyncCommitteeBits: make(bitfield.Bitvector512, 64),
},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -759,7 +1231,7 @@ func testSignedBlindedBeaconBlockCapella(t *testing.T) *eth.SignedBlindedBeaconB
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
-BaseFeePerGas: []byte(strconv.FormatUint(1, 10)),
+BaseFeePerGas: ezDecode(t, "0x4523128485832663883733241601901871400518358776001584532791311875"),
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -872,7 +1344,13 @@ func testSignedBlindedBeaconBlockDeneb(t *testing.T) *eth.SignedBlindedBeaconBlo
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
-Proof: [][]byte{ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")},
+Proof: func() [][]byte {
+b := make([][]byte, 33)
+for i := range b {
+b[i] = ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
+}
+return b
+}(),
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
@@ -919,6 +1397,163 @@ func testSignedBlindedBeaconBlockDeneb(t *testing.T) *eth.SignedBlindedBeaconBlo
|
||||
}
|
||||
}
|
||||
|
||||
func testSignedBlindedBeaconBlockElectra(t *testing.T) *eth.SignedBlindedBeaconBlockElectra {
|
||||
basebytes, err := bytesutil.Uint256ToSSZBytes("14074904626401341155369551180448584754667373453244490859944217516317499064576")
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
return ð.SignedBlindedBeaconBlockElectra{
|
||||
Message: ð.BlindedBeaconBlockElectra{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Body: ð.BlindedBeaconBlockBodyElectra{
|
||||
RandaoReveal: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
DepositCount: 1,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Graffiti: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ProposerSlashings: []*eth.ProposerSlashing{
|
||||
{
|
||||
Header_1: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Header_2: ð.SignedBeaconBlockHeader{
|
||||
Header: ð.BeaconBlockHeader{
|
||||
Slot: 1,
|
||||
ProposerIndex: 1,
|
||||
ParentRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BodyRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
AttesterSlashings: []*eth.AttesterSlashingElectra{
|
||||
{
|
||||
Attestation_1: ð.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{1},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Attestations: []*eth.AttestationElectra{
|
||||
{
|
||||
AggregationBits: bitfield.Bitlist{0x01},
|
||||
Data: ð.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
BeaconBlockRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Source: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Epoch: 1,
|
||||
Root: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
},
|
||||
},
|
||||
CommitteeBits: make(bitfield.Bitvector64, 8),
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
Deposits: []*eth.Deposit{
|
||||
{
|
||||
Proof: func() [][]byte {
|
||||
b := make([][]byte, 33)
|
||||
for i := range b {
|
||||
b[i] = ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
}
|
||||
return b
|
||||
}(),
|
||||
Data: ð.Deposit_Data{
|
||||
PublicKey: ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"),
|
||||
WithdrawalCredentials: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
Amount: 1,
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
},
|
||||
VoluntaryExits: []*eth.SignedVoluntaryExit{
|
||||
{
|
||||
Exit: ð.VoluntaryExit{
|
||||
Epoch: 1,
|
||||
ValidatorIndex: 1,
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
},
|
||||
},
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
SyncCommitteeBits: ezDecode(t, "0x6451e9f951ebf05edc01de67e593484b672877054f055903ff0df1a1a945cf30ca26bb4d4b154f94a1bc776bcf5d0efb3603e1f9b8ee2499ccdcfe2a18cef458"),
|
||||
},
|
||||
ExecutionRequests: &v1.ExecutionRequests{},
|
||||
ExecutionPayloadHeader: &v1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
FeeRecipient: ezDecode(t, "0xabcf8e0d4e9587369b2301d0790347320302cc09"),
|
||||
StateRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
ReceiptsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
LogsBloom: ezDecode(t, "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
PrevRandao: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlockNumber: 1,
|
||||
GasLimit: 1,
|
||||
GasUsed: 1,
|
||||
Timestamp: 1,
|
||||
ExtraData: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BaseFeePerGas: basebytes,
|
||||
BlockHash: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
TransactionsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
WithdrawalsRoot: ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"),
|
||||
BlobGasUsed: 1,
|
||||
ExcessBlobGas: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
Signature: ezDecode(t, "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"),
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestLogger(t *testing.T) {
wo := WithObserver(&requestLogger{})
c, err := NewClient("localhost:3500", wo)

@@ -15,3 +15,9 @@ var ErrBadRequest = errors.Wrap(ErrNotOK, "recv 400 BadRequest response from API
// ErrNoContent specifically means that a '204 - No Content' response was received from the API.
// Typically, a 204 is a success but in this case for the Header API means No header is available
var ErrNoContent = errors.New("recv 204 no content response from API, No header is available")
+
+// ErrUnsupportedMediaType specifically means that a '415 - Unsupported Media Type' was received from the API.
+var ErrUnsupportedMediaType = errors.Wrap(ErrNotOK, "The media type in \"Content-Type\" header is unsupported, and the request has been rejected. This occurs when a HTTP request supplies a payload in a content-type that the server is not able to handle.")
+
+// ErrNotAcceptable specifically means that a '406 - Not Acceptable' was received from the API.
+var ErrNotAcceptable = errors.Wrap(ErrNotOK, "The accept header value is not acceptable")

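A minimal caller-side sketch (an assumption, not part of this changeset): since the new sentinel errors wrap ErrNotOK, code in this package could distinguish them with the standard library's errors.Is. The helper name classifyBuilderErr is hypothetical.

// Hypothetical helper; assumes the stdlib "errors" package is imported alongside the sentinel errors above.
func classifyBuilderErr(err error) string {
	switch {
	case errors.Is(err, ErrNoContent):
		return "no header available (204)"
	case errors.Is(err, ErrUnsupportedMediaType):
		return "request media type rejected (415)"
	case errors.Is(err, ErrNotAcceptable):
		return "accept header value not acceptable (406)"
	case errors.Is(err, ErrNotOK):
		return "non-2xx response from the builder API"
	default:
		return "other error"
	}
}
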
@@ -4,13 +4,13 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"block.go",
|
||||
"block_epbs.go",
|
||||
"block_execution.go",
|
||||
"conversions.go",
|
||||
"conversions_blob.go",
|
||||
"conversions_block.go",
|
||||
"conversions_block_execution.go",
|
||||
"conversions_lightclient.go",
|
||||
"conversions_state.go",
|
||||
"converstions_block_epbs.go",
|
||||
"endpoints_beacon.go",
|
||||
"endpoints_blob.go",
|
||||
"endpoints_builder.go",
|
||||
@@ -49,10 +49,16 @@ go_library(
|
||||
|
||||
go_test(
name = "go_default_test",
-srcs = ["conversions_test.go"],
+srcs = [
+"conversions_block_execution_test.go",
+"conversions_test.go",
+],
embed = [":go_default_library"],
deps = [
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)

@@ -186,40 +186,6 @@ type BlindedBeaconBlockBodyBellatrix struct {
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
|
||||
}
|
||||
|
||||
type ExecutionPayload struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Capella
|
||||
// ----------------------------------------------------------------------------
|
||||
@@ -298,42 +264,6 @@ type BlindedBeaconBlockBodyCapella struct {
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Deneb
|
||||
// ----------------------------------------------------------------------------
|
||||
@@ -426,46 +356,6 @@ type BlindedBeaconBlockBodyDeneb struct {
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Electra
|
||||
// ----------------------------------------------------------------------------
|
||||
@@ -560,14 +450,6 @@ type BlindedBeaconBlockBodyElectra struct {
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type (
|
||||
ExecutionRequests struct {
|
||||
Deposits []*DepositRequest `json:"deposits"`
|
||||
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
|
||||
Consolidations []*ConsolidationRequest `json:"consolidations"`
|
||||
}
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Fulu
|
||||
// ----------------------------------------------------------------------------
|
||||
@@ -579,14 +461,14 @@ type SignedBeaconBlockContentsFulu struct {
|
||||
}
|
||||
|
||||
type BeaconBlockContentsFulu struct {
|
||||
-Block *BeaconBlockFulu `json:"block"`
-KzgProofs []string `json:"kzg_proofs"`
-Blobs []string `json:"blobs"`
+Block *BeaconBlockElectra `json:"block"`
+KzgProofs []string `json:"kzg_proofs"`
+Blobs []string `json:"blobs"`
}
|
||||
|
||||
type SignedBeaconBlockFulu struct {
|
||||
-Message *BeaconBlockFulu `json:"message"`
-Signature string `json:"signature"`
+Message *BeaconBlockElectra `json:"message"`
+Signature string `json:"signature"`
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}
|
||||
@@ -599,36 +481,12 @@ func (s *SignedBeaconBlockFulu) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockFulu struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BeaconBlockBodyFulu `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyFulu struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockFulu struct {
|
||||
-Slot string `json:"slot"`
-ProposerIndex string `json:"proposer_index"`
-ParentRoot string `json:"parent_root"`
-StateRoot string `json:"state_root"`
-Body *BlindedBeaconBlockBodyFulu `json:"body"`
+Slot string `json:"slot"`
+ProposerIndex string `json:"proposer_index"`
+ParentRoot string `json:"parent_root"`
+StateRoot string `json:"state_root"`
+Body *BlindedBeaconBlockBodyElectra `json:"body"`
}
|
||||
|
||||
type SignedBlindedBeaconBlockFulu struct {
|
||||
@@ -645,19 +503,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
|
||||
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyFulu struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
package structs
|
||||
|
||||
import "encoding/json"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Epbs
|
||||
// ----------------------------------------------------------------------------
|
||||
type SignedBeaconBlockEpbs struct {
|
||||
Message *BeaconBlockEpbs `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockElectra{}
|
||||
|
||||
func (s *SignedBeaconBlockEpbs) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBeaconBlockEpbs) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BeaconBlockEpbs struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BeaconBlockBodyEpbs `json:"body"`
|
||||
}
|
||||
|
||||
type BeaconBlockBodyEpbs struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
SignedExecutionPayloadHeader *SignedExecutionPayloadHeader `json:"signed_execution_payload_header"`
|
||||
PayloadAttestations []*PayloadAttestation `json:"payload_attestations"`
|
||||
}
|
||||
|
||||
type SignedExecutionPayloadEnvelope struct {
|
||||
Message *ExecutionPayloadEnvelope `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadEnvelope struct {
|
||||
Payload *ExecutionPayloadDeneb `json:"payload"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
BeaconBlockRoot string `json:"beacon_block_root"`
|
||||
Slot string `json:"slot"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
StateRoot string `json:"state_root"`
|
||||
}
|
||||
type SignedExecutionPayloadHeader struct {
|
||||
Message *ExecutionPayloadHeaderEPBS `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderEPBS struct {
|
||||
ParentBlockHash string `json:"parent_block_hash"`
|
||||
ParentBlockRoot string `json:"parent_block_root"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
BuilderIndex string `json:"builder_index"`
|
||||
Slot string `json:"slot"`
|
||||
Value string `json:"value"`
|
||||
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
|
||||
}
|
||||
|
||||
type PayloadAttestation struct {
|
||||
AggregationBits string `json:"aggregation_bits"`
|
||||
Data *PayloadAttestationData `json:"data"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type PayloadAttestationData struct {
|
||||
BeaconBlockRoot string `json:"beacon_block_root"`
|
||||
Slot string `json:"slot"`
|
||||
PayloadStatus string `json:"payload_status"`
|
||||
}
|
||||
|
||||
type GetExecutionPayloadV1Response struct {
|
||||
Version string `json:"version"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
Finalized bool `json:"finalized"`
|
||||
Data *SignedExecutionPayloadEnvelope `json:"data"`
|
||||
}
|
||||
157
api/server/structs/block_execution.go
Normal file
@@ -0,0 +1,157 @@
package structs

// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------

type ExecutionPayload struct {
	ParentHash    string   `json:"parent_hash"`
	FeeRecipient  string   `json:"fee_recipient"`
	StateRoot     string   `json:"state_root"`
	ReceiptsRoot  string   `json:"receipts_root"`
	LogsBloom     string   `json:"logs_bloom"`
	PrevRandao    string   `json:"prev_randao"`
	BlockNumber   string   `json:"block_number"`
	GasLimit      string   `json:"gas_limit"`
	GasUsed       string   `json:"gas_used"`
	Timestamp     string   `json:"timestamp"`
	ExtraData     string   `json:"extra_data"`
	BaseFeePerGas string   `json:"base_fee_per_gas"`
	BlockHash     string   `json:"block_hash"`
	Transactions  []string `json:"transactions"`
}

type ExecutionPayloadHeader struct {
	ParentHash       string `json:"parent_hash"`
	FeeRecipient     string `json:"fee_recipient"`
	StateRoot        string `json:"state_root"`
	ReceiptsRoot     string `json:"receipts_root"`
	LogsBloom        string `json:"logs_bloom"`
	PrevRandao       string `json:"prev_randao"`
	BlockNumber      string `json:"block_number"`
	GasLimit         string `json:"gas_limit"`
	GasUsed          string `json:"gas_used"`
	Timestamp        string `json:"timestamp"`
	ExtraData        string `json:"extra_data"`
	BaseFeePerGas    string `json:"base_fee_per_gas"`
	BlockHash        string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
}

// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------

type ExecutionPayloadCapella struct {
	ParentHash    string        `json:"parent_hash"`
	FeeRecipient  string        `json:"fee_recipient"`
	StateRoot     string        `json:"state_root"`
	ReceiptsRoot  string        `json:"receipts_root"`
	LogsBloom     string        `json:"logs_bloom"`
	PrevRandao    string        `json:"prev_randao"`
	BlockNumber   string        `json:"block_number"`
	GasLimit      string        `json:"gas_limit"`
	GasUsed       string        `json:"gas_used"`
	Timestamp     string        `json:"timestamp"`
	ExtraData     string        `json:"extra_data"`
	BaseFeePerGas string        `json:"base_fee_per_gas"`
	BlockHash     string        `json:"block_hash"`
	Transactions  []string      `json:"transactions"`
	Withdrawals   []*Withdrawal `json:"withdrawals"`
}

type ExecutionPayloadHeaderCapella struct {
	ParentHash       string `json:"parent_hash"`
	FeeRecipient     string `json:"fee_recipient"`
	StateRoot        string `json:"state_root"`
	ReceiptsRoot     string `json:"receipts_root"`
	LogsBloom        string `json:"logs_bloom"`
	PrevRandao       string `json:"prev_randao"`
	BlockNumber      string `json:"block_number"`
	GasLimit         string `json:"gas_limit"`
	GasUsed          string `json:"gas_used"`
	Timestamp        string `json:"timestamp"`
	ExtraData        string `json:"extra_data"`
	BaseFeePerGas    string `json:"base_fee_per_gas"`
	BlockHash        string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
	WithdrawalsRoot  string `json:"withdrawals_root"`
}

// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------

type ExecutionPayloadDeneb struct {
	ParentHash    string        `json:"parent_hash"`
	FeeRecipient  string        `json:"fee_recipient"`
	StateRoot     string        `json:"state_root"`
	ReceiptsRoot  string        `json:"receipts_root"`
	LogsBloom     string        `json:"logs_bloom"`
	PrevRandao    string        `json:"prev_randao"`
	BlockNumber   string        `json:"block_number"`
	GasLimit      string        `json:"gas_limit"`
	GasUsed       string        `json:"gas_used"`
	Timestamp     string        `json:"timestamp"`
	ExtraData     string        `json:"extra_data"`
	BaseFeePerGas string        `json:"base_fee_per_gas"`
	BlockHash     string        `json:"block_hash"`
	Transactions  []string      `json:"transactions"`
	Withdrawals   []*Withdrawal `json:"withdrawals"`
	BlobGasUsed   string        `json:"blob_gas_used"`
	ExcessBlobGas string        `json:"excess_blob_gas"`
}

type ExecutionPayloadHeaderDeneb struct {
	ParentHash       string `json:"parent_hash"`
	FeeRecipient     string `json:"fee_recipient"`
	StateRoot        string `json:"state_root"`
	ReceiptsRoot     string `json:"receipts_root"`
	LogsBloom        string `json:"logs_bloom"`
	PrevRandao       string `json:"prev_randao"`
	BlockNumber      string `json:"block_number"`
	GasLimit         string `json:"gas_limit"`
	GasUsed          string `json:"gas_used"`
	Timestamp        string `json:"timestamp"`
	ExtraData        string `json:"extra_data"`
	BaseFeePerGas    string `json:"base_fee_per_gas"`
	BlockHash        string `json:"block_hash"`
	TransactionsRoot string `json:"transactions_root"`
	WithdrawalsRoot  string `json:"withdrawals_root"`
	BlobGasUsed      string `json:"blob_gas_used"`
	ExcessBlobGas    string `json:"excess_blob_gas"`
}

// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------

type ExecutionRequests struct {
	Deposits       []*DepositRequest       `json:"deposits"`
	Withdrawals    []*WithdrawalRequest    `json:"withdrawals"`
	Consolidations []*ConsolidationRequest `json:"consolidations"`
}

type DepositRequest struct {
	Pubkey                string `json:"pubkey"`
	WithdrawalCredentials string `json:"withdrawal_credentials"`
	Amount                string `json:"amount"`
	Signature             string `json:"signature"`
	Index                 string `json:"index"`
}

type WithdrawalRequest struct {
	SourceAddress   string `json:"source_address"`
	ValidatorPubkey string `json:"validator_pubkey"`
	Amount          string `json:"amount"`
}

type ConsolidationRequest struct {
	SourceAddress string `json:"source_address"`
	SourcePubkey  string `json:"source_pubkey"`
	TargetPubkey  string `json:"target_pubkey"`
}

// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------
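As a quick illustration of the convention these structs follow (not part of the diff): numeric fields are decimal strings and byte fields are 0x-prefixed hex, so the types marshal with encoding/json as-is. The structs import path is assumed from the file location above.

package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs" // assumed path
)

func main() {
	d := &structs.DepositRequest{
		Pubkey:                "0x" + strings.Repeat("aa", 48), // 48-byte BLS pubkey
		WithdrawalCredentials: "0x" + strings.Repeat("bb", 32),
		Amount:                "32000000000", // Gwei, as a decimal string
		Signature:             "0x" + strings.Repeat("cc", 96), // 96-byte BLS signature
		Index:                 "0",
	}
	out, err := json.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}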
@@ -52,6 +52,9 @@ func HistoricalSummaryFromConsensus(s *eth.HistoricalSummary) *HistoricalSummary
|
||||
}
|
||||
|
||||
func (s *SignedBLSToExecutionChange) ToConsensus() (*eth.SignedBLSToExecutionChange, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
change, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -103,14 +106,17 @@ func SignedBLSChangeFromConsensus(ch *eth.SignedBLSToExecutionChange) *SignedBLS
|
||||
|
||||
func SignedBLSChangesToConsensus(src []*SignedBLSToExecutionChange) ([]*eth.SignedBLSToExecutionChange, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedBLSToExecutionChanges")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "SignedBLSToExecutionChanges")
|
||||
}
|
||||
changes := make([]*eth.SignedBLSToExecutionChange, len(src))
|
||||
for i, ch := range src {
|
||||
if ch == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
changes[i], err = ch.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
@@ -156,6 +162,9 @@ func ForkFromConsensus(f *eth.Fork) *Fork {
|
||||
}
|
||||
|
||||
func (s *SignedValidatorRegistration) ToConsensus() (*eth.SignedValidatorRegistrationV1, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -212,6 +221,9 @@ func SignedValidatorRegistrationFromConsensus(vr *eth.SignedValidatorRegistratio
|
||||
}
|
||||
|
||||
func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -236,6 +248,9 @@ func SignedContributionAndProofFromConsensus(c *eth.SignedContributionAndProof)
|
||||
}
|
||||
|
||||
func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error) {
|
||||
if c.Contribution == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Contribution")
|
||||
}
|
||||
contribution, err := c.Contribution.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Contribution")
|
||||
@@ -307,6 +322,9 @@ func SyncCommitteeContributionFromConsensus(c *eth.SyncCommitteeContribution) *S
|
||||
}
|
||||
|
||||
func (s *SignedAggregateAttestationAndProof) ToConsensus() (*eth.SignedAggregateAttestationAndProof, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -327,6 +345,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
if a.Aggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Aggregate")
|
||||
}
|
||||
agg, err := a.Aggregate.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
@@ -343,6 +364,9 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
}
|
||||
|
||||
func (s *SignedAggregateAttestationAndProofElectra) ToConsensus() (*eth.SignedAggregateAttestationAndProofElectra, error) {
|
||||
if s.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
msg, err := s.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
@@ -363,6 +387,9 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
if a.Aggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Aggregate")
|
||||
}
|
||||
agg, err := a.Aggregate.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
@@ -383,6 +410,9 @@ func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -412,6 +442,9 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -433,6 +466,15 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SingleAttFromConsensus(a *eth.SingleAttestation) *SingleAttestation {
|
||||
return &SingleAttestation{
|
||||
CommitteeIndex: fmt.Sprintf("%d", a.CommitteeId),
|
||||
AttesterIndex: fmt.Sprintf("%d", a.AttesterIndex),
|
||||
Data: AttDataFromConsensus(a.Data),
|
||||
Signature: hexutil.Encode(a.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
|
||||
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
|
||||
if err != nil {
|
||||
@@ -442,6 +484,9 @@ func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AttesterIndex")
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -481,10 +526,16 @@ func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
|
||||
}
|
||||
if a.Source == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Source")
|
||||
}
|
||||
source, err := a.Source.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Source")
|
||||
}
|
||||
if a.Target == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Target")
|
||||
}
|
||||
target, err := a.Target.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Target")
|
||||
@@ -584,15 +635,17 @@ func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeS
|
||||
}
|
||||
|
||||
func (e *SignedVoluntaryExit) ToConsensus() (*eth.SignedVoluntaryExit, error) {
|
||||
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
if e.Message == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Message")
|
||||
}
|
||||
exit, err := e.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
}
|
||||
|
||||
sig, err := bytesutil.DecodeHexWithLength(e.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
return &eth.SignedVoluntaryExit{
|
||||
Exit: exit,
|
||||
Signature: sig,
|
||||
@@ -695,10 +748,16 @@ func Eth1DataFromConsensus(e1d *eth.Eth1Data) *Eth1Data {
|
||||
}
|
||||
|
||||
func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
|
||||
if s.SignedHeader1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedHeader1")
|
||||
}
|
||||
h1, err := s.SignedHeader1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SignedHeader1")
|
||||
}
|
||||
if s.SignedHeader2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedHeader2")
|
||||
}
|
||||
h2, err := s.SignedHeader2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SignedHeader2")
|
||||
@@ -711,10 +770,16 @@ func (s *ProposerSlashing) ToConsensus() (*eth.ProposerSlashing, error) {
|
||||
}
|
||||
|
||||
func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation1")
|
||||
}
|
||||
att1, err := s.Attestation1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation1")
|
||||
}
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation2")
|
||||
}
|
||||
att2, err := s.Attestation2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation2")
|
||||
@@ -723,10 +788,16 @@ func (s *AttesterSlashing) ToConsensus() (*eth.AttesterSlashing, error) {
|
||||
}
|
||||
|
||||
func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, error) {
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation1")
|
||||
}
|
||||
att1, err := s.Attestation1.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation1")
|
||||
}
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestation2")
|
||||
}
|
||||
att2, err := s.Attestation2.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Attestation2")
|
||||
@@ -747,6 +818,9 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -779,6 +853,9 @@ func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectr
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("AttestingIndices[%d]", i))
|
||||
}
|
||||
}
|
||||
if a.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Data")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
@@ -812,133 +889,13 @@ func WithdrawalFromConsensus(w *enginev1.Withdrawal) *Withdrawal {
|
||||
}
|
||||
}
|
||||
|
||||
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
|
||||
result := make([]*WithdrawalRequest, len(ws))
|
||||
for i, w := range ws {
|
||||
result[i] = WithdrawalRequestFromConsensus(w)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
|
||||
return &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(w.SourceAddress),
|
||||
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
|
||||
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ValidatorPubkey")
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
return &enginev1.WithdrawalRequest{
|
||||
SourceAddress: src,
|
||||
ValidatorPubkey: pubkey,
|
||||
Amount: amount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
|
||||
result := make([]*ConsolidationRequest, len(cs))
|
||||
for i, c := range cs {
|
||||
result[i] = ConsolidationRequestFromConsensus(c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
|
||||
return &ConsolidationRequest{
|
||||
SourceAddress: hexutil.Encode(c.SourceAddress),
|
||||
SourcePubkey: hexutil.Encode(c.SourcePubkey),
|
||||
TargetPubkey: hexutil.Encode(c.TargetPubkey),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourcePubkey")
|
||||
}
|
||||
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "TargetPubkey")
|
||||
}
|
||||
return &enginev1.ConsolidationRequest{
|
||||
SourceAddress: srcAddress,
|
||||
SourcePubkey: srcPubkey,
|
||||
TargetPubkey: targetPubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
|
||||
result := make([]*DepositRequest, len(ds))
|
||||
for i, d := range ds {
|
||||
result[i] = DepositRequestFromConsensus(d)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
|
||||
return &DepositRequest{
|
||||
Pubkey: hexutil.Encode(d.Pubkey),
|
||||
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: hexutil.Encode(d.Signature),
|
||||
Index: fmt.Sprintf("%d", d.Index),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Pubkey")
|
||||
}
|
||||
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
|
||||
}
|
||||
amount, err := strconv.ParseUint(d.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
index, err := strconv.ParseUint(d.Index, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Index")
|
||||
}
|
||||
return &enginev1.DepositRequest{
|
||||
Pubkey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
Amount: amount,
|
||||
Signature: sig,
|
||||
Index: index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ProposerSlashingsToConsensus(src []*ProposerSlashing) ([]*eth.ProposerSlashing, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "ProposerSlashings")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "ProposerSlashings")
|
||||
}
|
||||
proposerSlashings := make([]*eth.ProposerSlashing, len(src))
|
||||
for i, s := range src {
|
||||
@@ -1067,11 +1024,11 @@ func ProposerSlashingFromConsensus(src *eth.ProposerSlashing) *ProposerSlashing
|
||||
|
||||
func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlashing, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttesterSlashings")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttesterSlashings")
|
||||
}
|
||||
|
||||
attesterSlashings := make([]*eth.AttesterSlashing, len(src))
|
||||
@@ -1082,10 +1039,19 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
|
||||
}
|
||||
|
||||
if s.Attestation1.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
}
|
||||
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
|
||||
}
|
||||
|
||||
if s.Attestation2.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
|
||||
}
|
||||
|
||||
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
@@ -1102,6 +1068,7 @@ func AttesterSlashingsToConsensus(src []*AttesterSlashing) ([]*eth.AttesterSlash
|
||||
}
|
||||
a1AttestingIndices[j] = attestingIndex
|
||||
}
|
||||
|
||||
a1Data, err := s.Attestation1.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
@@ -1199,11 +1166,11 @@ func AttesterSlashingFromConsensus(src *eth.AttesterSlashing) *AttesterSlashing
|
||||
|
||||
func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth.AttesterSlashingElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttesterSlashingsElectra")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttesterSlashingsElectra")
|
||||
}
|
||||
|
||||
attesterSlashings := make([]*eth.AttesterSlashingElectra, len(src))
|
||||
@@ -1211,13 +1178,23 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if s == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
|
||||
if s.Attestation1 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1", i))
|
||||
}
|
||||
|
||||
if s.Attestation1.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation1.Data", i))
|
||||
}
|
||||
|
||||
if s.Attestation2 == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2", i))
|
||||
}
|
||||
|
||||
if s.Attestation2.Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d].Attestation2.Data", i))
|
||||
}
|
||||
|
||||
a1Sig, err := bytesutil.DecodeHexWithLength(s.Attestation1.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
@@ -1331,15 +1308,18 @@ func AttesterSlashingElectraFromConsensus(src *eth.AttesterSlashingElectra) *Att
|
||||
|
||||
func AttsToConsensus(src []*Attestation) ([]*eth.Attestation, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "Attestations")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 128)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "Attestations")
|
||||
}
|
||||
|
||||
atts := make([]*eth.Attestation, len(src))
|
||||
for i, a := range src {
|
||||
if a == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
atts[i], err = a.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
@@ -1358,15 +1338,18 @@ func AttsFromConsensus(src []*eth.Attestation) []*Attestation {
|
||||
|
||||
func AttsElectraToConsensus(src []*AttestationElectra) ([]*eth.AttestationElectra, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "AttestationsElectra")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 8)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "AttestationsElectra")
|
||||
}
|
||||
|
||||
atts := make([]*eth.AttestationElectra, len(src))
|
||||
for i, a := range src {
|
||||
if a == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
atts[i], err = a.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
|
||||
@@ -1385,11 +1368,11 @@ func AttsElectraFromConsensus(src []*eth.AttestationElectra) []*AttestationElect
|
||||
|
||||
func DepositsToConsensus(src []*Deposit) ([]*eth.Deposit, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "Deposits")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "Deposits")
|
||||
}
|
||||
|
||||
deposits := make([]*eth.Deposit, len(src))
|
||||
@@ -1461,15 +1444,18 @@ func DepositsFromConsensus(src []*eth.Deposit) []*Deposit {
|
||||
|
||||
func SignedExitsToConsensus(src []*SignedVoluntaryExit) ([]*eth.SignedVoluntaryExit, error) {
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
return nil, server.NewDecodeError(errNilValue, "SignedVoluntaryExits")
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, server.NewDecodeError(err, "SignedVoluntaryExits")
|
||||
}
|
||||
|
||||
exits := make([]*eth.SignedVoluntaryExit, len(src))
|
||||
for i, e := range src {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, fmt.Sprintf("[%d]", i))
|
||||
}
|
||||
exits[i], err = e.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
File diff suppressed because it is too large

973 api/server/structs/conversions_block_execution.go Normal file
@@ -0,0 +1,973 @@
package structs

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/prysmaticlabs/prysm/v5/api/server"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/container/slice"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------

func ExecutionPayloadFromConsensus(payload *enginev1.ExecutionPayload) (*ExecutionPayload, error) {
	baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
	if err != nil {
		return nil, err
	}
	transactions := make([]string, len(payload.Transactions))
	for i, tx := range payload.Transactions {
		transactions[i] = hexutil.Encode(tx)
	}

	return &ExecutionPayload{
		ParentHash:    hexutil.Encode(payload.ParentHash),
		FeeRecipient:  hexutil.Encode(payload.FeeRecipient),
		StateRoot:     hexutil.Encode(payload.StateRoot),
		ReceiptsRoot:  hexutil.Encode(payload.ReceiptsRoot),
		LogsBloom:     hexutil.Encode(payload.LogsBloom),
		PrevRandao:    hexutil.Encode(payload.PrevRandao),
		BlockNumber:   fmt.Sprintf("%d", payload.BlockNumber),
		GasLimit:      fmt.Sprintf("%d", payload.GasLimit),
		GasUsed:       fmt.Sprintf("%d", payload.GasUsed),
		Timestamp:     fmt.Sprintf("%d", payload.Timestamp),
		ExtraData:     hexutil.Encode(payload.ExtraData),
		BaseFeePerGas: baseFeePerGas,
		BlockHash:     hexutil.Encode(payload.BlockHash),
		Transactions:  transactions,
	}, nil
}

func (e *ExecutionPayload) ToConsensus() (*enginev1.ExecutionPayload, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
payloadTxs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionPayload{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: payloadTxs,
|
||||
}, nil
|
||||
}
|
||||
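To make the intended round trip concrete, a small sketch (not part of the diff): a consensus payload goes out through ExecutionPayloadFromConsensus and comes back through ToConsensus. The byte-slice lengths follow the length checks above (32-byte hashes and roots, 20-byte fee recipient, 256-byte logs bloom, 32-byte SSZ base fee); the structs import path is an assumption.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs" // assumed path
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

func main() {
	src := &enginev1.ExecutionPayload{
		ParentHash:    make([]byte, 32),
		FeeRecipient:  make([]byte, 20),
		StateRoot:     make([]byte, 32),
		ReceiptsRoot:  make([]byte, 32),
		LogsBloom:     make([]byte, 256),
		PrevRandao:    make([]byte, 32),
		BlockNumber:   1,
		GasLimit:      30_000_000,
		GasUsed:       21_000,
		Timestamp:     1_700_000_000,
		ExtraData:     []byte{},
		BaseFeePerGas: make([]byte, 32), // 32-byte little-endian SSZ encoding
		BlockHash:     make([]byte, 32),
		Transactions:  [][]byte{},
	}
	apiPayload, err := structs.ExecutionPayloadFromConsensus(src)
	if err != nil {
		panic(err)
	}
	back, err := apiPayload.ToConsensus()
	if err != nil {
		panic(err)
	}
	fmt.Println(apiPayload.BlockNumber, back.BlockNumber) // "1" 1
}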
|
||||
func ExecutionPayloadHeaderFromConsensus(payload *enginev1.ExecutionPayloadHeader) (*ExecutionPayloadHeader, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeader{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeader) ToConsensus() (*enginev1.ExecutionPayloadHeader, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
|
||||
return &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
}, nil
|
||||
}
|
||||
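A small error-path sketch (not part of the diff): because every field decode wraps its error with server.NewDecodeError and the field path, a malformed input points straight at the offending field. The structs import path is assumed.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs" // assumed path
)

func main() {
	// ParentHash is deliberately too short, so decoding fails on that field.
	h := &structs.ExecutionPayloadHeader{ParentHash: "0x1234"}
	if _, err := h.ToConsensus(); err != nil {
		fmt.Println(err) // error mentions ExecutionPayloadHeader.ParentHash
	}
}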
|
||||
// ----------------------------------------------------------------------------
|
||||
// Capella
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func ExecutionPayloadCapellaFromConsensus(payload *enginev1.ExecutionPayloadCapella) (*ExecutionPayloadCapella, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions := make([]string, len(payload.Transactions))
|
||||
for i, tx := range payload.Transactions {
|
||||
transactions[i] = hexutil.Encode(tx)
|
||||
}
|
||||
|
||||
return &ExecutionPayloadCapella{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
Transactions: transactions,
|
||||
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadCapella) ToConsensus() (*enginev1.ExecutionPayloadCapella, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
payloadTxs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
payloadTxs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
|
||||
}
|
||||
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
|
||||
}
|
||||
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
|
||||
}
|
||||
withdrawals[i] = &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: payloadTxs,
|
||||
Withdrawals: withdrawals,
|
||||
}, nil
|
||||
}
|
||||
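For the withdrawal handling above, a brief sketch (not from the diff) of the string form produced by WithdrawalsFromConsensus, the helper the Capella and Deneb FromConsensus functions call; the structs import path is assumed.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/api/server/structs" // assumed path
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

func main() {
	ws := []*enginev1.Withdrawal{{
		Index:          7,
		ValidatorIndex: primitives.ValidatorIndex(42),
		Address:        make([]byte, 20), // 20-byte execution address
		Amount:         32_000_000_000,   // Gwei
	}}
	for _, w := range structs.WithdrawalsFromConsensus(ws) {
		// All fields come back as strings: decimal numbers plus a hex address.
		fmt.Println(w.WithdrawalIndex, w.ValidatorIndex, w.ExecutionAddress, w.Amount)
	}
}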
|
||||
func ExecutionPayloadHeaderCapellaFromConsensus(payload *enginev1.ExecutionPayloadHeaderCapella) (*ExecutionPayloadHeaderCapella, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeaderCapella{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeaderCapella) ToConsensus() (*enginev1.ExecutionPayloadHeaderCapella, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithMaxLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithMaxLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithMaxLength(e.WithdrawalsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
WithdrawalsRoot: payloadWithdrawalsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Deneb
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func ExecutionPayloadDenebFromConsensus(payload *enginev1.ExecutionPayloadDeneb) (*ExecutionPayloadDeneb, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
transactions := make([]string, len(payload.Transactions))
|
||||
for i, tx := range payload.Transactions {
|
||||
transactions[i] = hexutil.Encode(tx)
|
||||
}
|
||||
|
||||
return &ExecutionPayloadDeneb{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
Transactions: transactions,
|
||||
Withdrawals: WithdrawalsFromConsensus(payload.Withdrawals),
|
||||
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
|
||||
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadDeneb) ToConsensus() (*enginev1.ExecutionPayloadDeneb, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayload")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlockHash")
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Transactions, fieldparams.MaxTxsPerPayloadLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Transactions")
|
||||
}
|
||||
txs := make([][]byte, len(e.Transactions))
|
||||
for i, tx := range e.Transactions {
|
||||
txs[i], err = bytesutil.DecodeHexWithMaxLength(tx, fieldparams.MaxBytesPerTxLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Transactions[%d]", i))
|
||||
}
|
||||
}
|
||||
err = slice.VerifyMaxLength(e.Withdrawals, fieldparams.MaxWithdrawalsPerPayload)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.Withdrawals")
|
||||
}
|
||||
withdrawals := make([]*enginev1.Withdrawal, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalIndex, err := strconv.ParseUint(w.WithdrawalIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].WithdrawalIndex", i))
|
||||
}
|
||||
validatorIndex, err := strconv.ParseUint(w.ValidatorIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ValidatorIndex", i))
|
||||
}
|
||||
address, err := bytesutil.DecodeHexWithLength(w.ExecutionAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].ExecutionAddress", i))
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionPayload.Withdrawals[%d].Amount", i))
|
||||
}
|
||||
withdrawals[i] = &enginev1.Withdrawal{
|
||||
Index: withdrawalIndex,
|
||||
ValidatorIndex: primitives.ValidatorIndex(validatorIndex),
|
||||
Address: address,
|
||||
Amount: amount,
|
||||
}
|
||||
}
|
||||
|
||||
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.BlobGasUsed")
|
||||
}
|
||||
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayload.ExcessBlobGas")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
Transactions: txs,
|
||||
Withdrawals: withdrawals,
|
||||
BlobGasUsed: payloadBlobGasUsed,
|
||||
ExcessBlobGas: payloadExcessBlobGas,
|
||||
}, nil
|
||||
}
|
||||
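One more sketch (not part of the diff) for the base-fee handling shared by all of these converters: the JSON side carries a decimal string, and bytesutil.Uint256ToSSZBytes turns it into the fixed-width SSZ form consumed by the engine types.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
)

func main() {
	b, err := bytesutil.Uint256ToSSZBytes("1000000000") // 1 gwei, in the API's string form
	if err != nil {
		panic(err)
	}
	// SSZ uint256 values are fixed-width little-endian byte slices.
	fmt.Println(len(b))
}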
|
||||
func ExecutionPayloadHeaderDenebFromConsensus(payload *enginev1.ExecutionPayloadHeaderDeneb) (*ExecutionPayloadHeaderDeneb, error) {
|
||||
baseFeePerGas, err := sszBytesToUint256String(payload.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: hexutil.Encode(payload.ParentHash),
|
||||
FeeRecipient: hexutil.Encode(payload.FeeRecipient),
|
||||
StateRoot: hexutil.Encode(payload.StateRoot),
|
||||
ReceiptsRoot: hexutil.Encode(payload.ReceiptsRoot),
|
||||
LogsBloom: hexutil.Encode(payload.LogsBloom),
|
||||
PrevRandao: hexutil.Encode(payload.PrevRandao),
|
||||
BlockNumber: fmt.Sprintf("%d", payload.BlockNumber),
|
||||
GasLimit: fmt.Sprintf("%d", payload.GasLimit),
|
||||
GasUsed: fmt.Sprintf("%d", payload.GasUsed),
|
||||
Timestamp: fmt.Sprintf("%d", payload.Timestamp),
|
||||
ExtraData: hexutil.Encode(payload.ExtraData),
|
||||
BaseFeePerGas: baseFeePerGas,
|
||||
BlockHash: hexutil.Encode(payload.BlockHash),
|
||||
TransactionsRoot: hexutil.Encode(payload.TransactionsRoot),
|
||||
WithdrawalsRoot: hexutil.Encode(payload.WithdrawalsRoot),
|
||||
BlobGasUsed: fmt.Sprintf("%d", payload.BlobGasUsed),
|
||||
ExcessBlobGas: fmt.Sprintf("%d", payload.ExcessBlobGas),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *ExecutionPayloadHeaderDeneb) ToConsensus() (*enginev1.ExecutionPayloadHeaderDeneb, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionPayloadHeader")
|
||||
}
|
||||
payloadParentHash, err := bytesutil.DecodeHexWithLength(e.ParentHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ParentHash")
|
||||
}
|
||||
payloadFeeRecipient, err := bytesutil.DecodeHexWithLength(e.FeeRecipient, fieldparams.FeeRecipientLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.FeeRecipient")
|
||||
}
|
||||
payloadStateRoot, err := bytesutil.DecodeHexWithLength(e.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.StateRoot")
|
||||
}
|
||||
payloadReceiptsRoot, err := bytesutil.DecodeHexWithLength(e.ReceiptsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ReceiptsRoot")
|
||||
}
|
||||
payloadLogsBloom, err := bytesutil.DecodeHexWithLength(e.LogsBloom, fieldparams.LogsBloomLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.LogsBloom")
|
||||
}
|
||||
payloadPrevRandao, err := bytesutil.DecodeHexWithLength(e.PrevRandao, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.PrevRandao")
|
||||
}
|
||||
payloadBlockNumber, err := strconv.ParseUint(e.BlockNumber, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockNumber")
|
||||
}
|
||||
payloadGasLimit, err := strconv.ParseUint(e.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasLimit")
|
||||
}
|
||||
payloadGasUsed, err := strconv.ParseUint(e.GasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.GasUsed")
|
||||
}
|
||||
payloadTimestamp, err := strconv.ParseUint(e.Timestamp, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.Timestamp")
|
||||
}
|
||||
payloadExtraData, err := bytesutil.DecodeHexWithMaxLength(e.ExtraData, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExtraData")
|
||||
}
|
||||
payloadBaseFeePerGas, err := bytesutil.Uint256ToSSZBytes(e.BaseFeePerGas)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BaseFeePerGas")
|
||||
}
|
||||
payloadBlockHash, err := bytesutil.DecodeHexWithLength(e.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlockHash")
|
||||
}
|
||||
payloadTxsRoot, err := bytesutil.DecodeHexWithLength(e.TransactionsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.TransactionsRoot")
|
||||
}
|
||||
payloadWithdrawalsRoot, err := bytesutil.DecodeHexWithLength(e.WithdrawalsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.WithdrawalsRoot")
|
||||
}
|
||||
payloadBlobGasUsed, err := strconv.ParseUint(e.BlobGasUsed, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.BlobGasUsed")
|
||||
}
|
||||
payloadExcessBlobGas, err := strconv.ParseUint(e.ExcessBlobGas, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ExecutionPayloadHeader.ExcessBlobGas")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadParentHash,
|
||||
FeeRecipient: payloadFeeRecipient,
|
||||
StateRoot: payloadStateRoot,
|
||||
ReceiptsRoot: payloadReceiptsRoot,
|
||||
LogsBloom: payloadLogsBloom,
|
||||
PrevRandao: payloadPrevRandao,
|
||||
BlockNumber: payloadBlockNumber,
|
||||
GasLimit: payloadGasLimit,
|
||||
GasUsed: payloadGasUsed,
|
||||
Timestamp: payloadTimestamp,
|
||||
ExtraData: payloadExtraData,
|
||||
BaseFeePerGas: payloadBaseFeePerGas,
|
||||
BlockHash: payloadBlockHash,
|
||||
TransactionsRoot: payloadTxsRoot,
|
||||
WithdrawalsRoot: payloadWithdrawalsRoot,
|
||||
BlobGasUsed: payloadBlobGasUsed,
|
||||
ExcessBlobGas: payloadExcessBlobGas,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Electra
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
ExecutionPayloadElectraFromConsensus = ExecutionPayloadDenebFromConsensus
|
||||
ExecutionPayloadHeaderElectraFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
|
||||
)
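// Illustrative sketch (not part of this change): because the aliases above point at the
// Deneb conversions, an Electra payload converts through the same code path. The helper
// name exampleElectraConversion is hypothetical.
func exampleElectraConversion(p *enginev1.ExecutionPayloadDeneb) (*ExecutionPayloadDeneb, error) {
	return ExecutionPayloadElectraFromConsensus(p)
}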
|
||||
|
||||
func WithdrawalRequestsFromConsensus(ws []*enginev1.WithdrawalRequest) []*WithdrawalRequest {
|
||||
result := make([]*WithdrawalRequest, len(ws))
|
||||
for i, w := range ws {
|
||||
result[i] = WithdrawalRequestFromConsensus(w)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func WithdrawalRequestFromConsensus(w *enginev1.WithdrawalRequest) *WithdrawalRequest {
|
||||
return &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(w.SourceAddress),
|
||||
ValidatorPubkey: hexutil.Encode(w.ValidatorPubkey),
|
||||
Amount: fmt.Sprintf("%d", w.Amount),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *WithdrawalRequest) ToConsensus() (*enginev1.WithdrawalRequest, error) {
|
||||
if w == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "WithdrawalRequest")
|
||||
}
|
||||
src, err := bytesutil.DecodeHexWithLength(w.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(w.ValidatorPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ValidatorPubkey")
|
||||
}
|
||||
amount, err := strconv.ParseUint(w.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
return &enginev1.WithdrawalRequest{
|
||||
SourceAddress: src,
|
||||
ValidatorPubkey: pubkey,
|
||||
Amount: amount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ConsolidationRequestsFromConsensus(cs []*enginev1.ConsolidationRequest) []*ConsolidationRequest {
|
||||
result := make([]*ConsolidationRequest, len(cs))
|
||||
for i, c := range cs {
|
||||
result[i] = ConsolidationRequestFromConsensus(c)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func ConsolidationRequestFromConsensus(c *enginev1.ConsolidationRequest) *ConsolidationRequest {
|
||||
return &ConsolidationRequest{
|
||||
SourceAddress: hexutil.Encode(c.SourceAddress),
|
||||
SourcePubkey: hexutil.Encode(c.SourcePubkey),
|
||||
TargetPubkey: hexutil.Encode(c.TargetPubkey),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConsolidationRequest) ToConsensus() (*enginev1.ConsolidationRequest, error) {
|
||||
if c == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ConsolidationRequest")
|
||||
}
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(c.SourceAddress, common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourceAddress")
|
||||
}
|
||||
srcPubkey, err := bytesutil.DecodeHexWithLength(c.SourcePubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SourcePubkey")
|
||||
}
|
||||
targetPubkey, err := bytesutil.DecodeHexWithLength(c.TargetPubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "TargetPubkey")
|
||||
}
|
||||
return &enginev1.ConsolidationRequest{
|
||||
SourceAddress: srcAddress,
|
||||
SourcePubkey: srcPubkey,
|
||||
TargetPubkey: targetPubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func DepositRequestsFromConsensus(ds []*enginev1.DepositRequest) []*DepositRequest {
|
||||
result := make([]*DepositRequest, len(ds))
|
||||
for i, d := range ds {
|
||||
result[i] = DepositRequestFromConsensus(d)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func DepositRequestFromConsensus(d *enginev1.DepositRequest) *DepositRequest {
|
||||
return &DepositRequest{
|
||||
Pubkey: hexutil.Encode(d.Pubkey),
|
||||
WithdrawalCredentials: hexutil.Encode(d.WithdrawalCredentials),
|
||||
Amount: fmt.Sprintf("%d", d.Amount),
|
||||
Signature: hexutil.Encode(d.Signature),
|
||||
Index: fmt.Sprintf("%d", d.Index),
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DepositRequest) ToConsensus() (*enginev1.DepositRequest, error) {
|
||||
if d == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "DepositRequest")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(d.Pubkey, fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Pubkey")
|
||||
}
|
||||
withdrawalCredentials, err := bytesutil.DecodeHexWithLength(d.WithdrawalCredentials, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "WithdrawalCredentials")
|
||||
}
|
||||
amount, err := strconv.ParseUint(d.Amount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Amount")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(d.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
index, err := strconv.ParseUint(d.Index, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Index")
|
||||
}
|
||||
return &enginev1.DepositRequest{
|
||||
Pubkey: pubkey,
|
||||
WithdrawalCredentials: withdrawalCredentials,
|
||||
Amount: amount,
|
||||
Signature: sig,
|
||||
Index: index,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionRequestsFromConsensus(er *enginev1.ExecutionRequests) *ExecutionRequests {
|
||||
return &ExecutionRequests{
|
||||
Deposits: DepositRequestsFromConsensus(er.Deposits),
|
||||
Withdrawals: WithdrawalRequestsFromConsensus(er.Withdrawals),
|
||||
Consolidations: ConsolidationRequestsFromConsensus(er.Consolidations),
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ExecutionRequests) ToConsensus() (*enginev1.ExecutionRequests, error) {
|
||||
if e == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "ExecutionRequests")
|
||||
}
|
||||
var err error
|
||||
if err = slice.VerifyMaxLength(e.Deposits, params.BeaconConfig().MaxDepositRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositRequests := make([]*enginev1.DepositRequest, len(e.Deposits))
|
||||
for i, d := range e.Deposits {
|
||||
depositRequests[i], err = d.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Deposits[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(e.Withdrawals, params.BeaconConfig().MaxWithdrawalRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(e.Withdrawals))
|
||||
for i, w := range e.Withdrawals {
|
||||
withdrawalRequests[i], err = w.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Withdrawals[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(e.Consolidations, params.BeaconConfig().MaxConsolidationsRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(e.Consolidations))
|
||||
for i, c := range e.Consolidations {
|
||||
consolidationRequests[i], err = c.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("ExecutionRequests.Consolidations[%d]", i))
|
||||
}
|
||||
}
|
||||
return &enginev1.ExecutionRequests{
|
||||
Deposits: depositRequests,
|
||||
Withdrawals: withdrawalRequests,
|
||||
Consolidations: consolidationRequests,
|
||||
}, nil
|
||||
}
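// Illustrative sketch (not part of this change): an ExecutionRequests value decoded from
// JSON can be passed through ToConsensus and mapped back with ExecutionRequestsFromConsensus,
// which the tests added later in this change exercise in each direction. The helper name is hypothetical.
func exampleExecutionRequestsRoundTrip(e *ExecutionRequests) (*ExecutionRequests, error) {
	consensus, err := e.ToConsensus()
	if err != nil {
		return nil, err
	}
	return ExecutionRequestsFromConsensus(consensus), nil
}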
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Fulu
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
var (
|
||||
ExecutionPayloadFuluFromConsensus = ExecutionPayloadDenebFromConsensus
|
||||
ExecutionPayloadHeaderFuluFromConsensus = ExecutionPayloadHeaderDenebFromConsensus
|
||||
BeaconBlockFuluFromConsensus = BeaconBlockElectraFromConsensus
|
||||
)
|
||||
563
api/server/structs/conversions_block_execution_test.go
Normal file
@@ -0,0 +1,563 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func fillByteSlice(sliceLength int, value byte) []byte {
|
||||
bytes := make([]byte, sliceLength)
|
||||
|
||||
for index := range bytes {
|
||||
bytes[index] = value
|
||||
}
|
||||
|
||||
return bytes
|
||||
}
|
||||
|
||||
// TestExecutionPayloadFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusPayload := &enginev1.ExecutionPayload{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 12345,
|
||||
GasLimit: 15000000,
|
||||
GasUsed: 8000000,
|
||||
Timestamp: 1680000000,
|
||||
ExtraData: fillByteSlice(8, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x01),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x22),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(10, 0x33),
|
||||
fillByteSlice(10, 0x44),
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadFromConsensus(consensusPayload)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.FeeRecipient), result.FeeRecipient)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.StateRoot), result.StateRoot)
|
||||
require.Equal(t, hexutil.Encode(consensusPayload.ReceiptsRoot), result.ReceiptsRoot)
|
||||
require.Equal(t, fmt.Sprintf("%d", consensusPayload.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayload_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayload).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayload_ToConsensus_HappyPath(t *testing.T) {
|
||||
payload := &ExecutionPayload{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "12345",
|
||||
GasLimit: "15000000",
|
||||
GasUsed: "8000000",
|
||||
Timestamp: "1680000000",
|
||||
ExtraData: "0x11111111",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x22)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(10, 0x33)),
|
||||
hexutil.Encode(fillByteSlice(10, 0x44)),
|
||||
},
|
||||
}
|
||||
|
||||
result, err := payload.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.ParentHash, fillByteSlice(common.HashLength, 0xaa))
|
||||
require.DeepEqual(t, result.FeeRecipient, fillByteSlice(20, 0xbb))
|
||||
require.DeepEqual(t, result.StateRoot, fillByteSlice(32, 0xcc))
|
||||
}
|
||||
|
||||
// TestExecutionPayloadHeaderFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadHeaderFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadHeaderFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusHeader := &enginev1.ExecutionPayloadHeader{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 9999,
|
||||
GasLimit: 5000000,
|
||||
GasUsed: 2500000,
|
||||
Timestamp: 1111111111,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderFromConsensus(consensusHeader)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(consensusHeader.ParentHash), result.ParentHash)
|
||||
require.Equal(t, fmt.Sprintf("%d", consensusHeader.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadHeader_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadHeader).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadHeader_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeader{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "9999",
|
||||
GasLimit: "5000000",
|
||||
GasUsed: "2500000",
|
||||
Timestamp: "1111111111",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadCapellaFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadCapellaFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadCapellaFromConsensus_HappyPath(t *testing.T) {
|
||||
capellaPayload := &enginev1.ExecutionPayloadCapella{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 123,
|
||||
GasLimit: 9876543,
|
||||
GasUsed: 1234567,
|
||||
Timestamp: 5555555,
|
||||
ExtraData: fillByteSlice(6, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x22),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x33),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(5, 0x44),
|
||||
},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 2,
|
||||
Address: fillByteSlice(20, 0xaa),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadCapellaFromConsensus(capellaPayload)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, hexutil.Encode(capellaPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, len(capellaPayload.Transactions), len(result.Transactions))
|
||||
require.Equal(t, len(capellaPayload.Withdrawals), len(result.Withdrawals))
|
||||
}
|
||||
|
||||
// TestExecutionPayloadCapella_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadCapella).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadCapella_ToConsensus_HappyPath(t *testing.T) {
|
||||
capella := &ExecutionPayloadCapella{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "123",
|
||||
GasLimit: "9876543",
|
||||
GasUsed: "1234567",
|
||||
Timestamp: "5555555",
|
||||
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(5, 0x44)),
|
||||
},
|
||||
Withdrawals: []*Withdrawal{
|
||||
{
|
||||
WithdrawalIndex: "1",
|
||||
ValidatorIndex: "2",
|
||||
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
|
||||
Amount: "100",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := capella.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), capella.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), capella.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), capella.StateRoot)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadDenebFromConsensus_HappyPath checks the
|
||||
// ExecutionPayloadDenebFromConsensus function under normal conditions.
|
||||
func TestExecutionPayloadDenebFromConsensus_HappyPath(t *testing.T) {
|
||||
denebPayload := &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 999,
|
||||
GasLimit: 2222222,
|
||||
GasUsed: 1111111,
|
||||
Timestamp: 666666,
|
||||
ExtraData: fillByteSlice(6, 0x11),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x22),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x33),
|
||||
Transactions: [][]byte{
|
||||
fillByteSlice(5, 0x44),
|
||||
},
|
||||
Withdrawals: []*enginev1.Withdrawal{
|
||||
{
|
||||
Index: 1,
|
||||
ValidatorIndex: 2,
|
||||
Address: fillByteSlice(20, 0xaa),
|
||||
Amount: 100,
|
||||
},
|
||||
},
|
||||
BlobGasUsed: 1234,
|
||||
ExcessBlobGas: 5678,
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadDenebFromConsensus(denebPayload)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(denebPayload.ParentHash), result.ParentHash)
|
||||
require.Equal(t, len(denebPayload.Transactions), len(result.Transactions))
|
||||
require.Equal(t, len(denebPayload.Withdrawals), len(result.Withdrawals))
|
||||
require.Equal(t, "1234", result.BlobGasUsed)
|
||||
require.Equal(t, fmt.Sprintf("%d", denebPayload.BlockNumber), result.BlockNumber)
|
||||
}
|
||||
|
||||
// TestExecutionPayloadDeneb_ToConsensus_HappyPath checks the
|
||||
// (*ExecutionPayloadDeneb).ToConsensus function under normal conditions.
|
||||
func TestExecutionPayloadDeneb_ToConsensus_HappyPath(t *testing.T) {
|
||||
deneb := &ExecutionPayloadDeneb{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "999",
|
||||
GasLimit: "2222222",
|
||||
GasUsed: "1111111",
|
||||
Timestamp: "666666",
|
||||
ExtraData: hexutil.Encode(fillByteSlice(6, 0x11)),
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x33)),
|
||||
Transactions: []string{
|
||||
hexutil.Encode(fillByteSlice(5, 0x44)),
|
||||
},
|
||||
Withdrawals: []*Withdrawal{
|
||||
{
|
||||
WithdrawalIndex: "1",
|
||||
ValidatorIndex: "2",
|
||||
ExecutionAddress: hexutil.Encode(fillByteSlice(20, 0xaa)),
|
||||
Amount: "100",
|
||||
},
|
||||
},
|
||||
BlobGasUsed: "1234",
|
||||
ExcessBlobGas: "5678",
|
||||
}
|
||||
|
||||
result, err := deneb.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), deneb.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), deneb.FeeRecipient)
|
||||
require.Equal(t, result.BlockNumber, uint64(999))
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderCapellaFromConsensus_HappyPath(t *testing.T) {
|
||||
capellaHeader := &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 555,
|
||||
GasLimit: 1111111,
|
||||
GasUsed: 222222,
|
||||
Timestamp: 3333333333,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
WithdrawalsRoot: fillByteSlice(32, 0x99),
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderCapellaFromConsensus(capellaHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(capellaHeader.ParentHash), result.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(capellaHeader.WithdrawalsRoot), result.WithdrawalsRoot)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderCapella_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeaderCapella{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "555",
|
||||
GasLimit: "1111111",
|
||||
GasUsed: "222222",
|
||||
Timestamp: "3333333333",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(result.FeeRecipient), header.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(result.StateRoot), header.StateRoot)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ReceiptsRoot), header.ReceiptsRoot)
|
||||
require.DeepEqual(t, hexutil.Encode(result.WithdrawalsRoot), header.WithdrawalsRoot)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderDenebFromConsensus_HappyPath(t *testing.T) {
|
||||
denebHeader := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: fillByteSlice(common.HashLength, 0xaa),
|
||||
FeeRecipient: fillByteSlice(20, 0xbb),
|
||||
StateRoot: fillByteSlice(32, 0xcc),
|
||||
ReceiptsRoot: fillByteSlice(32, 0xdd),
|
||||
LogsBloom: fillByteSlice(256, 0xee),
|
||||
PrevRandao: fillByteSlice(32, 0xff),
|
||||
BlockNumber: 999,
|
||||
GasLimit: 5000000,
|
||||
GasUsed: 2500000,
|
||||
Timestamp: 4444444444,
|
||||
ExtraData: fillByteSlice(4, 0x12),
|
||||
BaseFeePerGas: fillByteSlice(32, 0x34),
|
||||
BlockHash: fillByteSlice(common.HashLength, 0x56),
|
||||
TransactionsRoot: fillByteSlice(32, 0x78),
|
||||
WithdrawalsRoot: fillByteSlice(32, 0x99),
|
||||
BlobGasUsed: 1234,
|
||||
ExcessBlobGas: 5678,
|
||||
}
|
||||
|
||||
result, err := ExecutionPayloadHeaderDenebFromConsensus(denebHeader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, hexutil.Encode(denebHeader.ParentHash), result.ParentHash)
|
||||
require.DeepEqual(t, hexutil.Encode(denebHeader.FeeRecipient), result.FeeRecipient)
|
||||
require.DeepEqual(t, hexutil.Encode(denebHeader.StateRoot), result.StateRoot)
|
||||
require.DeepEqual(t, fmt.Sprintf("%d", denebHeader.BlobGasUsed), result.BlobGasUsed)
|
||||
}
|
||||
|
||||
func TestExecutionPayloadHeaderDeneb_ToConsensus_HappyPath(t *testing.T) {
|
||||
header := &ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: hexutil.Encode(fillByteSlice(common.HashLength, 0xaa)),
|
||||
FeeRecipient: hexutil.Encode(fillByteSlice(20, 0xbb)),
|
||||
StateRoot: hexutil.Encode(fillByteSlice(32, 0xcc)),
|
||||
ReceiptsRoot: hexutil.Encode(fillByteSlice(32, 0xdd)),
|
||||
LogsBloom: hexutil.Encode(fillByteSlice(256, 0xee)),
|
||||
PrevRandao: hexutil.Encode(fillByteSlice(32, 0xff)),
|
||||
BlockNumber: "999",
|
||||
GasLimit: "5000000",
|
||||
GasUsed: "2500000",
|
||||
Timestamp: "4444444444",
|
||||
ExtraData: "0x1234abcd",
|
||||
BaseFeePerGas: "1234",
|
||||
BlockHash: hexutil.Encode(fillByteSlice(common.HashLength, 0x56)),
|
||||
TransactionsRoot: hexutil.Encode(fillByteSlice(32, 0x78)),
|
||||
WithdrawalsRoot: hexutil.Encode(fillByteSlice(32, 0x99)),
|
||||
BlobGasUsed: "1234",
|
||||
ExcessBlobGas: "5678",
|
||||
}
|
||||
|
||||
result, err := header.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hexutil.Encode(result.ParentHash), header.ParentHash)
|
||||
require.DeepEqual(t, result.BlobGasUsed, uint64(1234))
|
||||
require.DeepEqual(t, result.ExcessBlobGas, uint64(5678))
|
||||
require.DeepEqual(t, result.BlockNumber, uint64(999))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusRequests := []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xbb),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xbb),
|
||||
Amount: 12345,
|
||||
},
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xcc),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xcc),
|
||||
Amount: 54321,
|
||||
},
|
||||
}
|
||||
|
||||
result := WithdrawalRequestsFromConsensus(consensusRequests)
|
||||
require.DeepEqual(t, len(result), len(consensusRequests))
|
||||
require.DeepEqual(t, result[0].Amount, fmt.Sprintf("%d", consensusRequests[0].Amount))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequestFromConsensus_HappyPath(t *testing.T) {
|
||||
req := &enginev1.WithdrawalRequest{
|
||||
SourceAddress: fillByteSlice(20, 0xbb),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xbb),
|
||||
Amount: 42,
|
||||
}
|
||||
result := WithdrawalRequestFromConsensus(req)
|
||||
require.NotNil(t, result)
|
||||
require.DeepEqual(t, result.SourceAddress, hexutil.Encode(fillByteSlice(20, 0xbb)))
|
||||
}
|
||||
|
||||
func TestWithdrawalRequest_ToConsensus_HappyPath(t *testing.T) {
|
||||
withdrawalReq := &WithdrawalRequest{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 111)),
|
||||
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 123)),
|
||||
Amount: "12345",
|
||||
}
|
||||
result, err := withdrawalReq.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.Amount, uint64(12345))
|
||||
}
|
||||
|
||||
func TestConsolidationRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
consensusRequests := []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 111),
|
||||
SourcePubkey: fillByteSlice(48, 112),
|
||||
TargetPubkey: fillByteSlice(48, 113),
|
||||
},
|
||||
}
|
||||
result := ConsolidationRequestsFromConsensus(consensusRequests)
|
||||
require.DeepEqual(t, len(result), len(consensusRequests))
|
||||
require.DeepEqual(t, result[0].SourceAddress, "0x6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f6f")
|
||||
}
|
||||
|
||||
func TestDepositRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
ds := []*enginev1.DepositRequest{
|
||||
{
|
||||
Pubkey: fillByteSlice(48, 0xbb),
|
||||
WithdrawalCredentials: fillByteSlice(32, 0xdd),
|
||||
Amount: 98765,
|
||||
Signature: fillByteSlice(96, 0xff),
|
||||
Index: 111,
|
||||
},
|
||||
}
|
||||
result := DepositRequestsFromConsensus(ds)
|
||||
require.DeepEqual(t, len(result), len(ds))
|
||||
require.DeepEqual(t, result[0].Amount, "98765")
|
||||
}
|
||||
|
||||
func TestDepositRequest_ToConsensus_HappyPath(t *testing.T) {
|
||||
req := &DepositRequest{
|
||||
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
|
||||
Amount: "123",
|
||||
Signature: hexutil.Encode(fillByteSlice(96, 0xdd)),
|
||||
Index: "456",
|
||||
}
|
||||
|
||||
result, err := req.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, result.Amount, uint64(123))
|
||||
require.DeepEqual(t, result.Signature, fillByteSlice(96, 0xdd))
|
||||
}
|
||||
|
||||
func TestExecutionRequestsFromConsensus_HappyPath(t *testing.T) {
|
||||
er := &enginev1.ExecutionRequests{
|
||||
Deposits: []*enginev1.DepositRequest{
|
||||
{
|
||||
Pubkey: fillByteSlice(48, 0xba),
|
||||
WithdrawalCredentials: fillByteSlice(32, 0xaa),
|
||||
Amount: 33,
|
||||
Signature: fillByteSlice(96, 0xff),
|
||||
Index: 44,
|
||||
},
|
||||
},
|
||||
Withdrawals: []*enginev1.WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xaa),
|
||||
ValidatorPubkey: fillByteSlice(48, 0xba),
|
||||
Amount: 555,
|
||||
},
|
||||
},
|
||||
Consolidations: []*enginev1.ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: fillByteSlice(20, 0xdd),
|
||||
SourcePubkey: fillByteSlice(48, 0xdd),
|
||||
TargetPubkey: fillByteSlice(48, 0xcc),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := ExecutionRequestsFromConsensus(er)
|
||||
require.NotNil(t, result)
|
||||
require.Equal(t, 1, len(result.Deposits))
|
||||
require.Equal(t, "33", result.Deposits[0].Amount)
|
||||
require.Equal(t, 1, len(result.Withdrawals))
|
||||
require.Equal(t, "555", result.Withdrawals[0].Amount)
|
||||
require.Equal(t, 1, len(result.Consolidations))
|
||||
require.Equal(t, "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", result.Consolidations[0].TargetPubkey)
|
||||
}
|
||||
|
||||
func TestExecutionRequests_ToConsensus_HappyPath(t *testing.T) {
|
||||
execReq := &ExecutionRequests{
|
||||
Deposits: []*DepositRequest{
|
||||
{
|
||||
Pubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
WithdrawalCredentials: hexutil.Encode(fillByteSlice(32, 0xaa)),
|
||||
Amount: "33",
|
||||
Signature: hexutil.Encode(fillByteSlice(96, 0xff)),
|
||||
Index: "44",
|
||||
},
|
||||
},
|
||||
Withdrawals: []*WithdrawalRequest{
|
||||
{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xdd)),
|
||||
ValidatorPubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
Amount: "555",
|
||||
},
|
||||
},
|
||||
Consolidations: []*ConsolidationRequest{
|
||||
{
|
||||
SourceAddress: hexutil.Encode(fillByteSlice(20, 0xcc)),
|
||||
SourcePubkey: hexutil.Encode(fillByteSlice(48, 0xbb)),
|
||||
TargetPubkey: hexutil.Encode(fillByteSlice(48, 0xcc)),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result, err := execReq.ToConsensus()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(result.Deposits))
|
||||
require.Equal(t, uint64(33), result.Deposits[0].Amount)
|
||||
require.Equal(t, 1, len(result.Withdrawals))
|
||||
require.Equal(t, uint64(555), result.Withdrawals[0].Amount)
|
||||
require.Equal(t, 1, len(result.Consolidations))
|
||||
require.DeepEqual(t, fillByteSlice(48, 0xcc), result.Consolidations[0].TargetPubkey)
|
||||
}
|
||||
@@ -24,3 +24,96 @@ func TestDepositSnapshotFromConsensus(t *testing.T) {
|
||||
require.Equal(t, "0x1234", res.ExecutionBlockHash)
|
||||
require.Equal(t, "67890", res.ExecutionBlockHeight)
|
||||
}
|
||||
|
||||
func TestSignedBLSToExecutionChange_ToConsensus(t *testing.T) {
|
||||
s := &SignedBLSToExecutionChange{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedValidatorRegistration_ToConsensus(t *testing.T) {
|
||||
s := &SignedValidatorRegistration{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedContributionAndProof_ToConsensus(t *testing.T) {
|
||||
s := &SignedContributionAndProof{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestContributionAndProof_ToConsensus(t *testing.T) {
|
||||
c := &ContributionAndProof{
|
||||
Contribution: nil,
|
||||
AggregatorIndex: "invalid",
|
||||
SelectionProof: "",
|
||||
}
|
||||
_, err := c.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedAggregateAttestationAndProof_ToConsensus(t *testing.T) {
|
||||
s := &SignedAggregateAttestationAndProof{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAggregateAttestationAndProof_ToConsensus(t *testing.T) {
|
||||
a := &AggregateAttestationAndProof{
|
||||
AggregatorIndex: "1",
|
||||
Aggregate: nil,
|
||||
SelectionProof: "",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAttestation_ToConsensus(t *testing.T) {
|
||||
a := &Attestation{
|
||||
AggregationBits: "0x10",
|
||||
Data: nil,
|
||||
Signature: "",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSingleAttestation_ToConsensus(t *testing.T) {
|
||||
s := &SingleAttestation{
|
||||
CommitteeIndex: "1",
|
||||
AttesterIndex: "1",
|
||||
Data: nil,
|
||||
Signature: "",
|
||||
}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestSignedVoluntaryExit_ToConsensus(t *testing.T) {
|
||||
s := &SignedVoluntaryExit{Message: nil, Signature: ""}
|
||||
_, err := s.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestProposerSlashing_ToConsensus(t *testing.T) {
|
||||
p := &ProposerSlashing{SignedHeader1: nil, SignedHeader2: nil}
|
||||
_, err := p.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestAttesterSlashing_ToConsensus(t *testing.T) {
|
||||
a := &AttesterSlashing{Attestation1: nil, Attestation2: nil}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
func TestIndexedAttestation_ToConsensus(t *testing.T) {
|
||||
a := &IndexedAttestation{
|
||||
AttestingIndices: []string{"1"},
|
||||
Data: nil,
|
||||
Signature: "invalid",
|
||||
}
|
||||
_, err := a.ToConsensus()
|
||||
require.ErrorContains(t, errNilValue.Error(), err)
|
||||
}
|
||||
|
||||
@@ -1,397 +0,0 @@
|
||||
package structs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Epbs
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// nolint:gocognit
|
||||
func (b *BeaconBlockEpbs) ToConsensus() (*eth.BeaconBlockEpbs, error) {
|
||||
if b == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
if b.Body == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body")
|
||||
}
|
||||
if b.Body.Eth1Data == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
|
||||
}
|
||||
if b.Body.SyncAggregate == nil {
|
||||
return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
|
||||
}
|
||||
slot, err := strconv.ParseUint(b.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Slot")
|
||||
}
|
||||
proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ProposerIndex")
|
||||
}
|
||||
parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ParentRoot")
|
||||
}
|
||||
stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "StateRoot")
|
||||
}
|
||||
randaoReveal, err := bytesutil.DecodeHexWithLength(b.Body.RandaoReveal, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.RandaoReveal")
|
||||
}
|
||||
depositRoot, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.DepositRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositRoot")
|
||||
}
|
||||
depositCount, err := strconv.ParseUint(b.Body.Eth1Data.DepositCount, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.DepositCount")
|
||||
}
|
||||
blockHash, err := bytesutil.DecodeHexWithLength(b.Body.Eth1Data.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Eth1Data.BlockHash")
|
||||
}
|
||||
graffiti, err := bytesutil.DecodeHexWithLength(b.Body.Graffiti, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Graffiti")
|
||||
}
|
||||
proposerSlashings, err := ProposerSlashingsToConsensus(b.Body.ProposerSlashings)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ProposerSlashings")
|
||||
}
|
||||
attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.Body.AttesterSlashings)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.AttesterSlashings")
|
||||
}
|
||||
atts, err := AttsElectraToConsensus(b.Body.Attestations)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Attestations")
|
||||
}
|
||||
deposits, err := DepositsToConsensus(b.Body.Deposits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.Deposits")
|
||||
}
|
||||
exits, err := SignedExitsToConsensus(b.Body.VoluntaryExits)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.VoluntaryExits")
|
||||
}
|
||||
syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeBits")
|
||||
}
|
||||
syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.Body.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.SyncAggregate.SyncCommitteeSignature")
|
||||
}
|
||||
signedPayloadHeader, err := b.Body.SignedExecutionPayloadHeader.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.SignedExecutionPayloadHeader")
|
||||
}
|
||||
|
||||
blsChanges, err := SignedBLSChangesToConsensus(b.Body.BLSToExecutionChanges)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.BLSToExecutionChanges")
|
||||
}
|
||||
payloadAttestations := make([]*eth.PayloadAttestation, len(b.Body.PayloadAttestations))
|
||||
for i, p := range b.Body.PayloadAttestations {
|
||||
payloadAttestations[i], err = p.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("Body.PayloadAttestations[%d]", i))
|
||||
}
|
||||
}
|
||||
|
||||
return ð.BeaconBlockEpbs{
|
||||
Slot: primitives.Slot(slot),
|
||||
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
|
||||
ParentRoot: parentRoot,
|
||||
StateRoot: stateRoot,
|
||||
Body: ð.BeaconBlockBodyEpbs{
|
||||
RandaoReveal: randaoReveal,
|
||||
Eth1Data: ð.Eth1Data{
|
||||
DepositRoot: depositRoot,
|
||||
DepositCount: depositCount,
|
||||
BlockHash: blockHash,
|
||||
},
|
||||
Graffiti: graffiti,
|
||||
ProposerSlashings: proposerSlashings,
|
||||
AttesterSlashings: attesterSlashings,
|
||||
Attestations: atts,
|
||||
Deposits: deposits,
|
||||
VoluntaryExits: exits,
|
||||
SyncAggregate: ð.SyncAggregate{
|
||||
SyncCommitteeBits: syncCommitteeBits,
|
||||
SyncCommitteeSignature: syncCommitteeSig,
|
||||
},
|
||||
BlsToExecutionChanges: blsChanges,
|
||||
SignedExecutionPayloadHeader: signedPayloadHeader,
|
||||
PayloadAttestations: payloadAttestations,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *PayloadAttestation) ToConsensus() (*eth.PayloadAttestation, error) {
|
||||
if p == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
aggregationBits, err := bytesutil.DecodeHexWithLength(p.AggregationBits, fieldparams.PTCSize/8)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregationBits")
|
||||
}
|
||||
data, err := p.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(p.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
return ð.PayloadAttestation{
|
||||
AggregationBits: aggregationBits,
|
||||
Data: data,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, error) {
|
||||
if p == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
beaconBlockRoot, err := bytesutil.DecodeHexWithLength(p.BeaconBlockRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
|
||||
}
|
||||
slot, err := strconv.ParseUint(p.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Slot")
|
||||
}
|
||||
payloadStatus, err := strconv.ParseUint(p.PayloadStatus, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "PayloadStatus")
|
||||
}
|
||||
return ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: beaconBlockRoot,
|
||||
Slot: primitives.Slot(slot),
|
||||
PayloadStatus: primitives.PTCStatus(payloadStatus),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *SignedExecutionPayloadHeader) ToConsensus() (*enginev1.SignedExecutionPayloadHeader, error) {
|
||||
if p == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(p.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
header, err := p.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Header")
|
||||
}
|
||||
return &enginev1.SignedExecutionPayloadHeader{
|
||||
Message: header,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *ExecutionPayloadHeaderEPBS) ToConsensus() (*enginev1.ExecutionPayloadHeaderEPBS, error) {
|
||||
parentBlockHash, err := bytesutil.DecodeHexWithLength(p.ParentBlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ParentBlockHash")
|
||||
}
|
||||
parentBlockRoot, err := bytesutil.DecodeHexWithLength(p.ParentBlockRoot, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "ParentBlockRoot")
|
||||
}
|
||||
blockHash, err := bytesutil.DecodeHexWithLength(p.BlockHash, common.HashLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BlockHash")
|
||||
}
|
||||
gasLimit, err := strconv.ParseUint(p.GasLimit, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "GasLimit")
|
||||
}
|
||||
builderIndex, err := strconv.ParseUint(p.BuilderIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BuilderIndex")
|
||||
}
|
||||
slot, err := strconv.ParseUint(p.Slot, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Slot")
|
||||
}
|
||||
value, err := strconv.ParseUint(p.Value, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Value")
|
||||
}
|
||||
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(p.BlobKzgCommitmentsRoot, fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
|
||||
}
|
||||
return &enginev1.ExecutionPayloadHeaderEPBS{
|
||||
ParentBlockHash: parentBlockHash,
|
||||
ParentBlockRoot: parentBlockRoot,
|
||||
BlockHash: blockHash,
|
||||
GasLimit: gasLimit,
|
||||
BuilderIndex: primitives.ValidatorIndex(builderIndex),
|
||||
Slot: primitives.Slot(slot),
|
||||
Value: value,
|
||||
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (b *SignedBeaconBlockEpbs) ToConsensus() (*eth.SignedBeaconBlockEpbs, error) {
|
||||
if b == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
|
||||
sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
block, err := b.Message.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Message")
|
||||
}
|
||||
return ð.SignedBeaconBlockEpbs{
|
||||
Block: block,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func BeaconBlockEpbsFromConsensus(b *eth.BeaconBlockEpbs) (*BeaconBlockEpbs, error) {
|
||||
signedPayloadHeader, err := SignedExecutionPayloadHeaderFromConsensus(b.Body.SignedExecutionPayloadHeader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadAttestations, err := PayloadAttestationsFromConsensus(b.Body.PayloadAttestations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &BeaconBlockEpbs{
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
|
||||
ParentRoot: hexutil.Encode(b.ParentRoot),
|
||||
StateRoot: hexutil.Encode(b.StateRoot),
|
||||
Body: &BeaconBlockBodyEpbs{
|
||||
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
|
||||
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
|
||||
Graffiti: hexutil.Encode(b.Body.Graffiti),
|
||||
ProposerSlashings: ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
|
||||
AttesterSlashings: AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
|
||||
Attestations: AttsElectraFromConsensus(b.Body.Attestations),
|
||||
Deposits: DepositsFromConsensus(b.Body.Deposits),
|
||||
VoluntaryExits: SignedExitsFromConsensus(b.Body.VoluntaryExits),
|
||||
SyncAggregate: &SyncAggregate{
|
||||
SyncCommitteeBits: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeBits),
|
||||
SyncCommitteeSignature: hexutil.Encode(b.Body.SyncAggregate.SyncCommitteeSignature),
|
||||
},
|
||||
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
|
||||
SignedExecutionPayloadHeader: signedPayloadHeader,
|
||||
PayloadAttestations: payloadAttestations,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SignedExecutionPayloadEnvelopeFromConsensus(b *enginev1.SignedExecutionPayloadEnvelope) (*SignedExecutionPayloadEnvelope, error) {
|
||||
payload, err := ExecutionPayloadEnvelopeFromConsensus(b.Message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SignedExecutionPayloadEnvelope{
|
||||
Message: payload,
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionPayloadEnvelopeFromConsensus(b *enginev1.ExecutionPayloadEnvelope) (*ExecutionPayloadEnvelope, error) {
|
||||
payload, err := ExecutionPayloadDenebFromConsensus(b.Payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
commitments := make([]string, len(b.BlobKzgCommitments))
|
||||
for i, c := range b.BlobKzgCommitments {
|
||||
commitments[i] = hexutil.Encode(c)
|
||||
}
|
||||
|
||||
executionRequests := ExecutionRequestsFromConsensus(b.ExecutionRequests)
|
||||
return &ExecutionPayloadEnvelope{
|
||||
Payload: payload,
|
||||
ExecutionRequests: executionRequests,
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
BeaconBlockRoot: hexutil.Encode(b.BeaconBlockRoot),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
BlobKzgCommitments: commitments,
|
||||
StateRoot: hexutil.Encode(b.StateRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SignedBeaconBlockEpbsFromConsensus(b *eth.SignedBeaconBlockEpbs) (*SignedBeaconBlockEpbs, error) {
|
||||
block, err := BeaconBlockEpbsFromConsensus(b.Block)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SignedBeaconBlockEpbs{
|
||||
Message: block,
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func SignedExecutionPayloadHeaderFromConsensus(b *enginev1.SignedExecutionPayloadHeader) (*SignedExecutionPayloadHeader, error) {
|
||||
header, err := ExecutionPayloadHeaderEPBSFromConsensus(b.Message)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &SignedExecutionPayloadHeader{
|
||||
Message: header,
|
||||
Signature: hexutil.Encode(b.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ExecutionPayloadHeaderEPBSFromConsensus(b *enginev1.ExecutionPayloadHeaderEPBS) (*ExecutionPayloadHeaderEPBS, error) {
|
||||
return &ExecutionPayloadHeaderEPBS{
|
||||
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
|
||||
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
|
||||
BlockHash: hexutil.Encode(b.BlockHash),
|
||||
GasLimit: fmt.Sprintf("%d", b.GasLimit),
|
||||
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
Value: fmt.Sprintf("%d", b.Value),
|
||||
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func PayloadAttestationsFromConsensus(b []*eth.PayloadAttestation) ([]*PayloadAttestation, error) {
|
||||
payloadAttestations := make([]*PayloadAttestation, len(b))
|
||||
for i, p := range b {
|
||||
data, err := PayloadAttestationDataFromConsensus(p.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadAttestations[i] = &PayloadAttestation{
|
||||
AggregationBits: hexutil.Encode(p.AggregationBits),
|
||||
Data: data,
|
||||
Signature: hexutil.Encode(p.Signature),
|
||||
}
|
||||
}
|
||||
return payloadAttestations, nil
|
||||
}
|
||||
|
||||
func PayloadAttestationDataFromConsensus(b *eth.PayloadAttestationData) (*PayloadAttestationData, error) {
|
||||
return &PayloadAttestationData{
|
||||
BeaconBlockRoot: hexutil.Encode(b.BeaconBlockRoot),
|
||||
Slot: fmt.Sprintf("%d", b.Slot),
|
||||
PayloadStatus: fmt.Sprintf("%d", b.PayloadStatus),
|
||||
}, nil
|
||||
}
|
||||
@@ -78,8 +78,8 @@ type GetBlockHeaderResponse struct {
}

type GetValidatorsRequest struct {
    Ids []string `json:"ids"`
    Statuses []string `json:"statuses"`
    Ids []string `json:"ids,omitempty"`
    Statuses []string `json:"statuses,omitempty"`
}

type GetValidatorsResponse struct {
@@ -100,6 +100,12 @@ type GetValidatorBalancesResponse struct {
    Data []*ValidatorBalance `json:"data"`
}

type GetValidatorIdentitiesResponse struct {
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized bool `json:"finalized"`
    Data []*ValidatorIdentity `json:"data"`
}

type ValidatorContainer struct {
    Index string `json:"index"`
    Balance string `json:"balance"`
@@ -112,6 +118,12 @@ type ValidatorBalance struct {
    Balance string `json:"balance"`
}

type ValidatorIdentity struct {
    Index string `json:"index"`
    Pubkey string `json:"pubkey"`
    ActivationEpoch string `json:"activation_epoch"`
}

type GetBlockResponse struct {
    Data *SignedBlock `json:"data"`
}
@@ -250,3 +262,17 @@ type ChainHead struct {
    PreviousJustifiedBlockRoot string `json:"previous_justified_block_root"`
    OptimisticStatus bool `json:"optimistic_status"`
}

type GetPendingDepositsResponse struct {
    Version string `json:"version"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized bool `json:"finalized"`
    Data []*PendingDeposit `json:"data"`
}

type GetPendingPartialWithdrawalsResponse struct {
    Version string `json:"version"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    Finalized bool `json:"finalized"`
    Data []*PendingPartialWithdrawal `json:"data"`
}

@@ -54,4 +54,5 @@ type ForkChoiceNodeExtraData struct {
    Balance string `json:"balance"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
    TimeStamp string `json:"timestamp"`
    Target string `json:"target"`
}

@@ -20,11 +20,9 @@ type BlockEvent struct {
    ExecutionOptimistic bool `json:"execution_optimistic"`
}

type PayloadEvent struct {
    Slot string `json:"slot"`
    BlockRoot string `json:"block_root"`
    ExecutionBlockHash string `json:"execution_block_hash"`
    ExecutionOptimistic bool `json:"execution_optimistic"`
type BlockGossipEvent struct {
    Slot string `json:"slot"`
    Block string `json:"block"`
}

type AggregatedAttEventSource struct {

@@ -244,26 +244,6 @@ type Withdrawal struct {
    Amount string `json:"amount"`
}

type DepositRequest struct {
    Pubkey string `json:"pubkey"`
    WithdrawalCredentials string `json:"withdrawal_credentials"`
    Amount string `json:"amount"`
    Signature string `json:"signature"`
    Index string `json:"index"`
}

type WithdrawalRequest struct {
    SourceAddress string `json:"source_address"`
    ValidatorPubkey string `json:"validator_pubkey"`
    Amount string `json:"amount"`
}

type ConsolidationRequest struct {
    SourceAddress string `json:"source_address"`
    SourcePubkey string `json:"source_pubkey"`
    TargetPubkey string `json:"target_pubkey"`
}

type PendingDeposit struct {
    Pubkey string `json:"pubkey"`
    WithdrawalCredentials string `json:"withdrawal_credentials"`

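For context on the GetValidatorsRequest change above: adding ",omitempty" to the struct tags means nil or empty slices are dropped from the marshaled JSON instead of appearing as "ids":null. A small standalone sketch, with a hypothetical lowercase type name used only for illustration:

package main

import (
    "encoding/json"
    "fmt"
)

type getValidatorsRequest struct {
    Ids      []string `json:"ids,omitempty"`
    Statuses []string `json:"statuses,omitempty"`
}

func main() {
    b, _ := json.Marshal(getValidatorsRequest{})
    fmt.Println(string(b)) // prints {} because both empty slices are omitted
}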
@@ -154,7 +154,7 @@ retry:
            continue retry
        }
        if sub == nil {
            panic("event: ResubscribeFunc returned nil subscription and no error")
            panic("event: ResubscribeFunc returned nil subscription and no error") // lint:nopanic -- This should never happen.
        }
        return sub
    case <-s.unsub:

@@ -125,7 +125,7 @@ func getChan(key string) chan byte {

// Return a new slice with unique elements.
func unique(arr []string) []string {
    if arr == nil || len(arr) <= 1 {
    if len(arr) <= 1 {
        return arr
    }

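The simplification of unique above is safe because len of a nil slice is 0 in Go, so the len(arr) <= 1 check already covers the nil case that the removed clause tested explicitly. A tiny standalone check of that assumption:

package main

import "fmt"

func main() {
    var arr []string                  // a nil slice
    fmt.Println(arr == nil, len(arr)) // prints: true 0, so len(arr) <= 1 holds for nil input
}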
@@ -6,11 +6,9 @@ go_library(
        "chain_info.go",
        "chain_info_forkchoice.go",
        "currently_syncing_block.go",
        "currently_syncing_execution_payload_envelope.go",
        "defragment.go",
        "error.go",
        "execution_engine.go",
        "execution_engine_epbs.go",
        "forkchoice_update_execution.go",
        "head.go",
        "head_sync_committee_info.go",
@@ -27,9 +25,8 @@ go_library(
        "receive_attestation.go",
        "receive_blob.go",
        "receive_block.go",
        "receive_execution_payload_envelope.go",
        "receive_payload_attestation_message.go",
        "service.go",
        "setup_forchoice.go",
        "tracked_proposer.go",
        "weak_subjectivity_checks.go",
    ],
@@ -47,7 +44,7 @@ go_library(
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/epbs:go_default_library",
        "//beacon-chain/core/electra:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
@@ -100,8 +97,8 @@ go_library(
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@io_opencensus_go//trace:go_default_library",
        "@org_golang_x_sync//errgroup:go_default_library",
    ],
)
@@ -114,7 +111,6 @@ go_test(
        "chain_info_norace_test.go",
        "chain_info_test.go",
        "checktags_test.go",
        "epbs_test.go",
        "error_test.go",
        "execution_engine_test.go",
        "forkchoice_update_execution_test.go",
@@ -130,9 +126,9 @@ go_test(
        "process_block_test.go",
        "receive_attestation_test.go",
        "receive_block_test.go",
        "receive_execution_payload_envelope_test.go",
        "service_norace_test.go",
        "service_test.go",
        "setup_forkchoice_test.go",
        "setup_test.go",
        "weak_subjectivity_checks_test.go",
    ],
@@ -161,6 +157,7 @@ go_test(
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/attestations/kv:go_default_library",
        "//beacon-chain/operations/blstoexec:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
@@ -185,7 +182,6 @@ go_test(
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "//testing/util/random:go_default_library",
        "//time:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
@@ -193,6 +189,7 @@ go_test(
        "@com_github_ethereum_go_ethereum//core/types:go_default_library",
        "@com_github_holiman_uint256//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",

@@ -43,7 +43,7 @@ type ForkchoiceFetcher interface {
    GetProposerHead() [32]byte
    SetForkChoiceGenesisTime(uint64)
    UpdateHead(context.Context, primitives.Slot)
    HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte)
    HighestReceivedBlockSlot() primitives.Slot
    ReceivedBlocksLastEpoch() (uint64, error)
    InsertNode(context.Context, state.BeaconState, consensus_blocks.ROBlock) error
    ForkChoiceDump(context.Context) (*forkchoice.Dump, error)
@@ -51,8 +51,7 @@ type ForkchoiceFetcher interface {
    ProposerBoost() [32]byte
    RecentBlockSlot(root [32]byte) (primitives.Slot, error)
    IsCanonical(ctx context.Context, blockRoot [32]byte) (bool, error)
    GetPTCVote(root [32]byte) primitives.PTCStatus
    HashForBlockRoot(root [32]byte) [32]byte
    DependentRoot(primitives.Epoch) ([32]byte, error)
}

// TimeFetcher retrieves the Ethereum consensus data that's related to time.
@@ -121,12 +120,6 @@ type OptimisticModeFetcher interface {
    IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error)
}

// ExecutionPayloadFetcher defines a common interface that returns forkchoice
// information about payload block hashes
type ExecutionPayloadFetcher interface {
    HashInForkchoice([32]byte) bool
}

// FinalizedCheckpt returns the latest finalized checkpoint from chain store.
func (s *Service) FinalizedCheckpt() *ethpb.Checkpoint {
    s.cfg.ForkChoiceStore.RLock()
@@ -408,14 +401,6 @@ func (s *Service) InForkchoice(root [32]byte) bool {
    return s.cfg.ForkChoiceStore.HasNode(root)
}

// HashInForkchoice returns true if the given payload block hash is found in
// forkchoice
func (s *Service) HashInForkchoice(hash [32]byte) bool {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.HasHash(hash)
}

// IsOptimisticForRoot takes the root as argument instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
@@ -552,12 +537,6 @@ func (s *Service) BlockBeingSynced(root [32]byte) bool {
    return s.blockBeingSynced.isSyncing(root)
}

// PayloadBeingSynced returns whether the block with the given root is currently being synced
func (s *Service) PayloadBeingSynced(root [32]byte) bool {
    _, syncing := s.payloadBeingSynced.isSyncing(root)
    return syncing
}

// RecentBlockSlot returns block slot from fork choice store
func (s *Service) RecentBlockSlot(root [32]byte) (primitives.Slot, error) {
    s.cfg.ForkChoiceStore.RLock()

@@ -3,11 +3,13 @@ package blockchain
import (
    "context"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    consensus_blocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// CachedHeadRoot returns the corresponding value from Forkchoice
@@ -31,11 +33,11 @@ func (s *Service) SetForkChoiceGenesisTime(timestamp uint64) {
    s.cfg.ForkChoiceStore.SetGenesisTime(timestamp)
}

// HighestReceivedBlockSlotRoot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlotRoot() (primitives.Slot, [32]byte) {
// HighestReceivedBlockSlot returns the corresponding value from forkchoice
func (s *Service) HighestReceivedBlockSlot() primitives.Slot {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.HighestReceivedBlockSlotRoot()
    return s.cfg.ForkChoiceStore.HighestReceivedBlockSlot()
}

// ReceivedBlocksLastEpoch returns the corresponding value from forkchoice
@@ -102,36 +104,32 @@ func (s *Service) ParentRoot(root [32]byte) ([32]byte, error) {
    return s.cfg.ForkChoiceStore.ParentRoot(root)
}

// HashForBlockRoot wraps a call to the corresponding method in forkchoice
func (s *Service) HashForBlockRoot(root [32]byte) [32]byte {
// hashForGenesisBlock returns the right hash for the genesis block
func (s *Service) hashForGenesisBlock(ctx context.Context, root [32]byte) ([]byte, error) {
    genRoot, err := s.cfg.BeaconDB.GenesisBlockRoot(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "could not get genesis block root")
    }
    if root != genRoot {
        return nil, errNotGenesisRoot
    }
    st, err := s.cfg.BeaconDB.GenesisState(ctx)
    if err != nil {
        return nil, errors.Wrap(err, "could not get genesis state")
    }
    if st.Version() < version.Bellatrix {
        return nil, nil
    }
    header, err := st.LatestExecutionPayloadHeader()
    if err != nil {
        return nil, errors.Wrap(err, "could not get latest execution payload header")
    }
    return bytesutil.SafeCopyBytes(header.BlockHash()), nil
}

// DependentRoot wraps the corresponding method in forkchoice
func (s *Service) DependentRoot(epoch primitives.Epoch) ([32]byte, error) {
    s.cfg.ForkChoiceStore.RLock()
    defer s.cfg.ForkChoiceStore.RUnlock()
    return s.cfg.ForkChoiceStore.HashForBlockRoot(root)
}

// GetPTCVote wraps a call to the corresponding method in forkchoice and checks
// the currently syncing status
// Warning: this method will return the current PTC status regardless of
// timeliness. A client MUST call this method when about to submit a PTC
// attestation, that is exactly at the threshold to submit the attestation.
func (s *Service) GetPTCVote(root [32]byte) primitives.PTCStatus {
    s.cfg.ForkChoiceStore.RLock()
    f := s.cfg.ForkChoiceStore.GetPTCVote()
    s.cfg.ForkChoiceStore.RUnlock()
    if f != primitives.PAYLOAD_ABSENT {
        return f
    }
    f, isSyncing := s.payloadBeingSynced.isSyncing(root)
    if isSyncing {
        return f
    }
    return primitives.PAYLOAD_ABSENT
}

// insertPayloadEnvelope wraps a locked call to the corresponding method in
// forkchoice
func (s *Service) insertPayloadEnvelope(envelope interfaces.ROExecutionPayloadEnvelope) error {
    s.cfg.ForkChoiceStore.Lock()
    defer s.cfg.ForkChoiceStore.Unlock()
    return s.cfg.ForkChoiceStore.InsertPayloadEnvelope(envelope)
    return s.cfg.ForkChoiceStore.DependentRoot(epoch)
}

@@ -36,7 +36,6 @@ func prepareForkchoiceState(
    blockRoot [32]byte,
    parentRoot [32]byte,
    payloadHash [32]byte,
    parentHash [32]byte,
    justified *ethpb.Checkpoint,
    finalized *ethpb.Checkpoint,
) (state.BeaconState, blocks.ROBlock, error) {
@@ -69,8 +68,7 @@ func prepareForkchoiceState(
            ParentRoot: parentRoot[:],
            Body: &ethpb.BeaconBlockBodyBellatrix{
                ExecutionPayload: &enginev1.ExecutionPayload{
                    BlockHash: payloadHash[:],
                    ParentHash: parentHash[:],
                    BlockHash: payloadHash[:],
                },
            },
        },
@@ -143,7 +141,7 @@ func TestUnrealizedJustifiedBlockHash(t *testing.T) {
    service := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
    ojc := &ethpb.Checkpoint{Root: []byte{'j'}}
    ofc := &ethpb.Checkpoint{Root: []byte{'f'}}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    service.cfg.ForkChoiceStore.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
@@ -337,22 +335,22 @@ func TestService_ChainHeads(t *testing.T) {
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, [32]byte{'E'}, [32]byte{'D'}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -434,10 +432,10 @@ func TestService_IsOptimistic(t *testing.T) {
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -470,10 +468,10 @@ func TestService_IsOptimisticForRoot(t *testing.T) {
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{root: [32]byte{'b'}}}
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    st, roblock, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, ojc, ofc)
    st, roblock, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -584,6 +582,7 @@ func TestService_IsOptimisticForRoot_StateSummaryRecovered(t *testing.T) {
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    util.SaveBlock(t, context.Background(), beaconDB, b)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, [32]byte{}))
    _, err = c.IsOptimisticForRoot(ctx, br)
    assert.NoError(t, err)
    summ, err := beaconDB.StateSummary(ctx, br)
@@ -614,3 +613,20 @@ func TestService_IsFinalized(t *testing.T) {
    require.Equal(t, true, c.IsFinalized(ctx, br))
    require.Equal(t, false, c.IsFinalized(ctx, [32]byte{'c'}))
}

func Test_hashForGenesisRoot(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()
    c := setupBeaconChain(t, beaconDB)
    st, _ := util.DeterministicGenesisStateElectra(t, 10)
    require.NoError(t, c.cfg.BeaconDB.SaveGenesisData(ctx, st))
    root, err := beaconDB.GenesisBlockRoot(ctx)
    require.NoError(t, err)
    genRoot, err := c.hashForGenesisBlock(ctx, [32]byte{'a'})
    require.ErrorIs(t, err, errNotGenesisRoot)
    require.IsNil(t, genRoot)

    genRoot, err = c.hashForGenesisBlock(ctx, root)
    require.NoError(t, err)
    require.Equal(t, [32]byte{}, [32]byte(genRoot))
}

@@ -1,32 +0,0 @@
package blockchain

import (
    "sync"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

type currentlySyncingPayload struct {
    sync.Mutex
    roots map[[32]byte]primitives.PTCStatus
}

func (b *currentlySyncingPayload) set(envelope interfaces.ROExecutionPayloadEnvelope) {
    b.Lock()
    defer b.Unlock()
    b.roots[envelope.BeaconBlockRoot()] = primitives.PAYLOAD_PRESENT
}

func (b *currentlySyncingPayload) unset(root [32]byte) {
    b.Lock()
    defer b.Unlock()
    delete(b.roots, root)
}

func (b *currentlySyncingPayload) isSyncing(root [32]byte) (status primitives.PTCStatus, isSyncing bool) {
    b.Lock()
    defer b.Unlock()
    status, isSyncing = b.roots[root]
    return
}
@@ -1,18 +0,0 @@
package blockchain

import (
    "testing"

    doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestServiceGetPTCVote(t *testing.T) {
    c := &currentlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)}
    s := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, payloadBeingSynced: c}
    r := [32]byte{'r'}
    require.Equal(t, primitives.PAYLOAD_ABSENT, s.GetPTCVote(r))
    c.roots[r] = primitives.PAYLOAD_WITHHELD
    require.Equal(t, primitives.PAYLOAD_WITHHELD, s.GetPTCVote(r))
}
@@ -30,9 +30,10 @@ var (
    ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
    // ErrNilHead is returned when no head is present in the blockchain service.
    ErrNilHead = errors.New("nil head")
    // errInvalidValidatorIndex is returned when a validator index is
    // invalid or unexpected
    errInvalidValidatorIndex = errors.New("invalid validator index")
    // errNotGenesisRoot is returned when the root is not the genesis block root.
    errNotGenesisRoot = errors.New("root is not the genesis block root")
    // errBlacklistedBlock is returned when a block is blacklisted as invalid.
    errBlacklistedRoot = errors.New("block root is blacklisted")
)

var errMaxBlobsExceeded = errors.New("Expected commitments in block exceeds MAX_BLOBS_PER_BLOCK")

@@ -2,7 +2,6 @@ package blockchain

import (
    "context"
    "crypto/sha256"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
@@ -31,8 +30,6 @@ import (
    "github.com/sirupsen/logrus"
)

const blobCommitmentVersionKZG uint8 = 0x01

var defaultLatestValidHash = bytesutil.PadTo([]byte{0xff}, 32)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
@@ -72,10 +69,21 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
        SafeBlockHash: justifiedHash[:],
        FinalizedBlockHash: finalizedHash[:],
    }
    if len(fcs.HeadBlockHash) != 32 || [32]byte(fcs.HeadBlockHash) == [32]byte{} {
        // check if we are sending FCU at genesis
        hash, err := s.hashForGenesisBlock(ctx, arg.headRoot)
        if errors.Is(err, errNotGenesisRoot) {
            log.Error("Sending nil head block hash to execution engine")
            return nil, nil
        }
        if err != nil {
            return nil, errors.Wrap(err, "could not get head block hash")
        }
        fcs.HeadBlockHash = hash
    }
    if arg.attributes == nil {
        arg.attributes = payloadattribute.EmptyWithVersion(headBlk.Version())
    }
    go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), arg)
    payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, arg.attributes)
    if err != nil {
        switch {
@@ -98,14 +106,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
            log.WithError(err).Error("Could not set head root to invalid")
            return nil, nil
        }
        if len(invalidRoots) == 0 {
            log.WithFields(logrus.Fields{
                "slot": headBlk.Slot(),
                "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot[:])),
            }).Warn("invalid payload")
            return nil, nil
        }

        if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
            log.WithError(err).Error("Could not remove invalid block and state")
            return nil, nil
@@ -120,7 +120,6 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
            }).Warn("Pruned invalid blocks, could not update head root")
            return nil, invalidBlock{error: ErrInvalidPayload, root: arg.headRoot, invalidAncestorRoots: invalidRoots}
        }

        b, err := s.getBlock(ctx, r)
        if err != nil {
            log.WithError(err).Error("Could not get head block")
@@ -171,6 +170,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
        log.WithFields(logrus.Fields{
            "blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(arg.headRoot[:])),
            "headSlot": headBlk.Slot(),
            "nextSlot": nextSlot,
            "payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
        }).Info("Forkchoice updated with payload attributes for proposal")
        s.cfg.PayloadIDCache.Set(nextSlot, arg.headRoot, pId)
@@ -178,40 +178,19 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *fcuConfig) (*
        log.WithFields(logrus.Fields{
            "blockHash": fmt.Sprintf("%#x", headPayload.BlockHash()),
            "slot": headBlk.Slot(),
            "nextSlot": nextSlot,
        }).Error("Received nil payload ID on VALID engine response")
    }
    return payloadID, nil
}

func firePayloadAttributesEvent(ctx context.Context, f event.SubscriberSender, cfg *fcuConfig) {
    pidx, err := helpers.BeaconProposerIndex(ctx, cfg.headState)
    if err != nil {
        log.WithError(err).
            WithField("head_root", cfg.headRoot[:]).
            Error("Could not get proposer index for PayloadAttributes event")
        return
    }
    evd := payloadattribute.EventData{
        ProposerIndex: pidx,
        ProposalSlot: cfg.headState.Slot(),
        ParentBlockRoot: cfg.headRoot[:],
        Attributer: cfg.attributes,
        HeadRoot: cfg.headRoot,
        HeadState: cfg.headState,
        HeadBlock: cfg.headBlock,
    }
    if cfg.headBlock != nil && !cfg.headBlock.IsNil() {
        headPayload, err := cfg.headBlock.Block().Body().Execution()
        if err != nil {
            log.WithError(err).Error("Could not get execution payload for head block")
            return
        }
        evd.ParentBlockHash = headPayload.BlockHash()
        evd.ParentBlockNumber = headPayload.BlockNumber()
    }
func firePayloadAttributesEvent(_ context.Context, f event.SubscriberSender, nextSlot primitives.Slot) {
    // the fcu args have differing amounts of completeness based on the code path,
    // and there is work we only want to do if a client is actually listening to the events beacon api endpoint.
    // temporary solution: just fire a blank event and fill in the details in the api handler.
    f.Send(&feed.Event{
        Type: statefeed.PayloadAttributes,
        Data: evd,
        Data: payloadattribute.EventData{ProposalSlot: nextSlot},
    })
}

@@ -468,13 +447,7 @@ func kzgCommitmentsToVersionedHashes(body interfaces.ReadOnlyBeaconBlockBody) ([

    versionedHashes := make([]common.Hash, len(commitments))
    for i, commitment := range commitments {
        versionedHashes[i] = ConvertKzgCommitmentToVersionedHash(commitment)
        versionedHashes[i] = primitives.ConvertKzgCommitmentToVersionedHash(commitment)
    }
    return versionedHashes, nil
}

func ConvertKzgCommitmentToVersionedHash(commitment []byte) common.Hash {
    versionedHash := sha256.Sum256(commitment)
    versionedHash[0] = blobCommitmentVersionKZG
    return versionedHash
}

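For reference on the removed ConvertKzgCommitmentToVersionedHash above (callers are switched to the primitives package equivalent in this diff): the versioned hash is simply SHA-256 of the commitment with the first byte overwritten by the KZG version byte 0x01. A self-contained sketch of that same computation, shown only for illustration and not taken from the prysm source:

package main

import (
    "crypto/sha256"
    "fmt"
)

const blobCommitmentVersionKZG uint8 = 0x01

// versionedHash mirrors the computation shown in the diff above.
func versionedHash(commitment []byte) [32]byte {
    h := sha256.Sum256(commitment)
    h[0] = blobCommitmentVersionKZG // tag the hash with the KZG version byte
    return h
}

func main() {
    commitment := make([]byte, 48) // KZG commitments are 48 bytes; zeroed here for illustration
    fmt.Printf("%#x\n", versionedHash(commitment))
}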
@@ -1,62 +0,0 @@
package blockchain

import (
    "context"
    "fmt"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/runtime/version"
    "github.com/sirupsen/logrus"
)

// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdateEPBS(ctx context.Context, blockhash [32]byte, attributes payloadattribute.Attributer) (*enginev1.PayloadIDBytes, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdateEPBS")
    defer span.End()

    finalizedHash := s.cfg.ForkChoiceStore.FinalizedPayloadBlockHash()
    justifiedHash := s.cfg.ForkChoiceStore.UnrealizedJustifiedPayloadBlockHash()
    fcs := &enginev1.ForkchoiceState{
        HeadBlockHash: blockhash[:],
        SafeBlockHash: justifiedHash[:],
        FinalizedBlockHash: finalizedHash[:],
    }
    if attributes == nil {
        attributes = payloadattribute.EmptyWithVersion(version.EPBS)
    }
    payloadID, lastValidHash, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attributes)
    if err != nil {
        switch {
        case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
            forkchoiceUpdatedOptimisticNodeCount.Inc()
            log.WithFields(logrus.Fields{
                "headPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(blockhash[:])),
                "finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
            }).Info("Called fork choice updated with optimistic block")
            return payloadID, nil
        case errors.Is(err, execution.ErrInvalidPayloadStatus):
            log.WithError(err).Info("forkchoice updated to invalid block")
            return nil, invalidBlock{error: ErrInvalidPayload, root: [32]byte(lastValidHash)}
        default:
            log.WithError(err).Error(ErrUndefinedExecutionEngineError)
            return nil, nil
        }
    }
    forkchoiceUpdatedValidNodeCount.Inc()
    // If the forkchoice update call has an attribute, update the payload ID cache.
    hasAttr := attributes != nil && !attributes.IsEmpty()
    if hasAttr && payloadID == nil && !features.Get().PrepareAllPayloads {
        log.WithFields(logrus.Fields{
            "blockHash": fmt.Sprintf("%#x", blockhash[:]),
        }).Error("Received nil payload ID on VALID engine response")
    }
    return payloadID, nil
}
@@ -46,13 +46,13 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {

    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -84,7 +84,7 @@ func Test_NotifyForkchoiceUpdate_GetPayloadAttrErrorCanContinue(t *testing.T) {
    service.cfg.PayloadIDCache.Set(1, [32]byte{}, [8]byte{})
    got, err := service.notifyForkchoiceUpdate(ctx, arg)
    require.NoError(t, err)
    require.DeepEqual(t, got, pid) // We still get a payload ID even though the state is bad. This means it returns until the end.
    require.IsNil(t, got)
}

func Test_NotifyForkchoiceUpdate(t *testing.T) {
@@ -104,15 +104,16 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {

    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    badHash := [32]byte{'h'}

    tests := []struct {
        name string
@@ -210,7 +211,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
            blk: func() interfaces.ReadOnlySignedBeaconBlock {
                b, err := consensusblocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockBellatrix{Block: &ethpb.BeaconBlockBellatrix{
                    Body: &ethpb.BeaconBlockBodyBellatrix{
                        ExecutionPayload: &v1.ExecutionPayload{},
                        ExecutionPayload: &v1.ExecutionPayload{BlockHash: badHash[:]},
                    },
                }})
                require.NoError(t, err)
@@ -287,16 +288,16 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
    require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -316,8 +317,10 @@ func Test_NotifyForkchoiceUpdate_NIlLVH(t *testing.T) {
        headRoot: brd,
    }
    _, err = service.notifyForkchoiceUpdate(ctx, a)
    // The incoming block is not invalid because the empty node is still valid on ePBS.
    require.Equal(t, false, IsInvalidBlock(err))
    require.Equal(t, true, IsInvalidBlock(err))
    require.Equal(t, brd, InvalidBlockRoot(err))
    require.Equal(t, brd, InvalidAncestorRoots(err)[0])
    require.Equal(t, 1, len(InvalidAncestorRoots(err)))
}

//
@@ -396,28 +399,28 @@ func Test_NotifyForkchoiceUpdateRecursive_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, &forkchoicetypes.Checkpoint{}))
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
    require.NoError(t, err)

    bState, _ := util.DeterministicGenesisState(t, 10)
    require.NoError(t, beaconDB.SaveState(ctx, bState, bra))
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, [32]byte{'A'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, [32]byte{'B'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, [32]byte{'C'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, [32]byte{'D'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, [32]byte{'E'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, [32]byte{'F'}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -510,10 +513,10 @@ func Test_NotifyNewPayload(t *testing.T) {
    require.NoError(t, err)
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, [32]byte{'A'}, [32]byte{}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -690,7 +693,7 @@ func Test_NotifyNewPayload(t *testing.T) {
            }
            service.cfg.ExecutionEngineCaller = e
            root := [32]byte{'a'}
            state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
            state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
            require.NoError(t, err)
            require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
            postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
@@ -757,17 +760,17 @@ func Test_reportInvalidBlock(t *testing.T) {
    service, tr := minimalTestService(t)
    ctx, _, fcs := tr.ctx, tr.db, tr.fcs
    jcp := &ethpb.Checkpoint{}
    st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, [32]byte{}, jcp, jcp)
    st, root, err := prepareForkchoiceState(ctx, 0, [32]byte{'A'}, [32]byte{}, [32]byte{'a'}, jcp, jcp)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))
    st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, [32]byte{'a'}, jcp, jcp)
    st, root, err = prepareForkchoiceState(ctx, 1, [32]byte{'B'}, [32]byte{'A'}, [32]byte{'b'}, jcp, jcp)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))
    st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, [32]byte{'b'}, jcp, jcp)
    st, root, err = prepareForkchoiceState(ctx, 2, [32]byte{'C'}, [32]byte{'B'}, [32]byte{'c'}, jcp, jcp)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))

    st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, [32]byte{'c'}, jcp, jcp)
    st, root, err = prepareForkchoiceState(ctx, 3, [32]byte{'D'}, [32]byte{'C'}, [32]byte{'d'}, jcp, jcp)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))

@@ -929,7 +932,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    fjc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash}
    require.NoError(t, fcs.UpdateJustifiedCheckpoint(ctx, fjc))
    require.NoError(t, fcs.UpdateFinalizedCheckpoint(fjc))
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    fcs.SetOriginRoot(genesisRoot)
@@ -963,7 +966,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
    tenjc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
    tenfc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
    state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, [32]byte{}, tenjc, tenfc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, tenjc, tenfc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
@@ -992,7 +995,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
    twentyjc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
    twentyfc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
    state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, [32]byte{}, twentyjc, twentyfc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, twentyjc, twentyfc)
    require.NoError(t, err)
    fcs.SetBalancesByRooter(func(_ context.Context, _ [32]byte) ([]uint64, error) { return []uint64{}, nil })
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -1054,8 +1057,8 @@ func TestService_removeInvalidBlockAndState(t *testing.T) {

    require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))

    require.Equal(t, false, service.chainHasBlock(ctx, r1))
    require.Equal(t, false, service.chainHasBlock(ctx, r2))
    require.Equal(t, false, service.hasBlock(ctx, r1))
    require.Equal(t, false, service.hasBlock(ctx, r2))
    require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
    require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
    has, err := service.cfg.StateGen.HasState(ctx, r1)

@@ -102,10 +102,10 @@ func (s *Service) forkchoiceUpdateWithExecution(ctx context.Context, args *fcuCo
        log.WithError(err).Error("could not save head")
    }

    go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)

    // Only need to prune attestations from pool if the head has changed.
    if err := s.pruneAttsFromPool(args.headBlock); err != nil {
        log.WithError(err).Error("could not prune attestations from pool")
    }
    s.pruneAttsFromPool(s.ctx, args.headState, args.headBlock)
    return nil
}

@@ -122,13 +122,13 @@ func TestService_forkchoiceUpdateWithExecution_SameHeadRootNewProposer(t *testin

    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
    state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -164,10 +164,10 @@ func TestShouldOverrideFCU(t *testing.T) {
    headRoot := [32]byte{'b'}
    parentRoot := [32]byte{'a'}
    ojc := &ethpb.Checkpoint{}
    st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ojc)
    st, root, err := prepareForkchoiceState(ctx, 1, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))
    st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, [32]byte{}, ojc, ojc)
    st, root, err = prepareForkchoiceState(ctx, 2, headRoot, parentRoot, [32]byte{}, ojc, ojc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, root))

@@ -8,7 +8,6 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/features"
@@ -328,34 +327,22 @@ func (s *Service) notifyNewHeadEvent(
    newHeadStateRoot,
    newHeadRoot []byte,
) error {
    previousDutyDependentRoot := s.originBlockRoot[:]
    currentDutyDependentRoot := s.originBlockRoot[:]
    currEpoch := slots.ToEpoch(newHeadSlot)
    currentDutyDependentRoot, err := s.DependentRoot(currEpoch)
    if err != nil {
        return errors.Wrap(err, "could not get duty dependent root")
    }
    if currentDutyDependentRoot == [32]byte{} {
        currentDutyDependentRoot = s.originBlockRoot
    }
    previousDutyDependentRoot := currentDutyDependentRoot
    if currEpoch > 0 {
        previousDutyDependentRoot, err = s.DependentRoot(currEpoch.Sub(1))
        if err != nil {
            return errors.Wrap(err, "could not get duty dependent root")
        }
    }

    var previousDutyEpoch primitives.Epoch
    currentDutyEpoch := slots.ToEpoch(newHeadSlot)
    if currentDutyEpoch > 0 {
        previousDutyEpoch = currentDutyEpoch.Sub(1)
    }
    currentDutySlot, err := slots.EpochStart(currentDutyEpoch)
    if err != nil {
        return errors.Wrap(err, "could not get duty slot")
    }
    previousDutySlot, err := slots.EpochStart(previousDutyEpoch)
    if err != nil {
        return errors.Wrap(err, "could not get duty slot")
    }
    if currentDutySlot > 0 {
        currentDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, currentDutySlot-1)
        if err != nil {
            return errors.Wrap(err, "could not get duty dependent root")
        }
    }
    if previousDutySlot > 0 {
        previousDutyDependentRoot, err = helpers.BlockRootAtSlot(newHeadState, previousDutySlot-1)
        if err != nil {
            return errors.Wrap(err, "could not get duty dependent root")
        }
    }
    isOptimistic, err := s.IsOptimistic(ctx)
    if err != nil {
        return errors.Wrap(err, "could not check if node is optimistically synced")
@@ -367,8 +354,8 @@ func (s *Service) notifyNewHeadEvent(
            Block: newHeadRoot,
            State: newHeadStateRoot,
            EpochTransition: slots.IsEpochStart(newHeadSlot),
            PreviousDutyDependentRoot: previousDutyDependentRoot,
            CurrentDutyDependentRoot: currentDutyDependentRoot,
            PreviousDutyDependentRoot: previousDutyDependentRoot[:],
            CurrentDutyDependentRoot: currentDutyDependentRoot[:],
            ExecutionOptimistic: isOptimistic,
        },
    })
@@ -410,7 +397,11 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
                return err
            }
        } else {
            if a.IsAggregated() {
            if orphanedBlk.Version() >= version.Electra {
                if err = s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
                    return err
                }
            } else if a.IsAggregated() {
                if err = s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
                    return err
                }

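The notifyNewHeadEvent refactor above replaces manual dependent-root arithmetic with the forkchoice DependentRoot helper. The removed code located the duty dependent root at the block root of the slot just before the start of the duty epoch; for example, assuming 32-slot epochs, the current duty dependent root for epoch 2 sits at slot 63 (EpochStart(2) - 1 = 64 - 1). A minimal sketch of that older calculation, with the 32-slot epoch length stated as an assumption and the helper name purely illustrative:

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value, assumed here for illustration

// dependentRootSlot mirrors the arithmetic the old notifyNewHeadEvent used:
// the dependent root lives at the last slot of the previous epoch.
func dependentRootSlot(epoch uint64) uint64 {
    start := epoch * slotsPerEpoch // equivalent of slots.EpochStart
    if start == 0 {
        return 0
    }
    return start - 1
}

func main() {
    fmt.Println(dependentRootSlot(2)) // prints 63
}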
@@ -9,6 +9,7 @@ import (

mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -48,7 +49,7 @@ func TestSaveHead_Different(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -63,11 +64,11 @@ func TestSaveHead_Different(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -101,7 +102,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
service.head = &head{
@@ -110,7 +111,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
}

reorgChainParent := [32]byte{'B'}
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, 0, reorgChainParent, oldRoot, oldBlock.Block().ParentRoot(), ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -122,7 +123,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -156,14 +157,17 @@ func Test_notifyNewHeadEvent(t *testing.T) {
notifier := &mock.MockStateNotifier{RecordEvents: true}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: [32]byte{1},
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
err := srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
require.NoError(t, srv.notifyNewHeadEvent(context.Background(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))

@@ -185,10 +189,14 @@ func Test_notifyNewHeadEvent(t *testing.T) {
genesisRoot := [32]byte{1}
srv := &Service{
cfg: &config{
StateNotifier: notifier,
StateNotifier: notifier,
ForkChoiceStore: doublylinkedtree.New(),
},
originBlockRoot: genesisRoot,
}
st, blk, err := prepareForkchoiceState(context.Background(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(context.Background(), st, blk))
epoch1Start, err := slots.EpochStart(1)
require.NoError(t, err)
epoch2Start, err := slots.EpochStart(1)
@@ -209,8 +217,8 @@ func Test_notifyNewHeadEvent(t *testing.T) {
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
PreviousDutyDependentRoot: genesisRoot[:],
CurrentDutyDependentRoot: make([]byte, 32),
PreviousDutyDependentRoot: make([]byte, 32),
CurrentDutyDependentRoot: srv.originBlockRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
@@ -238,11 +246,11 @@ func TestRetrieveHead_ReadOnly(t *testing.T) {
wsb := util.SaveBlock(t, context.Background(), service.cfg.BeaconDB, newHeadSignedBlock)
newRoot, err := newHeadBlock.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, slots.PrevSlot(wsb.Block().Slot()), wsb.Block().ParentRoot(), service.cfg.ForkChoiceStore.CachedHeadRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, wsb.Block().ParentRoot(), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
headState, err := util.NewBeaconState()
@@ -304,7 +312,7 @@ func TestSaveOrphanedAtts(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -324,6 +332,72 @@ func TestSaveOrphanedAtts(t *testing.T) {
require.DeepEqual(t, wantAtts, atts)
}

func TestSaveOrphanedAttsElectra(t *testing.T) {
ctx := context.Background()
beaconDB := testDB.SetupDB(t)
service := setupBeaconChain(t, beaconDB)
service.genesisTime = time.Now().Add(time.Duration(-10*int64(1)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)

// Chain setup
// 0 -- 1 -- 2 -- 3
// \-4
st, keys := util.DeterministicGenesisStateElectra(t, 64)
blkG, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 1)
assert.NoError(t, err)

util.SaveBlock(t, ctx, service.cfg.BeaconDB, blkG)
rG, err := blkG.Block.HashTreeRoot()
require.NoError(t, err)

blk1, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 2)
assert.NoError(t, err)
blk1.Block.ParentRoot = rG[:]
r1, err := blk1.Block.HashTreeRoot()
require.NoError(t, err)

blk2, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 3)
assert.NoError(t, err)
blk2.Block.ParentRoot = r1[:]
r2, err := blk2.Block.HashTreeRoot()
require.NoError(t, err)

blk3, err := util.GenerateFullBlockElectra(st, keys, util.DefaultBlockGenConfig(), 4)
assert.NoError(t, err)
blk3.Block.ParentRoot = r2[:]
r3, err := blk3.Block.HashTreeRoot()
require.NoError(t, err)

blk4 := util.NewBeaconBlockElectra()
blk4.Block.Slot = 4
blk4.Block.ParentRoot = rG[:]
r4, err := blk4.Block.HashTreeRoot()
require.NoError(t, err)
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

for _, blk := range []*ethpb.SignedBeaconBlockElectra{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
}

require.NoError(t, service.saveOrphanedOperations(ctx, r3, r4))
require.Equal(t, 3, len(service.cfg.AttPool.BlockAttestations()))
wantAtts := []ethpb.Att{
blk3.Block.Body.Attestations[0],
blk2.Block.Body.Attestations[0],
blk1.Block.Body.Attestations[0],
}
atts := service.cfg.AttPool.BlockAttestations()
sort.Slice(atts, func(i, j int) bool {
return atts[i].GetData().Slot > atts[j].GetData().Slot
})
require.DeepEqual(t, wantAtts, atts)
}

func TestSaveOrphanedOps(t *testing.T) {
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
@@ -381,7 +455,7 @@ func TestSaveOrphanedOps(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -451,7 +525,7 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlockCapella{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -509,7 +583,7 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -568,7 +642,7 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
r, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, [32]byte{}, ojc, ofc)
state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
util.SaveBlock(t, ctx, beaconDB, blk)
@@ -583,7 +657,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
ctx, beaconDB, fcs := tr.ctx, tr.db, tr.fcs

ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))

@@ -603,7 +677,7 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
headRoot := service.headRoot()
require.Equal(t, [32]byte{}, headRoot)

st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, [32]byte{}, fcp, fcp)
st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
fcs.SetBalancesByRooter(func(context.Context, [32]byte) ([]uint64, error) { return []uint64{1, 2}, nil })

@@ -45,43 +45,43 @@ func logStateTransitionData(b interfaces.ReadOnlyBeaconBlock) error {
}
log = log.WithField("syncBitsCount", agg.SyncCommitteeBits.Count())
}
if b.Version() >= version.EPBS {
sh, err := b.Body().SignedExecutionPayloadHeader()
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
header, err := sh.Header()
if err != nil {
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
log = log.WithFields(logrus.Fields{"payloadHash": fmt.Sprintf("%#x", header.BlockHash()),
"builderIndex": header.BuilderIndex(),
"value": header.Value(),
"blobKzgCommitmentsRoot": fmt.Sprintf("%#x", header.BlobKzgCommitmentsRoot()),
})
} else {
if b.Version() >= version.Bellatrix {
p, err := b.Body().Execution()
if err != nil {
return err
}
log = log.WithField("payloadHash", fmt.Sprintf("%#x", bytesutil.Trunc(p.BlockHash())))
txs, err := p.Transactions()
switch {
case errors.Is(err, consensus_types.ErrUnsupportedField):
case err != nil:
return err
default:
log = log.WithField("txCount", len(txs))
txsPerSlotCount.Set(float64(len(txs)))
}
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
if b.Version() >= version.Deneb {
kzgs, err := b.Body().BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Failed to get blob KZG commitments")
} else if len(kzgs) > 0 {
log = log.WithField("kzgCommitmentCount", len(kzgs))
}
if b.Version() >= version.Electra {
eReqs, err := b.Body().ExecutionRequests()
if err != nil {
log.WithError(err).Error("Failed to get execution requests")
} else {
if len(eReqs.Deposits) > 0 {
log = log.WithField("depositRequestCount", len(eReqs.Deposits))
}
if len(eReqs.Consolidations) > 0 {
log = log.WithField("consolidationRequestCount", len(eReqs.Consolidations))
}
if len(eReqs.Withdrawals) > 0 {
log = log.WithField("withdrawalRequestCount", len(eReqs.Withdrawals))
}
}
}
@@ -113,18 +113,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte
"dataAvailabilityWaitedTime": daWaitedTime,
"deposits": len(block.Body().Deposits()),
}
if block.Version() >= version.EPBS {
ph, err := block.Body().SignedExecutionPayloadHeader()
if err != nil {
return err
}
header, err := ph.Header()
if err != nil {
return err
}
hash := header.ParentBlockHash()
lf["parentHash"] = fmt.Sprintf("0x%s...", hex.EncodeToString(hash[:])[:8])
}
log.WithFields(lf).Debug("Synced new block")
} else {
log.WithFields(logrus.Fields{
@@ -140,9 +128,6 @@ func logBlockSyncStatus(block interfaces.ReadOnlyBeaconBlock, blockRoot [32]byte

// logs payload related data every slot.
func logPayload(block interfaces.ReadOnlyBeaconBlock) error {
if block.Version() >= version.EPBS {
return nil
}
isExecutionBlk, err := blocks.IsExecutionBlock(block.Body())
if err != nil {
return errors.Wrap(err, "could not determine if block is execution block")

@@ -182,10 +182,6 @@ var (
Name: "chain_service_processing_milliseconds",
Help: "Total time to call a chain service in ReceiveBlock()",
})
executionEngineProcessingTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "execution_engine_processing_milliseconds",
Help: "Total time to process an execution payload envelope in ReceiveExecutionPayloadEnvelope()",
})
dataAvailWaitedTime = promauto.NewSummary(prometheus.SummaryOpts{
Name: "da_waited_time_milliseconds",
Help: "Total time spent waiting for a data availability check in ReceiveBlock()",

@@ -3,6 +3,7 @@ package blockchain
import (
"testing"

mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
testDB "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
@@ -18,6 +19,7 @@ func testServiceOptsWithDB(t *testing.T) []Option {
WithStateGen(stategen.New(beaconDB, fcs)),
WithForkChoiceStore(fcs),
WithClockSynchronizer(cs),
WithStateNotifier(&mock.MockStateNotifier{RecordEvents: true}),
}
}

@@ -1,8 +1,6 @@
package blockchain

import (
"sync"

"github.com/prysmaticlabs/prysm/v5/async/event"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
@@ -71,22 +69,6 @@ func WithDepositCache(c cache.DepositCache) Option {
}
}

// WithPayloadAttestationCache for payload attestation cache.
func WithPayloadAttestationCache(c *cache.PayloadAttestationCache) Option {
return func(s *Service) error {
s.cfg.PayloadAttestationCache = c
return nil
}
}

// WithPayloadEnvelopeCache for payload envelope cache.
func WithPayloadEnvelopeCache(c *sync.Map) Option {
return func(s *Service) error {
s.cfg.PayloadEnvelopeCache = c
return nil
}
}

// WithPayloadIDCache for payload ID cache.
func WithPayloadIDCache(c *cache.PayloadIDCache) Option {
return func(s *Service) error {
@@ -231,3 +213,10 @@ func WithSyncChecker(checker Checker) Option {
return nil
}
}

func WithSlasherEnabled(enabled bool) Option {
return func(s *Service) error {
s.slasherEnabled = enabled
return nil
}
}

@@ -80,7 +80,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
}

// Use the target state to verify attesting indices are valid.
committees, err := helpers.AttestationCommittees(ctx, baseState, a)
committees, err := helpers.AttestationCommitteesFromState(ctx, baseState, a)
if err != nil {
return err
}
@@ -97,7 +97,7 @@ func (s *Service) OnAttestation(ctx context.Context, a ethpb.Att, disparity time
// We assume trusted attestation in this function has verified signature.

// Update forkchoice store with the new attestation for updating weight.
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Slot)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indexedAtt.GetAttestingIndices(), bytesutil.ToBytes32(a.GetData().BeaconBlockRoot), a.GetData().Target.Epoch)

return nil
}

@@ -18,6 +18,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/time/slots"
)

// The caller of this function must have a lock on forkchoice.
func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) state.ReadOnlyBeaconState {
headEpoch := slots.ToEpoch(s.HeadSlot())
if c.Epoch < headEpoch {
@@ -27,13 +28,6 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
return nil
}
if c.Epoch == headEpoch {
targetSlot, err := s.cfg.ForkChoiceStore.Slot([32]byte(c.Root))
if err != nil {
return nil
}
if slots.ToEpoch(targetSlot)+1 < headEpoch {
return nil
}
st, err := s.HeadStateReadOnly(ctx)
if err != nil {
return nil
@@ -65,12 +59,13 @@ func (s *Service) getRecentPreState(ctx context.Context, c *ethpb.Checkpoint) st
return nil
}
if err := s.checkpointStateCache.AddCheckpointState(c, st); err != nil {
return nil
log.WithError(err).Error("Could not save checkpoint state to cache")
}
return st
}

// getAttPreState retrieves the att pre state by either from the cache or the DB.
// The caller of this function must have a lock on forkchoice.
func (s *Service) getAttPreState(ctx context.Context, c *ethpb.Checkpoint) (state.ReadOnlyBeaconState, error) {
// If the attestation is recent and canonical we can use the head state to compute the shuffling.
if st := s.getRecentPreState(ctx, c); st != nil {

@@ -32,7 +32,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
util.SaveBlock(t, ctx, beaconDB, blkWithoutState)

cp := &ethpb.Checkpoint{}
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, cp, cp)
st, roblock, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))

@@ -41,7 +41,7 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) {
r, err := blkWithStateBadAtt.Block.HashTreeRoot()
require.NoError(t, err)
cp = &ethpb.Checkpoint{Root: r[:]}
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, cp, cp)
st, roblock, err = prepareForkchoiceState(ctx, blkWithStateBadAtt.Block.Slot, r, [32]byte{}, params.BeaconConfig().ZeroHash, cp, cp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
util.SaveBlock(t, ctx, beaconDB, blkWithStateBadAtt)
@@ -139,7 +139,7 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
state, roblock, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, roblock))
require.NoError(t, service.OnAttestation(ctx, att[0], 0))
@@ -170,7 +170,7 @@ func TestService_GetRecentPreState(t *testing.T) {
err = s.SetFinalizedCheckpoint(cp0)
require.NoError(t, err)

st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp0, cp0)
st, root, err := prepareForkchoiceState(ctx, 31, [32]byte(ckRoot), [32]byte{}, [32]byte{'R'}, cp0, cp0)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
service.head = &head{
@@ -202,7 +202,7 @@ func TestService_GetAttPreState_Concurrency(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: ckRoot}))

st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp1, cp1)
st, root, err := prepareForkchoiceState(ctx, 100, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -259,7 +259,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'A'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'A'}, fieldparams.RootLength)}))

st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, [32]byte{}, cp1, cp1)
st, root, err := prepareForkchoiceState(ctx, 1, [32]byte(cp1.Root), [32]byte{}, [32]byte{'R'}, cp1, cp1)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))
s1, err := service.getAttPreState(ctx, cp1)
@@ -273,7 +273,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
_, err = service.getAttPreState(ctx, cp2)
require.ErrorContains(t, "epoch 2 root 0x4200000000000000000000000000000000000000000000000000000000000000: not a checkpoint in forkchoice", err)

st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, [32]byte{}, cp2, cp2)
st, root, err = prepareForkchoiceState(ctx, 33, [32]byte(cp2.Root), [32]byte(cp1.Root), [32]byte{'R'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -298,7 +298,7 @@ func TestStore_SaveCheckpointState(t *testing.T) {
cp3 := &ethpb.Checkpoint{Epoch: 1, Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, s, bytesutil.ToBytes32([]byte{'C'})))
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: bytesutil.PadTo([]byte{'C'}, fieldparams.RootLength)}))
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, [32]byte{}, cp2, cp2)
st, root, err = prepareForkchoiceState(ctx, 31, [32]byte(cp3.Root), [32]byte(cp2.Root), [32]byte{'P'}, cp2, cp2)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, root))

@@ -318,7 +318,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
checkpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r1[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(checkpoint.Root)))
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, [32]byte{}, checkpoint, checkpoint)
st, roblock, err := prepareForkchoiceState(ctx, blk.Block.Slot, r1, [32]byte{}, params.BeaconConfig().ZeroHash, checkpoint, checkpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err := service.getAttPreState(ctx, checkpoint)
@@ -336,7 +336,7 @@ func TestStore_UpdateCheckpointState(t *testing.T) {
require.NoError(t, err)
newCheckpoint := &ethpb.Checkpoint{Epoch: epoch, Root: r2[:]}
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, baseState, bytesutil.ToBytes32(newCheckpoint.Root)))
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, [32]byte{}, newCheckpoint, newCheckpoint)
st, roblock, err = prepareForkchoiceState(ctx, blk.Block.Slot, r2, r1, params.BeaconConfig().ZeroHash, newCheckpoint, newCheckpoint)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, roblock))
returned, err = service.getAttPreState(ctx, newCheckpoint)

@@ -6,6 +6,7 @@ import (
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -15,6 +16,7 @@ import (
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
@@ -64,9 +66,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
fcuArgs := &fcuConfig{}

if s.inRegularSync() {
if cfg.roblock.Version() < version.EPBS {
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
defer s.handleSecondFCUCall(cfg, fcuArgs)
}
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
@@ -104,18 +104,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
return nil
}
if cfg.roblock.Version() >= version.EPBS {
if err := s.saveHead(ctx, cfg.headRoot, cfg.roblock, cfg.postState); err != nil {
log.WithError(err).Error("could not save head")
}
if err := s.pruneAttsFromPool(cfg.roblock); err != nil {
log.WithError(err).Error("could not prune attestations from pool")
}

// update the NSC and handle epoch boundaries here since we do
// not send FCU at all
return s.updateCachesPostBlockProcessing(cfg)
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
return nil
@@ -187,6 +175,9 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []consensusblocks.ROBlo
var set *bls.SignatureBatch
boundaries := make(map[[32]byte]state.BeaconState)
for i, b := range blks {
if features.BlacklistedBlock(b.Root()) {
return errBlacklistedRoot
}
v, h, err := getStateVersionAndPayload(preState)
if err != nil {
return err
@@ -382,7 +373,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot,
func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.ReadOnlyBeaconBlock, st state.BeaconState) error {
// Feed in block's attestations to fork choice store.
for _, a := range blk.Body().Attestations() {
committees, err := helpers.AttestationCommittees(ctx, st, a)
committees, err := helpers.AttestationCommitteesFromState(ctx, st, a)
if err != nil {
return err
}
@@ -392,7 +383,7 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
}
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
if s.cfg.ForkChoiceStore.HasNode(r) {
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Slot)
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
} else if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.Add(a); err != nil {
return err
@@ -433,24 +424,98 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
return nil
}

// This removes the attestations in block `b` from the attestation mem pool.
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
atts := headBlock.Block().Body().Attestations()
for _, att := range atts {
if features.Get().EnableExperimentalAttestationPool {
if err := s.cfg.AttestationCache.DeleteCovered(att); err != nil {
return errors.Wrap(err, "could not delete attestation")
}
} else if att.IsAggregated() {
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
return err
}
} else {
if err := s.cfg.AttPool.DeleteUnaggregatedAttestation(att); err != nil {
return err
}
// pruneAttsFromPool removes these attestations from the attestation pool
// which are covered by attestations from the received block.
func (s *Service) pruneAttsFromPool(ctx context.Context, headState state.BeaconState, headBlock interfaces.ReadOnlySignedBeaconBlock) {
for _, att := range headBlock.Block().Body().Attestations() {
if err := s.pruneCoveredAttsFromPool(ctx, headState, att); err != nil {
log.WithError(err).Warn("Could not prune attestations covered by a received block's attestation")
}
}
}

func (s *Service) pruneCoveredAttsFromPool(ctx context.Context, headState state.BeaconState, att ethpb.Att) error {
switch {
case !att.IsAggregated():
return s.cfg.AttPool.DeleteUnaggregatedAttestation(att)
case att.Version() == version.Phase0:
if features.Get().EnableExperimentalAttestationPool {
return errors.Wrap(s.cfg.AttestationCache.DeleteCovered(att), "could not delete covered attestation")
}
return errors.Wrap(s.cfg.AttPool.DeleteAggregatedAttestation(att), "could not delete aggregated attestation")
default:
return s.pruneCoveredElectraAttsFromPool(ctx, headState, att)
}
}

// pruneCoveredElectraAttsFromPool handles removing aggregated Electra attestations from the pool after receiving a block.
// Because in Electra block attestations can combine aggregates for multiple committees, comparing attestation bits
// of a block attestation with attestations bits of an aggregate can cause unexpected results, leading to covered
// aggregates not being removed from the pool.
//
// To make sure aggregates are removed, we decompose the block attestation into dummy aggregates, with each
// aggregate accounting for one committee. This allows us to compare aggregates in the same way it's done for
// Phase0. Even though we can't provide a valid signature for the dummy aggregate, it does not matter because
// signatures play no part in pruning attestations.
func (s *Service) pruneCoveredElectraAttsFromPool(ctx context.Context, headState state.BeaconState, att ethpb.Att) error {
if att.Version() == version.Phase0 {
log.Error("Called pruneCoveredElectraAttsFromPool with a Phase0 attestation")
return nil
}

// We don't want to recompute committees. If they are not cached already,
// we allow attestations to stay in the pool. If these attestations are
// included in a later block, they will be redundant. But given that
// they were not cached in the first place, it's unlikely that they
// will be chosen into a block.
ok, committees, err := helpers.AttestationCommitteesFromCache(ctx, headState, att)
if err != nil {
return errors.Wrap(err, "could not get attestation committees")
}
if !ok {
log.Debug("Attestation committees are not cached. Skipping attestation pruning.")
return nil
}

committeeIndices := att.CommitteeBitsVal().BitIndices()
offset := uint64(0)

// Sanity check as this should never happen
if len(committeeIndices) != len(committees) {
return errors.New("committee indices and committees have different lengths")
}

for i, c := range committees {
ab := bitfield.NewBitlist(uint64(len(c)))
for j := uint64(0); j < uint64(len(c)); j++ {
ab.SetBitAt(j, att.GetAggregationBits().BitAt(j+offset))
}

cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(uint64(committeeIndices[i]), true)

a := &ethpb.AttestationElectra{
AggregationBits: ab,
Data: att.GetData(),
CommitteeBits: cb,
Signature: make([]byte, fieldparams.BLSSignatureLength),
}

if features.Get().EnableExperimentalAttestationPool {
if err = s.cfg.AttestationCache.DeleteCovered(a); err != nil {
return errors.Wrap(err, "could not delete covered attestation")
}
} else if !a.IsAggregated() {
if err = s.cfg.AttPool.DeleteUnaggregatedAttestation(a); err != nil {
return errors.Wrap(err, "could not delete unaggregated attestation")
}
} else if err = s.cfg.AttPool.DeleteAggregatedAttestation(a); err != nil {
return errors.Wrap(err, "could not delete aggregated attestation")
}

offset += uint64(len(c))
}

return nil
}

@@ -502,15 +567,9 @@ func (s *Service) runLateBlockTasks() {

attThreshold := params.BeaconConfig().SecondsPerSlot / 3
ticker := slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
epbs := params.BeaconConfig().EPBSForkEpoch
for {
select {
case slot := <-ticker.C():
if slots.ToEpoch(slot) == epbs && slot%32 == 0 {
ticker.Done()
attThreshold := params.BeaconConfig().SecondsPerSlot / 4
ticker = slots.NewSlotTickerWithOffset(s.genesisTime, time.Duration(attThreshold)*time.Second, params.BeaconConfig().SecondsPerSlot)
}
case <-ticker.C():
s.lateBlockTasks(s.ctx)
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
@@ -532,17 +591,11 @@ func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte
if len(expected) > maxBlobsPerBlock {
return nil, errMaxBlobsExceeded
}
indices, err := bs.Indices(root, slot)
if err != nil {
return nil, err
}
indices := bs.Summary(root)
missing := make(map[uint64]struct{}, len(expected))
for i := range expected {
ui := uint64(i)
if len(expected[i]) > 0 {
if !indices[i] {
missing[ui] = struct{}{}
}
if len(expected[i]) > 0 && !indices.HasIndex(uint64(i)) {
missing[uint64(i)] = struct{}{}
}
}
return missing, nil
@@ -676,46 +729,30 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
attribute := s.getPayloadAttribute(ctx, headState, s.CurrentSlot()+1, headRoot[:])
// return early if we are not proposing next slot
if attribute.IsEmpty() {
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: nil,
attributes: attribute,
}
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), fcuArgs)
// notifyForkchoiceUpdate fires the payload attribute event. But in this case, we won't
// call notifyForkchoiceUpdate, so the event is fired here.
go firePayloadAttributesEvent(ctx, s.cfg.StateNotifier.StateFeed(), s.CurrentSlot()+1)
return
}

if headState.Version() >= version.EPBS {
bh, err := headState.LatestBlockHash()
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve latest block hash")
return
}
_, err = s.notifyForkchoiceUpdateEPBS(ctx, [32]byte(bh), attribute)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
} else {
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RLock()
headBlock, err := s.headBlock()
if err != nil {
s.headLock.RUnlock()
log.WithError(err).Debug("could not perform late block tasks: failed to retrieve head block")
return
}
s.headLock.RUnlock()

fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
fcuArgs := &fcuConfig{
headState: headState,
headRoot: headRoot,
headBlock: headBlock,
attributes: attribute,
}
_, err = s.notifyForkchoiceUpdate(ctx, fcuArgs)
if err != nil {
log.WithError(err).Debug("could not perform late block tasks: failed to update forkchoice with engine")
}
}

@@ -7,6 +7,7 @@ import (
"strings"
"time"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"

"github.com/ethereum/go-ethereum/common"
@@ -368,31 +369,7 @@ func (s *Service) getBlockPreState(ctx context.Context, b interfaces.ReadOnlyBea
return nil, err
}

parentRoot := b.ParentRoot()
s.ForkChoicer().RLock()
slot, err := s.ForkChoicer().Slot(parentRoot)
s.ForkChoicer().RUnlock()
if err != nil {
return nil, errors.Wrap(err, "could not get slot for parent root")
}
if slots.ToEpoch(slot) >= params.BeaconConfig().EPBSForkEpoch {
s.ForkChoicer().RLock()
parentHash := s.ForkChoicer().HashForBlockRoot(parentRoot)
s.ForkChoicer().RUnlock()
signedBid, err := b.Body().SignedExecutionPayloadHeader()
if err != nil {
return nil, errors.Wrap(err, "could not get signed execution payload header")
}
bid, err := signedBid.Header()
if err != nil {
return nil, errors.Wrap(err, "could not get execution payload header")
}
if parentHash == bid.ParentBlockHash() {
// It's based on full, use the state by hash
parentRoot = parentHash
}
}
preState, err := s.cfg.StateGen.StateByRoot(ctx, parentRoot)
preState, err := s.cfg.StateGen.StateByRoot(ctx, b.ParentRoot())
if err != nil {
return nil, errors.Wrapf(err, "could not get pre state for slot %d", b.Slot())
}
@@ -576,7 +553,8 @@ func (s *Service) fillInForkChoiceMissingBlocks(ctx context.Context, signed inte

// inserts finalized deposits into our finalized deposit trie, needs to be
// called in the background
func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
// Post-Electra: prunes all proofs and pending deposits in the cache
func (s *Service) insertFinalizedDepositsAndPrune(ctx context.Context, fRoot [32]byte) {
ctx, span := trace.StartSpan(ctx, "blockChain.insertFinalizedDeposits")
defer span.End()
startTime := time.Now()
@@ -587,6 +565,16 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
log.WithError(err).Error("could not fetch finalized state")
return
}

// Check if we should prune all pending deposits.
// In post-Electra(after the legacy deposit mechanism is deprecated),
// we can prune all pending deposits in the deposit cache.
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
if helpers.DepositRequestsStarted(finalizedState) {
s.pruneAllPendingDepositsAndProofs(ctx)
return
}

// We update the cache up to the last deposit index in the finalized block's state.
// We can be confident that these deposits will be included in some block
// because the Eth1 follow distance makes such long-range reorgs extremely unlikely.
@@ -615,6 +603,12 @@ func (s *Service) insertFinalizedDeposits(ctx context.Context, fRoot [32]byte) {
log.WithField("duration", time.Since(startTime).String()).Debugf("Finalized deposit insertion completed at index %d", finalizedEth1DepIdx)
}

// pruneAllPendingDepositsAndProofs prunes all proofs and pending deposits in the cache.
func (s *Service) pruneAllPendingDepositsAndProofs(ctx context.Context) {
s.cfg.DepositCache.PruneAllPendingDeposits(ctx)
s.cfg.DepositCache.PruneAllProofs(ctx)
}

// This ensures that the input root defaults to using genesis root instead of zero hashes. This is needed for handling
// fork choice justification routine.
func (s *Service) ensureRootNotZeros(root [32]byte) [32]byte {

@@ -12,8 +12,10 @@ import (
"github.com/ethereum/go-ethereum/common"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/pkg/errors"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
@@ -25,6 +27,7 @@ import (
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/doubly-linked-tree"
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/kv"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
@@ -45,6 +48,94 @@ import (
logTest "github.com/sirupsen/logrus/hooks/test"
)

func Test_pruneAttsFromPool_Electra(t *testing.T) {
ctx := context.Background()
logHook := logTest.NewGlobal()

params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.TargetCommitteeSize = 8
params.OverrideBeaconConfig(cfg)

s := Service{
cfg: &config{
AttPool: kv.NewAttCaches(),
},
}

data := &ethpb.AttestationData{
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{Root: make([]byte, 32)},
Target: &ethpb.Checkpoint{Root: make([]byte, 32)},
}

cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
att1 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b10000000, 0b00000001},
Data: data,
Signature: make([]byte, 96),
CommitteeBits: cb,
}

cb = primitives.NewAttestationCommitteeBits()
cb.SetBitAt(1, true)
att2 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b11110111, 0b00000001},
Data: data,
Signature: make([]byte, 96),
CommitteeBits: cb,
}

cb = primitives.NewAttestationCommitteeBits()
cb.SetBitAt(3, true)
att3 := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b11110111, 0b00000001},
Data: data,
Signature: make([]byte, 96),
CommitteeBits: cb,
}

require.NoError(t, s.cfg.AttPool.SaveUnaggregatedAttestation(att1))
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att2))
require.NoError(t, s.cfg.AttPool.SaveAggregatedAttestation(att3))

cb = primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
cb.SetBitAt(1, true)
onChainAtt := &ethpb.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b10000000, 0b11110111, 0b00000001},
Data: data,
Signature: make([]byte, 96),
CommitteeBits: cb,
}
bl := &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
Body: &ethpb.BeaconBlockBodyElectra{
Attestations: []*ethpb.AttestationElectra{onChainAtt},
},
},
Signature: make([]byte, 96),
}
rob, err := consensusblocks.NewSignedBeaconBlock(bl)
require.NoError(t, err)
st, _ := util.DeterministicGenesisStateElectra(t, 1024)
committees, err := helpers.BeaconCommittees(ctx, st, 0)
require.NoError(t, err)
// Sanity check to make sure the on-chain att will be decomposed
// into the correct number of aggregates.
require.Equal(t, 4, len(committees))

s.pruneAttsFromPool(ctx, st, rob)
require.LogsDoNotContain(t, logHook, "Could not prune attestations")

attsInPool := s.cfg.AttPool.UnaggregatedAttestations()
assert.Equal(t, 0, len(attsInPool))
attsInPool = s.cfg.AttPool.AggregatedAttestations()
require.Equal(t, 1, len(attsInPool))
assert.DeepEqual(t, att3, attsInPool[0])
}

func TestStore_OnBlockBatch(t *testing.T) {
service, tr := minimalTestService(t)
ctx := tr.ctx
@@ -142,7 +233,7 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) {
// the parent of the last block inserted is the tree node.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, [32]byte{}, fcp, fcp)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
@@ -184,7 +275,7 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) {
// the parent of the last block inserted is the tree node.
fcp := &ethpb.Checkpoint{Epoch: 0, Root: service.originBlockRoot[:]}
r0 := bytesutil.ToBytes32(roots[0])
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, [32]byte{}, fcp, fcp)
state, blkRoot, err := prepareForkchoiceState(ctx, 0, r0, service.originBlockRoot, [32]byte{}, fcp, fcp)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
fcp2 := &forkchoicetypes.Checkpoint{Epoch: 0, Root: r0}
@@ -464,7 +555,7 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
}
@@ -504,7 +595,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
util.SaveBlock(t, context.Background(), beaconDB, beaconBlock)
}

st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
st, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))

@@ -723,7 +814,7 @@ func TestInsertFinalizedDeposits(t *testing.T) {
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root)))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits, err := depositCache.FinalizedDeposits(ctx)
require.NoError(t, err)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
@@ -759,7 +850,7 @@ func TestInsertFinalizedDeposits_PrunePendingDeposits(t *testing.T) {
Signature: zeroSig[:],
}, Proof: [][]byte{root}}, 100+i, int64(i), bytesutil.ToBytes32(root))
}
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits, err := depositCache.FinalizedDeposits(ctx)
require.NoError(t, err)
assert.Equal(t, 7, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
@@ -799,7 +890,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}
// Insert 3 deposits before hand.
require.NoError(t, depositCache.InsertFinalizedDeposits(ctx, 2, [32]byte{}, 0))
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k'})
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k'})
fDeposits, err := depositCache.FinalizedDeposits(ctx)
require.NoError(t, err)
assert.Equal(t, 5, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
@@ -810,7 +901,7 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}

// Insert New Finalized State with higher deposit count.
service.insertFinalizedDeposits(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
service.insertFinalizedDepositsAndPrune(ctx, [32]byte{'m', 'o', 'c', 'k', '2'})
fDeposits, err = depositCache.FinalizedDeposits(ctx)
require.NoError(t, err)
assert.Equal(t, 12, int(fDeposits.MerkleTrieIndex()), "Finalized deposits not inserted correctly")
@@ -821,6 +912,8 @@ func TestInsertFinalizedDeposits_MultipleFinalizedRoutines(t *testing.T) {
}

func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
logHook := logTest.NewGlobal()
|
||||
|
||||
genesis, keys := util.DeterministicGenesisState(t, 64)
|
||||
b, err := util.GenerateFullBlock(genesis, keys, util.DefaultBlockGenConfig(), 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -840,7 +933,8 @@ func TestRemoveBlockAttestationsInPool(t *testing.T) {
|
||||
require.NoError(t, service.cfg.AttPool.SaveAggregatedAttestations(atts))
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.pruneAttsFromPool(wsb))
|
||||
service.pruneAttsFromPool(context.Background(), nil /* state not needed pre-Electra */, wsb)
|
||||
require.LogsDoNotContain(t, logHook, "Could not prune attestations")
|
||||
require.Equal(t, 0, service.cfg.AttPool.AggregatedAttestationCount())
|
||||
}
|
||||
|
||||
@@ -1153,7 +1247,7 @@ func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
logHook := logTest.NewGlobal()
for i := 0; i < 10; i++ {
fc := &ethpb.Checkpoint{}
-st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, [32]byte{}, fc, fc)
+st, blkRoot, err := prepareForkchoiceState(ctx, 0, wsb1.Block().ParentRoot(), [32]byte{}, [32]byte{}, fc, fc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
var wg sync.WaitGroup
@@ -1896,6 +1990,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
for i := 1; i < 6; i++ {
|
||||
driftGenesisTime(service, int64(i), 0)
|
||||
@@ -2030,6 +2125,7 @@ func TestNoViableHead_Reboot(t *testing.T) {
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, jroot))
|
||||
service.cfg.ForkChoiceStore.SetBalancesByRooter(service.cfg.StateGen.ActiveNonSlashedBalancesByRoot)
|
||||
require.NoError(t, service.StartFromSavedState(genesisState))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
|
||||
// Forkchoice has the genesisRoot loaded at startup
|
||||
require.Equal(t, genesisRoot, service.ensureRootNotZeros(service.cfg.ForkChoiceStore.CachedHeadRoot()))
|
||||
@@ -2297,7 +2393,7 @@ func TestMissingIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
-require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
+require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
missing, err := missingIndices(bs, c.root, c.expected, 0)
if c.err != nil {
require.ErrorIs(t, err, c.err)
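CreateFakeIndices now takes an explicit extra argument, but the property under test is unchanged: missingIndices reports which expected blob indices are not yet in storage. A set-based sketch of that check, ignoring the real filesystem-backed blob store:

```go
package main

// missingIndices is a simplified version of the helper exercised above: given
// the blob indices a block commits to and the indices already on disk, return
// the ones that still need to be fetched. The real helper consults the blob
// filesystem store; here presence is just a set lookup.
func missingIndices(expected []uint64, present map[uint64]struct{}) []uint64 {
	var missing []uint64
	for _, idx := range expected {
		if _, ok := present[idx]; !ok {
			missing = append(missing, idx)
		}
	}
	return missing
}
```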
@@ -2557,7 +2653,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
l := util.NewTestLightClient(t).SetupTestAltair(0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2603,7 +2699,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
l := util.NewTestLightClient(t).SetupTestAltair(0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2655,7 +2751,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
l := util.NewTestLightClient(t).SetupTestAltair(0, false)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2716,7 +2812,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2761,7 +2857,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2813,7 +2909,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false, 0, false)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2874,7 +2970,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2919,7 +3015,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -2971,7 +3067,7 @@ func TestSaveLightClientUpdate(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false, 0, false)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -3042,7 +3138,7 @@ func TestSaveLightClientBootstrap(t *testing.T) {
|
||||
ctx := tr.ctx
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
l := util.NewTestLightClient(t).SetupTestAltair(0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -3077,7 +3173,7 @@ func TestSaveLightClientBootstrap(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
@@ -3112,7 +3208,7 @@ func TestSaveLightClientBootstrap(t *testing.T) {
|
||||
})
|
||||
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false, 0, true)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
payloadattribute "github.com/prysmaticlabs/prysm/v5/consensus-types/payload-attribute"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
@@ -149,35 +148,15 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)
|
||||
return
|
||||
}
|
||||
newAttHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
|
||||
var attributes payloadattribute.Attributer
|
||||
if s.inRegularSync() {
|
||||
attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
}
|
||||
if headState.Version() >= version.EPBS {
|
||||
bh, err := headState.LatestBlockHash()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get latest block hash")
|
||||
return
|
||||
}
|
||||
_, err = s.notifyForkchoiceUpdateEPBS(ctx, [32]byte(bh), attributes)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not notify forkchoice update")
|
||||
}
|
||||
if err := s.saveHead(ctx, newHeadRoot, headBlock, headState); err != nil {
|
||||
log.WithError(err).Error("could not save head")
|
||||
return
|
||||
}
|
||||
if err := s.pruneAttsFromPool(headBlock); err != nil {
|
||||
log.WithError(err).Error("could not prune attestations from pool")
|
||||
}
|
||||
return
|
||||
}
|
||||
fcuArgs := &fcuConfig{
|
||||
headState: headState,
|
||||
headRoot: newHeadRoot,
|
||||
headBlock: headBlock,
|
||||
proposingSlot: proposingSlot,
|
||||
}
|
||||
if s.inRegularSync() {
|
||||
fcuArgs.attributes = s.getPayloadAttribute(ctx, headState, proposingSlot, newHeadRoot[:])
|
||||
}
|
||||
if fcuArgs.attributes != nil && s.shouldOverrideFCU(newHeadRoot, proposingSlot) {
|
||||
return
|
||||
}
|
||||
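The UpdateHead hunk above drops the separate ePBS branch, so every head update now flows through a single fcuConfig before the forkchoice-updated call, with payload attributes attached only while the node is in regular sync. A stripped-down sketch of that shape; fcuArgs, payloadAttrs and the callback are placeholders, not the real types:

```go
package main

import "context"

// payloadAttrs is a placeholder for the payload attributes handed to the engine.
type payloadAttrs struct{ suggestedFeeRecipient [20]byte }

// fcuArgs gathers everything a forkchoice-updated notification needs.
type fcuArgs struct {
	headRoot      [32]byte
	proposingSlot uint64
	attributes    *payloadAttrs
}

// buildFCUArgs mirrors the consolidated flow: attributes are only computed when
// the node is in regular sync, and the caller may still skip the notification
// (e.g. a shouldOverrideFCU-style check) before talking to the execution engine.
func buildFCUArgs(ctx context.Context, headRoot [32]byte, slot uint64, inRegularSync bool, getAttrs func(context.Context, [32]byte, uint64) *payloadAttrs) *fcuArgs {
	args := &fcuArgs{headRoot: headRoot, proposingSlot: slot}
	if inRegularSync {
		args.attributes = getAttrs(ctx, headRoot, slot)
	}
	return args
}
```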
@@ -205,7 +184,7 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
}

hasState := s.cfg.BeaconDB.HasStateSummary(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
-hasBlock := s.chainHasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
+hasBlock := s.hasBlock(ctx, bytesutil.ToBytes32(a.GetData().BeaconBlockRoot))
if !(hasState && hasBlock) {
continue
}
@@ -42,11 +42,11 @@ func TestVerifyLMDFFGConsistent(t *testing.T) {

f := service.cfg.ForkChoiceStore
fc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-state, r32, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, [32]byte{}, fc, fc)
+state, r32, err := prepareForkchoiceState(ctx, 32, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r32))

-state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, [32]byte{}, fc, fc)
+state, r33, err := prepareForkchoiceState(ctx, 33, [32]byte{'b'}, r32.Root(), params.BeaconConfig().ZeroHash, fc, fc)
require.NoError(t, err)
require.NoError(t, f.InsertNode(ctx, state, r33))
@@ -82,7 +82,7 @@ func TestProcessAttestations_Ok(t *testing.T) {
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
-state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, [32]byte{}, ojc, ofc)
+state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
attsToSave := make([]ethpb.Att, len(atts))
@@ -142,7 +142,7 @@ func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
-state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, [32]byte{}, ojc, ojc)
+state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
@@ -191,7 +191,7 @@ func TestService_UpdateHead_NoAtts(t *testing.T) {
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
util.SaveBlock(t, ctx, service.cfg.BeaconDB, b)
-state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, [32]byte{}, ojc, ojc)
+state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
require.Equal(t, 3, fcs.NodeCount())
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
@@ -39,18 +40,10 @@ var epochsSinceFinalityExpandCache = primitives.Epoch(4)
|
||||
// BlockReceiver interface defines the methods of chain service for receiving and processing new blocks.
|
||||
type BlockReceiver interface {
|
||||
ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error
|
||||
ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROSignedExecutionPayloadEnvelope, avs das.AvailabilityStore) error
|
||||
ReceiveBlockBatch(ctx context.Context, blocks []blocks.ROBlock, avs das.AvailabilityStore) error
|
||||
HasBlock(ctx context.Context, root [32]byte) bool
|
||||
RecentBlockSlot(root [32]byte) (primitives.Slot, error)
|
||||
BlockBeingSynced([32]byte) bool
|
||||
PayloadBeingSynced([32]byte) bool
|
||||
}
|
||||
|
||||
// PayloadAttestationReceiver defines methods of the chain service for receiving
|
||||
// and processing new payload attestations and payload attestation messages
|
||||
type PayloadAttestationReceiver interface {
|
||||
ReceivePayloadAttestationMessage(ctx context.Context, a *ethpb.PayloadAttestationMessage) error
|
||||
}
|
||||
|
||||
// BlobReceiver interface defines the methods of chain service for receiving new
|
||||
@@ -59,11 +52,6 @@ type BlobReceiver interface {
|
||||
ReceiveBlob(context.Context, blocks.VerifiedROBlob) error
|
||||
}
|
||||
|
||||
// ExecutionPayloadReceiver interface defines the methods of chain service for receiving `ROExecutionPayloadEnvelope`.
|
||||
type ExecutionPayloadReceiver interface {
|
||||
ReceiveExecutionPayloadEnvelope(ctx context.Context, envelope interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityStore) error
|
||||
}
|
||||
|
||||
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
|
||||
type SlashingReceiver interface {
|
||||
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
|
||||
@@ -77,6 +65,10 @@ type SlashingReceiver interface {
|
||||
func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySignedBeaconBlock, blockRoot [32]byte, avs das.AvailabilityStore) error {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.ReceiveBlock")
|
||||
defer span.End()
|
||||
// Return early if the block is blacklisted
|
||||
if features.BlacklistedBlock(blockRoot) {
|
||||
return errBlacklistedRoot
|
||||
}
|
||||
// Return early if the block has been synced
|
||||
if s.InForkchoice(blockRoot) {
|
||||
log.WithField("blockRoot", fmt.Sprintf("%#x", blockRoot)).Debug("Ignoring already synced block")
|
||||
@@ -100,30 +92,14 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var postState state.BeaconState
|
||||
var isValidPayload bool
|
||||
var daWaitedTime time.Duration
|
||||
if blockCopy.Version() >= version.EPBS {
|
||||
postState, err = s.validateStateTransition(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not validate state transition")
|
||||
}
|
||||
optimistic, err := s.IsOptimisticForRoot(ctx, roblock.Block().ParentRoot())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if parent is optimistic")
|
||||
}
|
||||
// if the parent is not optimistic then we can set the block as
|
||||
// not optimistic.
|
||||
isValidPayload = !optimistic
|
||||
} else {
|
||||
postState, isValidPayload, err = s.validateExecutionAndConsensus(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daWaitedTime, err = s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
postState, isValidPayload, err := s.validateExecutionAndConsensus(ctx, preState, roblock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
daWaitedTime, err := s.handleDA(ctx, blockCopy, blockRoot, avs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Defragment the state before continuing block processing.
|
||||
s.defragmentState(postState)
|
||||
@@ -149,7 +125,7 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
|
||||
return err
|
||||
}
|
||||
// If slasher is configured, forward the attestations in the block via an event feed for processing.
|
||||
if features.Get().EnableSlasher {
|
||||
if s.slasherEnabled {
|
||||
go s.sendBlockAttestationsToSlasher(blockCopy, preState)
|
||||
}
|
||||
|
||||
@@ -307,9 +283,10 @@ func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedSta
|
||||
go func() {
|
||||
s.sendNewFinalizedEvent(ctx, finalizedState)
|
||||
}()
|
||||
|
||||
depCtx, cancel := context.WithTimeout(context.Background(), depositDeadline)
|
||||
go func() {
|
||||
s.insertFinalizedDeposits(depCtx, finalized.Root)
|
||||
s.insertFinalizedDepositsAndPrune(depCtx, finalized.Root)
|
||||
cancel()
|
||||
}()
|
||||
}
|
||||
@@ -497,7 +474,7 @@ func (s *Service) validateStateTransition(ctx context.Context, preState state.Be
stateTransitionStartTime := time.Now()
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
if err != nil {
-if ctx.Err() != nil {
+if ctx.Err() != nil || electra.IsExecutionRequestError(err) {
return nil, err
}
return nil, invalidBlock{error: err}
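The widened branch above passes context cancellations and Electra execution-request failures through unwrapped, while everything else is wrapped as an invalid block so forkchoice can mark it bad. A sketch of that classification using a local error type; isRetryable is a caller-supplied predicate standing in for checks like electra.IsExecutionRequestError:

```go
package main

import (
	"context"
	"errors"
)

// invalidBlockErr marks state-transition failures that should cause the block
// to be rejected, as opposed to transient failures that are returned unwrapped.
type invalidBlockErr struct{ err error }

func (e invalidBlockErr) Error() string { return "invalid block: " + e.err.Error() }
func (e invalidBlockErr) Unwrap() error { return e.err }

// classifyTransitionErr mirrors the pattern in the hunk above.
func classifyTransitionErr(ctx context.Context, err error, isRetryable func(error) bool) error {
	if err == nil {
		return nil
	}
	if ctx.Err() != nil || isRetryable(err) {
		return err // not the block's fault; do not mark it invalid
	}
	return invalidBlockErr{err: err}
}

// IsInvalidBlock reports whether an error carries the invalid-block marker.
func IsInvalidBlock(err error) bool {
	var ib invalidBlockErr
	return errors.As(err, &ib)
}
```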
@@ -575,7 +552,7 @@ func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySigne
|
||||
// is done in the background to avoid adding more load to this critical code path.
|
||||
ctx := context.TODO()
|
||||
for _, att := range signed.Block().Body().Attestations() {
|
||||
committees, err := helpers.AttestationCommittees(ctx, preState, att)
|
||||
committees, err := helpers.AttestationCommitteesFromState(ctx, preState, att)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get attestation committees")
|
||||
return
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
@@ -41,6 +42,16 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
bc.ShardCommitteePeriod = 0 // Required for voluntary exits test in reasonable time.
|
||||
params.OverrideBeaconConfig(bc)
|
||||
|
||||
badBlock := genFullBlock(t, util.DefaultBlockGenConfig(), 101)
|
||||
badRoot, err := badBlock.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
badRoots := make(map[[32]byte]struct{})
|
||||
badRoots[badRoot] = struct{}{}
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
BlacklistedRoots: badRoots,
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
type args struct {
|
||||
block *ethpb.SignedBeaconBlock
|
||||
}
|
||||
@@ -124,8 +135,14 @@ func TestService_ReceiveBlock(t *testing.T) {
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "The block is blacklisted",
|
||||
args: args{
|
||||
block: badBlock,
|
||||
},
|
||||
wantedErr: errBlacklistedRoot.Error(),
|
||||
},
|
||||
}
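The new test case relies on a blacklisted-roots feature flag: ReceiveBlock returns errBlacklistedRoot before doing any work when the incoming root is on the list. A minimal sketch of that kind of check, with a plain map set standing in for the real features package:

```go
package main

import "errors"

var errBlacklistedRoot = errors.New("block root is blacklisted")

// blacklist is a simplified stand-in for the feature flag consulted by
// ReceiveBlock: a set of block roots the node refuses to import.
type blacklist map[[32]byte]struct{}

// checkRoot returns errBlacklistedRoot before any further processing,
// mirroring the early return added to ReceiveBlock above.
func (b blacklist) checkRoot(root [32]byte) error {
	if _, banned := b[root]; banned {
		return errBlacklistedRoot
	}
	return nil
}
```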
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for _, tt := range tests {
|
||||
wg.Add(1)
|
||||
@@ -455,41 +472,81 @@ func Test_executePostFinalizationTasks(t *testing.T) {
|
||||
Root: headRoot[:],
|
||||
}))
|
||||
require.NoError(t, headState.SetGenesisValidatorsRoot(params.BeaconConfig().ZeroHash[:]))
|
||||
t.Run("pre deposit request", func(t *testing.T) {
|
||||
require.NoError(t, headState.SetEth1DepositIndex(1))
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
})
|
||||
t.Run("deposit requests started", func(t *testing.T) {
|
||||
require.NoError(t, headState.SetEth1DepositIndex(1))
|
||||
require.NoError(t, headState.SetDepositRequestsStartIndex(1))
|
||||
s, tr := minimalTestService(t, WithFinalizedStateAtStartUp(headState))
|
||||
ctx, beaconDB, stateGen := tr.ctx, tr.db, tr.sg
|
||||
|
||||
require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, genesis)
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, headRoot))
|
||||
require.NoError(t, beaconDB.SaveState(ctx, headState, genesisRoot))
|
||||
util.SaveBlock(t, ctx, beaconDB, headBlock)
|
||||
require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, stateGen.SaveState(ctx, headRoot, headState))
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, &ethpb.Checkpoint{Epoch: slots.ToEpoch(finalizedSlot), Root: headRoot[:]}))
|
||||
|
||||
notifier := &blockchainTesting.MockStateNotifier{RecordEvents: true}
|
||||
s.cfg.StateNotifier = notifier
|
||||
s.executePostFinalizationTasks(s.ctx, headState)
|
||||
|
||||
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
|
||||
require.Equal(t, 1, len(notifier.ReceivedEvents()))
|
||||
e := notifier.ReceivedEvents()[0]
|
||||
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
|
||||
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
|
||||
require.Equal(t, true, ok, "event has wrong data type")
|
||||
assert.Equal(t, primitives.Epoch(123), fc.Epoch)
|
||||
assert.DeepEqual(t, headRoot[:], fc.Block)
|
||||
assert.DeepEqual(t, finalizedStRoot[:], fc.State)
|
||||
assert.Equal(t, false, fc.ExecutionOptimistic)
|
||||
|
||||
// check the cache
|
||||
index, ok := headState.ValidatorIndexByPubkey(bytesutil.ToBytes48(key))
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), index) // first index
|
||||
})
|
||||
|
||||
// check deposit
|
||||
require.LogsContain(t, logHook, "Finalized deposit insertion completed at index")
|
||||
}
@@ -1,256 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epbs"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// ReceiveExecutionPayloadEnvelope is a function that defines the operations (minus pubsub)
|
||||
// that are performed on a received execution payload envelope. The operations consist of:
|
||||
// 1. Validate the payload, apply state transition.
|
||||
// 2. Apply fork choice to the processed payload
|
||||
// 3. Save latest head info
|
||||
func (s *Service) ReceiveExecutionPayloadEnvelope(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
|
||||
receivedTime := time.Now()
|
||||
envelope, err := signed.Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("Receiving execution payload envelope")
|
||||
root := envelope.BeaconBlockRoot()
|
||||
s.payloadBeingSynced.set(envelope)
|
||||
defer s.payloadBeingSynced.unset(root)
|
||||
|
||||
preState, err := s.getPayloadEnvelopePrestate(ctx, envelope)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get prestate")
|
||||
}
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
if err := epbs.ValidatePayloadStateTransition(ctx, preState, envelope); err != nil {
|
||||
return errors.Wrap(err, "failed to validate consensus state transition function")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
var isValidPayload bool
|
||||
eg.Go(func() error {
|
||||
var err error
|
||||
isValidPayload, err = s.validateExecutionOnEnvelope(ctx, envelope)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not notify the engine of the new payload")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
daStartTime := time.Now()
|
||||
// TODO: Add DA check
|
||||
daWaitedTime := time.Since(daStartTime)
|
||||
dataAvailWaitedTime.Observe(float64(daWaitedTime.Milliseconds()))
|
||||
if err := s.savePostPayload(ctx, signed, preState); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.insertPayloadEnvelope(envelope); err != nil {
|
||||
return errors.Wrap(err, "could not insert payload to forkchoice")
|
||||
}
|
||||
if isValidPayload {
|
||||
s.ForkChoicer().Lock()
|
||||
if err := s.ForkChoicer().SetOptimisticToValid(ctx, root); err != nil {
|
||||
s.ForkChoicer().Unlock()
|
||||
return errors.Wrap(err, "could not set optimistic payload to valid")
|
||||
}
|
||||
s.ForkChoicer().Unlock()
|
||||
}
|
||||
|
||||
headRoot, err := s.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get headroot to compute attributes")
|
||||
return nil
|
||||
}
|
||||
execution, err := envelope.Execution()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get execution data")
|
||||
return nil
|
||||
}
|
||||
blockHash := [32]byte(execution.BlockHash())
|
||||
if bytes.Equal(headRoot, root[:]) {
|
||||
attr := s.getPayloadAttribute(ctx, preState, envelope.Slot()+1, headRoot)
|
||||
payloadID, err := s.notifyForkchoiceUpdateEPBS(ctx, blockHash, attr)
|
||||
if err != nil {
|
||||
if IsInvalidBlock(err) {
|
||||
// TODO handle the lvh here
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if attr != nil && !attr.IsEmpty() && payloadID != nil {
|
||||
var pid [8]byte
|
||||
copy(pid[:], payloadID[:])
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(headRoot)),
|
||||
"headSlot": envelope.Slot(),
|
||||
"payloadID": fmt.Sprintf("%#x", bytesutil.Trunc(payloadID[:])),
|
||||
}).Info("Forkchoice updated with payload attributes for proposal")
|
||||
s.cfg.PayloadIDCache.Set(envelope.Slot()+1, root, pid)
|
||||
}
|
||||
// simply update the headstate in head
|
||||
s.headLock.Lock()
|
||||
s.head.state = preState.Copy()
|
||||
s.headLock.Unlock()
|
||||
// update the NSC with the hash for the full block, we use the block hash as the key
|
||||
if err := transition.UpdateNextSlotCache(ctx, blockHash[:], preState); err != nil {
|
||||
log.WithError(err).Error("could not update next slot cache with payload")
|
||||
}
|
||||
|
||||
}
|
||||
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
|
||||
executionEngineProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
|
||||
ex, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get execution data")
|
||||
}
|
||||
// Send feed event
|
||||
// Send notification of the processed block to the state feed.
|
||||
s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.PayloadProcessed,
|
||||
Data: &statefeed.PayloadProcessedData{
|
||||
Slot: envelope.Slot(),
|
||||
BlockRoot: root,
|
||||
ExecutionBlockHash: blockHash,
|
||||
ExecutionOptimistic: !isValidPayload,
|
||||
},
|
||||
})
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": envelope.Slot(),
|
||||
"blockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])),
|
||||
"blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(ex.BlockHash())),
|
||||
"ParentHash": fmt.Sprintf("%#x", bytesutil.Trunc(ex.ParentHash())),
|
||||
}).Info("Processed execution payload envelope")
|
||||
return nil
|
||||
}
|
||||
|
||||
// notifyNewPayload signals execution engine on a new payload.
|
||||
// It returns true if the EL has returned VALID for the block
|
||||
func (s *Service) notifyNewEnvelope(ctx context.Context, envelope interfaces.ROExecutionPayloadEnvelope) (bool, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
|
||||
defer span.End()
|
||||
|
||||
payload, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get execution payload")
|
||||
}
|
||||
|
||||
versionedHashes := envelope.VersionedHashes()
|
||||
root := envelope.BeaconBlockRoot()
|
||||
parentRoot, err := s.ParentRoot(root)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get parent block root")
|
||||
}
|
||||
pr := common.Hash(parentRoot)
|
||||
requests := envelope.ExecutionRequests()
|
||||
lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, &pr, requests)
|
||||
switch {
|
||||
case err == nil:
|
||||
newPayloadValidNodeCount.Inc()
|
||||
return true, nil
|
||||
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
|
||||
newPayloadOptimisticNodeCount.Inc()
|
||||
log.WithFields(logrus.Fields{
|
||||
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
|
||||
}).Info("Called new payload with optimistic block")
|
||||
return false, nil
|
||||
case errors.Is(err, execution.ErrInvalidPayloadStatus):
|
||||
lvh := bytesutil.ToBytes32(lastValidHash)
|
||||
return false, invalidBlock{
|
||||
error: ErrInvalidPayload,
|
||||
lastValidHash: lvh,
|
||||
}
|
||||
default:
|
||||
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// validateExecutionOnEnvelope notifies the engine of the incoming execution payload and returns true if the payload is valid
|
||||
func (s *Service) validateExecutionOnEnvelope(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (bool, error) {
|
||||
isValidPayload, err := s.notifyNewEnvelope(ctx, e)
|
||||
if err == nil {
|
||||
return isValidPayload, nil
|
||||
}
|
||||
blockRoot := e.BeaconBlockRoot()
|
||||
parentRoot, rootErr := s.ParentRoot(blockRoot)
|
||||
if rootErr != nil {
|
||||
return false, errors.Wrap(rootErr, "could not get parent block root")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
err = s.handleInvalidExecutionError(ctx, err, blockRoot, parentRoot)
|
||||
s.cfg.ForkChoiceStore.Unlock()
|
||||
return false, err
|
||||
}
|
||||
|
||||
func (s *Service) getPayloadEnvelopePrestate(ctx context.Context, e interfaces.ROExecutionPayloadEnvelope) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "blockChain.getPayloadEnvelopePreState")
|
||||
defer span.End()
|
||||
|
||||
// Verify incoming payload has a valid pre state.
|
||||
root := e.BeaconBlockRoot()
|
||||
// Verify the referred block is known to forkchoice
|
||||
if !s.InForkchoice(root) {
|
||||
return nil, errors.New("Cannot import execution payload envelope for unknown block")
|
||||
}
|
||||
if err := s.verifyBlkPreState(ctx, root); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify payload prestate")
|
||||
}
|
||||
|
||||
preState, err := s.cfg.StateGen.StateByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get pre state")
|
||||
}
|
||||
if preState == nil || preState.IsNil() {
|
||||
return nil, errors.Wrap(err, "nil pre state")
|
||||
}
|
||||
return preState, nil
|
||||
}
|
||||
|
||||
func (s *Service) savePostPayload(ctx context.Context, signed interfaces.ROSignedExecutionPayloadEnvelope, st state.BeaconState) error {
|
||||
if err := s.cfg.BeaconDB.SaveBlindPayloadEnvelope(ctx, signed); err != nil {
|
||||
return err
|
||||
}
|
||||
envelope, err := signed.Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
execution, err := envelope.Execution()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := envelope.BeaconBlockRoot()
|
||||
if err := s.cfg.StateGen.SaveState(ctx, [32]byte(execution.BlockHash()), st); err != nil {
|
||||
log.Warnf("Rolling back insertion of block with root %#x", r)
|
||||
if err := s.cfg.BeaconDB.DeleteBlock(ctx, r); err != nil {
|
||||
log.WithError(err).Errorf("Could not delete block with block root %#x", r)
|
||||
}
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
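The deleted file above ran the consensus state transition and the engine's newPayload check concurrently through an errgroup and failed fast on either error. A generic sketch of that pattern; the two validation funcs are placeholders for the real calls:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// validateInParallel runs an independent consensus check and execution check
// concurrently and returns as soon as either fails, the same shape as the
// errgroup usage in the removed ReceiveExecutionPayloadEnvelope.
func validateInParallel(ctx context.Context, consensus func(context.Context) error, execution func(context.Context) (bool, error)) (bool, error) {
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error { return consensus(ctx) })

	var valid bool
	eg.Go(func() error {
		var err error
		valid, err = execution(ctx)
		return err
	})

	if err := eg.Wait(); err != nil {
		return false, fmt.Errorf("payload validation failed: %w", err)
	}
	return valid, nil
}
```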
@@ -1,103 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
mockExecution "github.com/prysmaticlabs/prysm/v5/beacon-chain/execution/testing"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util/random"
|
||||
)
|
||||
|
||||
func Test_getPayloadEnvelopePrestate(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = service.getPayloadEnvelopePrestate(ctx, e)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func Test_notifyNewEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
isValidPayload, err := service.notifyNewEnvelope(ctx, e)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isValidPayload)
|
||||
}
|
||||
|
||||
func Test_validateExecutionOnEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
p := random.ExecutionPayloadEnvelope(t)
|
||||
p.BeaconBlockRoot = service.originBlockRoot[:]
|
||||
e, err := blocks.WrappedROExecutionPayloadEnvelope(p)
|
||||
require.NoError(t, err)
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
isValidPayload, err := service.validateExecutionOnEnvelope(ctx, e)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, isValidPayload)
|
||||
}
|
||||
|
||||
func Test_ReceiveExecutionPayloadEnvelope(t *testing.T) {
|
||||
service, tr := minimalTestService(t, WithPayloadIDCache(cache.NewPayloadIDCache()))
|
||||
ctx, fcs := tr.ctx, tr.fcs
|
||||
gs, _ := util.DeterministicGenesisStateEpbs(t, 32)
|
||||
require.NoError(t, service.saveGenesisData(ctx, gs))
|
||||
require.NoError(t, fcs.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Root: service.originBlockRoot}))
|
||||
post := gs.Copy()
|
||||
p := &enginev1.ExecutionPayloadEnvelope{
|
||||
Payload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
BeaconBlockRoot: service.originBlockRoot[:],
|
||||
BlobKzgCommitments: make([][]byte, 0),
|
||||
StateRoot: make([]byte, 32),
|
||||
ExecutionRequests: &enginev1.ExecutionRequests{},
|
||||
}
|
||||
sp := &enginev1.SignedExecutionPayloadEnvelope{
|
||||
Message: p,
|
||||
}
|
||||
e, err := blocks.WrappedROSignedExecutionPayloadEnvelope(sp)
|
||||
require.NoError(t, err)
|
||||
das := &das.MockAvailabilityStore{}
|
||||
|
||||
blockHeader := post.LatestBlockHeader()
|
||||
prevStateRoot, err := post.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
blockHeader.StateRoot = prevStateRoot[:]
|
||||
require.NoError(t, post.SetLatestBlockHeader(blockHeader))
|
||||
stRoot, err := post.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
p.StateRoot = stRoot[:]
|
||||
engine := &mockExecution.EngineClient{}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
require.NoError(t, service.ReceiveExecutionPayloadEnvelope(ctx, e, das))
|
||||
}
@@ -1,33 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
func (s *Service) ReceivePayloadAttestationMessage(ctx context.Context, a *eth.PayloadAttestationMessage) error {
|
||||
if err := helpers.ValidateNilPayloadAttestationMessage(a); err != nil {
|
||||
return err
|
||||
}
|
||||
root := [32]byte(a.Data.BeaconBlockRoot)
|
||||
st, err := s.HeadStateReadOnly(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ptc, err := helpers.GetPayloadTimelinessCommittee(ctx, st, a.Data.Slot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx := slices.Index(ptc, a.ValidatorIndex)
|
||||
if idx == -1 {
|
||||
return errInvalidValidatorIndex
|
||||
}
|
||||
if s.cfg.PayloadAttestationCache.Seen(root, uint64(primitives.ValidatorIndex(idx))) {
|
||||
return nil
|
||||
}
|
||||
return s.cfg.PayloadAttestationCache.Add(a, uint64(idx))
|
||||
}
|
||||
@@ -3,7 +3,6 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
@@ -23,7 +22,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
f "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/blstoexec"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
|
||||
@@ -32,7 +30,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -42,31 +39,31 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
prysmTime "github.com/prysmaticlabs/prysm/v5/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Service represents a service that handles the internal
|
||||
// logic of managing the full PoS beacon chain.
|
||||
type Service struct {
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
lastPublishedLightClientEpoch primitives.Epoch
|
||||
blobStorage *filesystem.BlobStorage
|
||||
payloadBeingSynced *currentlySyncingPayload
|
||||
cfg *config
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
genesisTime time.Time
|
||||
head *head
|
||||
headLock sync.RWMutex
|
||||
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
|
||||
boundaryRoots [][32]byte
|
||||
checkpointStateCache *cache.CheckpointStateCache
|
||||
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
|
||||
initSyncBlocksLock sync.RWMutex
|
||||
wsVerifier *WeakSubjectivityVerifier
|
||||
clockSetter startup.ClockSetter
|
||||
clockWaiter startup.ClockWaiter
|
||||
syncComplete chan struct{}
|
||||
blobNotifiers *blobNotifierMap
|
||||
blockBeingSynced *currentlySyncingBlock
|
||||
blobStorage *filesystem.BlobStorage
|
||||
slasherEnabled bool
|
||||
}
|
||||
|
||||
// config options for the service.
|
||||
@@ -75,8 +72,6 @@ type config struct {
|
||||
ChainStartFetcher execution.ChainStartFetcher
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache cache.DepositCache
|
||||
PayloadAttestationCache *cache.PayloadAttestationCache
|
||||
PayloadEnvelopeCache *sync.Map
|
||||
PayloadIDCache *cache.PayloadIDCache
|
||||
TrackedValidatorsCache *cache.TrackedValidatorsCache
|
||||
AttestationCache *cache.AttestationCache
|
||||
@@ -187,7 +182,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
|
||||
blobNotifiers: bn,
|
||||
cfg: &config{},
|
||||
blockBeingSynced: &currentlySyncingBlock{roots: make(map[[32]byte]struct{})},
|
||||
payloadBeingSynced: &currentlySyncingPayload{roots: make(map[[32]byte]primitives.PTCStatus)},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(srv); err != nil {
|
||||
@@ -274,69 +268,18 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return err
|
||||
}
|
||||
s.originBlockRoot = originRoot
|
||||
|
||||
if err := s.initializeHeadFromDB(s.ctx); err != nil {
|
||||
return errors.Wrap(err, "could not set up chain info")
|
||||
st, err := s.cfg.StateGen.Resume(s.ctx, s.cfg.FinalizedStateAtStartUp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
spawnCountdownIfPreGenesis(s.ctx, s.genesisTime, s.cfg.BeaconDB)
|
||||
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get justified checkpoint")
|
||||
}
|
||||
if justified == nil {
|
||||
return errNilJustifiedCheckpoint
|
||||
}
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedCheckpoint
|
||||
}
|
||||
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint state")
|
||||
}
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
if err := s.setupForkchoice(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice")
|
||||
}
|
||||
// not attempting to save initial sync blocks here, because there shouldn't be any until
|
||||
// after the statefeed.Initialized event is fired (below)
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, finalized.Epoch); err != nil {
|
||||
cp := s.FinalizedCheckpt()
|
||||
if err := s.wsVerifier.VerifyWeakSubjectivity(s.ctx, cp.Epoch); err != nil {
|
||||
// Exit run time if the node failed to verify weak subjectivity checkpoint.
|
||||
return errors.Wrap(err, "could not verify initial checkpoint provided for chain sync")
|
||||
}
|
||||
@@ -345,7 +288,6 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
if err := s.clockSetter.SetClock(startup.NewClock(s.genesisTime, vr)); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize blockchain service")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -375,45 +317,36 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
|
||||
return genesisBlkRoot, nil
|
||||
}
|
||||
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block found in the database to set the current head.
|
||||
// initializeHeadFromDB uses the finalized checkpoint and head block root from forkchoice to set the current head.
|
||||
// Note that this may block until stategen replays blocks between the finalized and head blocks
|
||||
// if the head sync flag was specified and the gap between the finalized and head blocks is at least 128 epochs long.
|
||||
func (s *Service) initializeHeadFromDB(ctx context.Context) error {
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint from db")
|
||||
}
|
||||
if finalized == nil {
|
||||
// This should never happen. At chain start, the finalized checkpoint
|
||||
// would be the genesis state and block.
|
||||
return errors.New("no finalized epoch in the database")
|
||||
}
|
||||
finalizedRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
var finalizedState state.BeaconState
|
||||
|
||||
finalizedState, err = s.cfg.StateGen.Resume(ctx, s.cfg.FinalizedStateAtStartUp)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized state from db")
|
||||
}
|
||||
|
||||
if finalizedState == nil || finalizedState.IsNil() {
|
||||
func (s *Service) initializeHead(ctx context.Context, st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if st == nil || st.IsNil() {
|
||||
return errors.New("finalized state can't be nil")
|
||||
}
|
||||
|
||||
finalizedBlock, err := s.getBlock(ctx, finalizedRoot)
|
||||
s.cfg.ForkChoiceStore.RLock()
|
||||
root := s.cfg.ForkChoiceStore.HighestReceivedBlockRoot()
|
||||
s.cfg.ForkChoiceStore.RUnlock()
|
||||
blk, err := s.cfg.BeaconDB.Block(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized block")
|
||||
return errors.Wrap(err, "could not get head block")
|
||||
}
|
||||
if err := s.setHead(&head{
|
||||
finalizedRoot,
|
||||
finalizedBlock,
|
||||
finalizedState,
|
||||
finalizedBlock.Block().Slot(),
|
||||
false,
|
||||
}); err != nil {
|
||||
if root != fRoot {
|
||||
st, err = s.cfg.StateGen.StateByRoot(ctx, root)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
}
|
||||
if err := s.setHead(&head{root, blk, st, blk.Block().Slot(), false}); err != nil {
|
||||
return errors.Wrap(err, "could not set head")
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": blk.Block().Slot(),
|
||||
}).Info("Initialized head block from DB")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -563,7 +496,7 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
// 2.) Check DB.
|
||||
// Checking 1.) is ten times faster than checking 2.)
|
||||
// this function requires a lock in forkchoice
|
||||
func (s *Service) chainHasBlock(ctx context.Context, root [32]byte) bool {
|
||||
func (s *Service) hasBlock(ctx context.Context, root [32]byte) bool {
|
||||
if s.cfg.ForkChoiceStore.HasNode(root) {
|
||||
return true
|
||||
}
|
||||
|
||||
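The renamed hasBlock keeps the two-tier lookup its comment describes: check forkchoice in memory first, fall back to the database only when needed, since the first check is roughly ten times faster. A small sketch of that shape with hypothetical lookup callbacks:

```go
package main

import "context"

// blockIndex is a hypothetical two-tier lookup mirroring Service.hasBlock:
// an in-memory forkchoice check first, then the database as a fallback.
type blockIndex struct {
	forkchoiceHas func(root [32]byte) bool                       // cheap, in memory
	dbHas         func(ctx context.Context, root [32]byte) bool // roughly an order of magnitude slower
}

func (b blockIndex) hasBlock(ctx context.Context, root [32]byte) bool {
	if b.forkchoiceHas(root) {
		return true
	}
	return b.dbHas(ctx, root)
}
```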
@@ -386,8 +386,8 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.cfg.ForkChoiceStore.InsertNode(ctx, beaconState, roblock))
|
||||
|
||||
assert.Equal(t, false, s.chainHasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.chainHasBlock(ctx, r), "Should have block")
|
||||
assert.Equal(t, false, s.hasBlock(ctx, [32]byte{}), "Should not have block")
|
||||
assert.Equal(t, true, s.hasBlock(ctx, r), "Should have block")
|
||||
}
|
||||
|
||||
func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|

beacon-chain/blockchain/setup_forchoice.go (new file, +175 lines)
@@ -0,0 +1,175 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func (s *Service) setupForkchoice(st state.BeaconState) error {
|
||||
if err := s.setupForkchoiceCheckpoints(); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice checkpoints")
|
||||
}
|
||||
if err := s.setupForkchoiceTree(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if err := s.initializeHead(s.ctx, st); err != nil {
|
||||
return errors.Wrap(err, "could not initialize head from db")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) startupHeadRoot() [32]byte {
|
||||
headStr := features.Get().ForceHead
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if headStr == "" {
|
||||
return fRoot
|
||||
}
|
||||
if headStr == "head" {
|
||||
root, err := s.cfg.BeaconDB.HeadBlockRoot()
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
log.Infof("Using Head root of %#x", root)
|
||||
return root
|
||||
}
|
||||
root, err := bytesutil.DecodeHexWithLength(headStr, 32)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not parse head root, starting with finalized block as head")
|
||||
return fRoot
|
||||
}
|
||||
return [32]byte(root)
|
||||
}
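
The three branches above resolve the ForceHead feature flag: an empty value falls back to the finalized root, the literal value "head" loads the stored head block root from the database, and anything else is parsed as a 32-byte hex root. Below is a minimal standalone sketch of the same selection logic; the helper name and the fallback/lookup parameters are illustrative and not part of the diff.

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// pickStartupRoot mirrors the branch structure of startupHeadRoot in a
// dependency-free way: fall back on empty input, call a DB lookup for
// "head", otherwise decode a 32-byte hex root. Purely illustrative.
func pickStartupRoot(forceHead string, fallback [32]byte, dbHead func() ([32]byte, error)) [32]byte {
	if forceHead == "" {
		return fallback
	}
	if forceHead == "head" {
		r, err := dbHead()
		if err != nil {
			return fallback
		}
		return r
	}
	b, err := hex.DecodeString(strings.TrimPrefix(forceHead, "0x"))
	if err != nil || len(b) != 32 {
		return fallback
	}
	var out [32]byte
	copy(out[:], b)
	return out
}

func main() {
	fallback := [32]byte{1}
	fmt.Printf("%x\n", pickStartupRoot("", fallback, nil)) // prints the fallback root
}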
|
||||
|
||||
func (s *Service) setupForkchoiceTree(st state.BeaconState) error {
|
||||
headRoot := s.startupHeadRoot()
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
if err := s.setupForkchoiceRoot(st); err != nil {
|
||||
return errors.Wrap(err, "could not set up forkchoice root")
|
||||
}
|
||||
if headRoot == fRoot {
|
||||
return nil
|
||||
}
|
||||
blk, err := s.cfg.BeaconDB.Block(s.ctx, headRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not get head block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
if slots.ToEpoch(blk.Block().Slot()) < cp.Epoch {
|
||||
log.WithField("headRoot", fmt.Sprintf("%#x", headRoot)).Error("head block is older than finalized block, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
chain, err := s.buildForkchoiceChain(s.ctx, blk)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not build forkchoice chain, starting with finalized block as head")
|
||||
return nil
|
||||
}
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
return s.cfg.ForkChoiceStore.InsertChain(s.ctx, chain)
|
||||
}
|
||||
|
||||
func (s *Service) buildForkchoiceChain(ctx context.Context, head interfaces.ReadOnlySignedBeaconBlock) ([]*forkchoicetypes.BlockAndCheckpoints, error) {
|
||||
chain := []*forkchoicetypes.BlockAndCheckpoints{}
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
jp := s.CurrentJustifiedCheckpt()
|
||||
root, err := head.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head block root")
|
||||
}
|
||||
for {
|
||||
roblock, err := blocks.NewROBlockWithRoot(head, root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// This chain sets the justified checkpoint for every block, including some that are older than jp.
|
||||
// This should be however safe for forkchoice at startup. An alternative would be to hook during the
|
||||
// block processing pipeline when setting the head state, to compute the right states for the justified
|
||||
// checkpoint.
|
||||
chain = append(chain, &forkchoicetypes.BlockAndCheckpoints{Block: roblock, JustifiedCheckpoint: jp, FinalizedCheckpoint: cp})
|
||||
root = head.Block().ParentRoot()
|
||||
if root == fRoot {
|
||||
break
|
||||
}
|
||||
head, err = s.cfg.BeaconDB.Block(s.ctx, root)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get block")
|
||||
}
|
||||
if slots.ToEpoch(head.Block().Slot()) < cp.Epoch {
|
||||
return nil, errors.New("head block is not a descendant of the finalized checkpoint")
|
||||
}
|
||||
}
|
||||
return chain, nil
|
||||
}
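
buildForkchoiceChain walks parent roots from the requested head back to, but not including, the finalized block, so the returned slice is ordered newest-first, and a block older than the finalized checkpoint aborts the walk. The self-contained sketch below mirrors that parent walk over a toy in-memory block map; the type and function names are illustrative rather than Prysm APIs, and the descendant check is done on slots here for simplicity.

package main

import (
	"errors"
	"fmt"
)

type toyBlock struct {
	root   [32]byte
	parent [32]byte
	slot   uint64
}

// walkToFinalized collects blocks from head back to, but not including,
// the finalized root. It fails if the walk drops below the finalized slot
// without reaching the finalized root, i.e. head is not a descendant.
func walkToFinalized(byRoot map[[32]byte]toyBlock, head, finalized [32]byte, finalizedSlot uint64) ([]toyBlock, error) {
	cur, ok := byRoot[head]
	if !ok {
		return nil, errors.New("unknown head")
	}
	var chain []toyBlock
	for {
		chain = append(chain, cur)
		if cur.parent == finalized {
			return chain, nil
		}
		next, ok := byRoot[cur.parent]
		if !ok {
			return nil, errors.New("missing parent block")
		}
		if next.slot < finalizedSlot {
			return nil, errors.New("head is not a descendant of the finalized checkpoint")
		}
		cur = next
	}
}

func main() {
	f := [32]byte{'f'}
	a := toyBlock{root: [32]byte{'a'}, parent: f, slot: 10}
	b := toyBlock{root: [32]byte{'b'}, parent: a.root, slot: 11}
	byRoot := map[[32]byte]toyBlock{a.root: a, b.root: b}
	chain, err := walkToFinalized(byRoot, b.root, f, 9)
	fmt.Println(len(chain), err) // 2 <nil>
}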
|
||||
|
||||
func (s *Service) setupForkchoiceRoot(st state.BeaconState) error {
|
||||
cp := s.FinalizedCheckpt()
|
||||
fRoot := s.ensureRootNotZeros([32]byte(cp.Root))
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
}
|
||||
roblock, err := blocks.NewROBlockWithRoot(finalizedBlock, fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(s.ctx, st, roblock); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
if !features.Get().EnableStartOptimistic {
|
||||
lastValidatedCheckpoint, err := s.cfg.BeaconDB.LastValidatedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(fRoot[:], lastValidatedCheckpoint.Root) {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) setupForkchoiceCheckpoints() error {
|
||||
justified, err := s.cfg.BeaconDB.JustifiedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get justified checkpoint")
|
||||
}
|
||||
if justified == nil {
|
||||
return errNilJustifiedCheckpoint
|
||||
}
|
||||
finalized, err := s.cfg.BeaconDB.FinalizedCheckpoint(s.ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint")
|
||||
}
|
||||
if finalized == nil {
|
||||
return errNilFinalizedCheckpoint
|
||||
}
|
||||
|
||||
fRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(finalized.Root))
|
||||
s.cfg.ForkChoiceStore.Lock()
|
||||
defer s.cfg.ForkChoiceStore.Unlock()
|
||||
if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(s.ctx, &forkchoicetypes.Checkpoint{Epoch: justified.Epoch,
|
||||
Root: bytesutil.ToBytes32(justified.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's justified checkpoint")
|
||||
}
|
||||
if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{Epoch: finalized.Epoch,
|
||||
Root: fRoot}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
return nil
|
||||
}
|
||||
beacon-chain/blockchain/setup_forkchoice_test.go (new file, 128 lines)
@@ -0,0 +1,128 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_startupHeadRoot(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
hook := logTest.NewGlobal()
|
||||
cp := service.FinalizedCheckpt()
|
||||
require.DeepEqual(t, cp.Root, params.BeaconConfig().ZeroHash[:])
|
||||
gr := [32]byte{'r', 'o', 'o', 't'}
|
||||
service.originBlockRoot = gr
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, gr))
|
||||
t.Run("start from finalized", func(t *testing.T) {
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
})
|
||||
t.Run("head requested, error path", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), gr)
|
||||
require.LogsContain(t, hook, "could not get head block root, starting with finalized block as head")
|
||||
})
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
hr := [32]byte{'h', 'e', 'a', 'd'}
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, hr))
|
||||
|
||||
t.Run("start from head", func(t *testing.T) {
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
require.Equal(t, service.startupHeadRoot(), hr)
|
||||
})
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Finalized(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
st, _ := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := st.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
|
||||
require.NoError(t, service.saveGenesisData(ctx, st))
|
||||
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
parentRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, parentRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFinalizedCheckpoint(ctx, ðpb.Checkpoint{Root: parentRoot[:]}))
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 1, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
|
||||
func Test_setupForkchoiceTree_Head(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
resetCfg := features.InitWithReset(&features.Flags{
|
||||
ForceHead: "head",
|
||||
})
|
||||
defer resetCfg()
|
||||
|
||||
genesisState, keys := util.DeterministicGenesisState(t, 64)
|
||||
stateRoot, err := genesisState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err, "Could not hash genesis state")
|
||||
genesis := blocks.NewGenesisBlock(stateRoot[:])
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(genesis)
|
||||
require.NoError(t, err)
|
||||
genesisRoot, err := genesis.Block.HashTreeRoot()
|
||||
require.NoError(t, err, "Could not get signing root")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb), "Could not save genesis block")
|
||||
require.NoError(t, service.saveGenesisData(ctx, genesisState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, genesisState, genesisRoot), "Could not save genesis state")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, genesisRoot), "Could not save genesis state")
|
||||
|
||||
st, err := service.HeadState(ctx)
|
||||
require.NoError(t, err)
|
||||
b, err := util.GenerateFullBlock(st, keys, util.DefaultBlockGenConfig(), primitives.Slot(1))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
preState, err := service.getBlockPreState(ctx, wsb.Block())
|
||||
require.NoError(t, err)
|
||||
postState, err := service.validateStateTransition(ctx, preState, wsb)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, postState))
|
||||
|
||||
b, err = util.GenerateFullBlock(postState, keys, util.DefaultBlockGenConfig(), primitives.Slot(2))
|
||||
require.NoError(t, err)
|
||||
wsb, err = consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
root, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.savePostStateInfo(ctx, root, wsb, preState))
|
||||
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveHeadBlockRoot(ctx, root))
|
||||
cp := service.FinalizedCheckpt()
|
||||
fRoot := service.ensureRootNotZeros([32]byte(cp.Root))
|
||||
require.NotEqual(t, fRoot, root)
|
||||
require.Equal(t, root, service.startupHeadRoot())
|
||||
require.NoError(t, service.setupForkchoiceTree(st))
|
||||
require.Equal(t, 2, service.cfg.ForkChoiceStore.NodeCount())
|
||||
}
|
||||
@@ -3,10 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
testonly = True,
|
||||
	srcs = ["mock.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
|
||||
@@ -37,50 +37,45 @@ var ErrNilState = errors.New("nil state")
|
||||
|
||||
// ChainService defines the mock interface for testing
|
||||
type ChainService struct {
|
||||
NotFinalized bool
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block interfaces.ReadOnlySignedBeaconBlock
|
||||
ExecutionPayloadEnvelope interfaces.ROExecutionPayloadEnvelope
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
|
||||
SyncCommitteeIndices []primitives.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
ReceiveEnvelopeMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
TargetRoot [32]byte
|
||||
HighestReceivedSlot primitives.Slot
|
||||
HighestReceivedRoot [32]byte
|
||||
PayloadStatus primitives.PTCStatus
|
||||
ReceivePayloadAttestationMessageErr error
|
||||
NotFinalized bool
|
||||
Optimistic bool
|
||||
ValidAttestation bool
|
||||
ValidatorsRoot [32]byte
|
||||
PublicKey [fieldparams.BLSPubkeyLength]byte
|
||||
FinalizedCheckPoint *ethpb.Checkpoint
|
||||
CurrentJustifiedCheckPoint *ethpb.Checkpoint
|
||||
PreviousJustifiedCheckPoint *ethpb.Checkpoint
|
||||
Slot *primitives.Slot // Pointer because 0 is a useful value, so checking against it can be incorrect.
|
||||
Balance *precompute.Balance
|
||||
CanonicalRoots map[[32]byte]bool
|
||||
Fork *ethpb.Fork
|
||||
ETH1Data *ethpb.Eth1Data
|
||||
InitSyncBlockRoots map[[32]byte]bool
|
||||
DB db.Database
|
||||
State state.BeaconState
|
||||
Block interfaces.ReadOnlySignedBeaconBlock
|
||||
VerifyBlkDescendantErr error
|
||||
stateNotifier statefeed.Notifier
|
||||
BlocksReceived []interfaces.ReadOnlySignedBeaconBlock
|
||||
SyncCommitteeIndices []primitives.CommitteeIndex
|
||||
blockNotifier blockfeed.Notifier
|
||||
opNotifier opfeed.Notifier
|
||||
Root []byte
|
||||
SyncCommitteeDomain []byte
|
||||
SyncSelectionProofDomain []byte
|
||||
SyncContributionProofDomain []byte
|
||||
SyncCommitteePubkeys [][]byte
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
OptimisticCheckRootReceived [32]byte
|
||||
FinalizedRoots map[[32]byte]bool
|
||||
OptimisticRoots map[[32]byte]bool
|
||||
BlockSlot primitives.Slot
|
||||
SyncingRoot [32]byte
|
||||
Blobs []blocks.VerifiedROBlob
|
||||
TargetRoot [32]byte
|
||||
MockHeadSlot *primitives.Slot
|
||||
}
|
||||
|
||||
func (s *ChainService) Ancestor(ctx context.Context, root []byte, slot primitives.Slot) ([]byte, error) {
|
||||
@@ -340,6 +335,9 @@ func (s *ChainService) ReceiveBlock(ctx context.Context, block interfaces.ReadOn
|
||||
|
||||
// HeadSlot mocks HeadSlot method in chain service.
|
||||
func (s *ChainService) HeadSlot() primitives.Slot {
|
||||
if s.MockHeadSlot != nil {
|
||||
return *s.MockHeadSlot
|
||||
}
|
||||
if s.State == nil {
|
||||
return 0
|
||||
}
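
The new MockHeadSlot field lets a test pin the mocked head slot without constructing a state. A short usage sketch, assuming the usual testing and primitives imports available to this package's tests:

func TestHeadSlot_Mocked(t *testing.T) {
	slot := primitives.Slot(42)
	chain := &ChainService{MockHeadSlot: &slot}
	if chain.HeadSlot() != 42 {
		t.Fatal("expected the pinned head slot to be returned")
	}
}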
|
||||
@@ -450,6 +448,11 @@ func (s *ChainService) IsCanonical(_ context.Context, r [32]byte) (bool, error)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// DependentRoot mocks the base method in the chain service.
|
||||
func (*ChainService) DependentRoot(_ primitives.Epoch) ([32]byte, error) {
|
||||
return [32]byte{}, nil
|
||||
}
|
||||
|
||||
// HasBlock mocks the same method in the chain service.
|
||||
func (s *ChainService) HasBlock(ctx context.Context, rt [32]byte) bool {
|
||||
if s.DB == nil {
|
||||
@@ -647,12 +650,12 @@ func (s *ChainService) ReceivedBlocksLastEpoch() (uint64, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// HighestReceivedBlockSlot mocks the same method in the chain service
func (s *ChainService) HighestReceivedBlockSlot() primitives.Slot {
	if s.ForkChoiceStore != nil {
		return s.ForkChoiceStore.HighestReceivedBlockSlot()
	}
	return 0
}
|
||||
|
||||
// InsertNode mocks the same method in the chain service
|
||||
@@ -697,11 +700,6 @@ func (*ChainService) UnrealizedJustifiedPayloadBlockHash() [32]byte {
|
||||
return [32]byte{}
|
||||
}
|
||||
|
||||
// PayloadBeingSynced mocks the same method in the chain service
|
||||
func (c *ChainService) PayloadBeingSynced(root [32]byte) bool {
|
||||
return root == c.SyncingRoot
|
||||
}
|
||||
|
||||
// BlockBeingSynced mocks the same method in the chain service
|
||||
func (c *ChainService) BlockBeingSynced(root [32]byte) bool {
|
||||
return root == c.SyncingRoot
|
||||
@@ -717,21 +715,3 @@ func (c *ChainService) ReceiveBlob(_ context.Context, b blocks.VerifiedROBlob) e
|
||||
func (c *ChainService) TargetRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
|
||||
return c.TargetRoot, nil
|
||||
}
|
||||
|
||||
// HashInForkchoice mocks the same method in the chain service
|
||||
func (c *ChainService) HashInForkchoice([32]byte) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ReceivePayloadAttestationMessage mocks the same method in the chain service
|
||||
func (c *ChainService) ReceivePayloadAttestationMessage(_ context.Context, _ *ethpb.PayloadAttestationMessage) error {
|
||||
return c.ReceivePayloadAttestationMessageErr
|
||||
}
|
||||
|
||||
func (c *ChainService) GetPTCVote(root [32]byte) primitives.PTCStatus {
|
||||
return c.PayloadStatus
|
||||
}
|
||||
|
||||
func (c *ChainService) HashForBlockRoot(root [32]byte) [32]byte {
|
||||
return root
|
||||
}
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
package testing
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
)
|
||||
|
||||
// ReceiveExecutionPayloadEnvelope mocks the method in chain service.
|
||||
func (s *ChainService) ReceiveExecutionPayloadEnvelope(ctx context.Context, env interfaces.ROSignedExecutionPayloadEnvelope, _ das.AvailabilityStore) error {
|
||||
if s.ReceiveBlockMockErr != nil {
|
||||
return s.ReceiveBlockMockErr
|
||||
}
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
e, err := env.Envelope()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s.State.Slot() == e.Slot() {
|
||||
if err := s.State.SetLatestFullSlot(s.State.Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.ExecutionPayloadEnvelope = e
|
||||
return nil
|
||||
}
|
||||
@@ -3,6 +3,7 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"log.go",
|
||||
"metric.go",
|
||||
"option.go",
|
||||
"service.go",
|
||||
|
||||
beacon-chain/builder/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
package builder
|
||||
|
||||
import "github.com/sirupsen/logrus"
|
||||
|
||||
var log = logrus.WithField("prefix", "builder")
|
||||
@@ -14,10 +14,16 @@ type Option func(s *Service) error
|
||||
// FlagOptions for builder service flag configurations.
|
||||
func FlagOptions(c *cli.Context) ([]Option, error) {
|
||||
endpoint := c.String(flags.MevRelayEndpoint.Name)
|
||||
sszEnabled := c.Bool(flags.EnableBuilderSSZ.Name)
|
||||
var client *builder.Client
|
||||
if endpoint != "" {
|
||||
var opts []builder.ClientOpt
|
||||
if sszEnabled {
|
||||
log.Info("Using APIs with SSZ enabled")
|
||||
opts = append(opts, builder.WithSSZ())
|
||||
}
|
||||
var err error
|
||||
		client, err = builder.NewClient(endpoint, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ErrNoBuilder is used when builder endpoint is not configured.
|
||||
|
||||
beacon-chain/cache/BUILD.bazel (vendored, 8 lines)
@@ -16,13 +16,11 @@ go_library(
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"interfaces.go",
|
||||
"payload_attestation.go",
|
||||
"payload_id.go",
|
||||
"proposer_indices.go",
|
||||
"proposer_indices_disabled.go", # keep
|
||||
"proposer_indices_type.go",
|
||||
"registration.go",
|
||||
"signed_execution_header.go",
|
||||
"skip_slot_cache.go",
|
||||
"subnet_ids.go",
|
||||
"sync_committee.go",
|
||||
@@ -52,7 +50,6 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
@@ -78,17 +75,16 @@ go_test(
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"payload_id_test.go",
|
||||
"private_access_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"registration_test.go",
|
||||
"signed_execution_header_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
"sync_committee_head_state_test.go",
|
||||
"sync_committee_test.go",
|
||||
"sync_subnet_ids_test.go",
|
||||
"tracked_validators_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
@@ -98,10 +94,8 @@ go_test(
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/blst:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
|
||||
@@ -5,6 +5,7 @@ go_library(
|
||||
srcs = [
|
||||
"deposit_fetcher.go",
|
||||
"deposit_inserter.go",
|
||||
"deposit_pruner.go",
|
||||
"deposit_tree.go",
|
||||
"deposit_tree_snapshot.go",
|
||||
"merkle_tree.go",
|
||||
@@ -35,6 +36,7 @@ go_test(
|
||||
srcs = [
|
||||
"deposit_cache_test.go",
|
||||
"deposit_fetcher_test.go",
|
||||
"deposit_pruner_test.go",
|
||||
"deposit_tree_snapshot_test.go",
|
||||
"merkle_tree_test.go",
|
||||
"spec_test.go",
|
||||
|
||||
@@ -903,189 +903,6 @@ func TestMin(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 4))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestDepositMap_WorksCorrectly(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -178,52 +178,6 @@ func (c *Cache) NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int
|
||||
return deposits
|
||||
}
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(c.deposits)) {
|
||||
untilDepositIndex = int64(len(c.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, dp := range c.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
c.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
}
|
||||
|
||||
// InsertPendingDeposit into the database. If deposit or block number are nil
|
||||
// then this method does nothing.
|
||||
func (c *Cache) InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte) {
|
||||
|
||||
@@ -44,67 +44,3 @@ func TestPendingDeposits_OK(t *testing.T) {
|
||||
all := dc.PendingDeposits(context.Background(), nil)
|
||||
assert.Equal(t, len(dc.pendingDeposits), len(all), "PendingDeposits(ctx, nil) did not return all deposits")
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
beacon-chain/cache/depositsnapshot/deposit_pruner.go (new file, vendored, 88 lines)
@@ -0,0 +1,88 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// PruneProofs removes proofs from all deposits whose index is equal or less than untilDepositIndex.
|
||||
func (c *Cache) PruneProofs(ctx context.Context, untilDepositIndex int64) error {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneProofs")
|
||||
defer span.End()
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
if untilDepositIndex >= int64(len(c.deposits)) {
|
||||
untilDepositIndex = int64(len(c.deposits) - 1)
|
||||
}
|
||||
|
||||
for i := untilDepositIndex; i >= 0; i-- {
|
||||
// Finding a nil proof means that all proofs up to this deposit have been already pruned.
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
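
PruneProofs walks backwards and stops at the first proof that is already nil, relying on the invariant that pruning always proceeds from index 0 upward, so everything before that point was pruned earlier. A stripped-down sketch of the same early-exit pattern on a plain slice (illustrative only, not the cache API):

package main

import "fmt"

// pruneUpTo nils out entries [0, until] but stops early at the first
// already-nil entry, since everything before it was pruned previously.
func pruneUpTo(proofs [][]byte, until int) {
	if until >= len(proofs) {
		until = len(proofs) - 1
	}
	for i := until; i >= 0; i-- {
		if proofs[i] == nil {
			break
		}
		proofs[i] = nil
	}
}

func main() {
	proofs := [][]byte{nil, nil, {0x01}, {0x02}, {0x03}}
	pruneUpTo(proofs, 3)
	fmt.Println(proofs) // [[] [] [] [] [3]]
}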
|
||||
|
||||
// PruneAllProofs removes proofs from all deposits.
|
||||
// As EIP-6110 applies and the legacy deposit mechanism is deprecated,
|
||||
// proofs in deposit snapshot are no longer needed.
|
||||
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
|
||||
func (c *Cache) PruneAllProofs(ctx context.Context) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneAllProofs")
|
||||
defer span.End()
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
for i := len(c.deposits) - 1; i >= 0; i-- {
|
||||
if c.deposits[i].Deposit.Proof == nil {
|
||||
break
|
||||
}
|
||||
c.deposits[i].Deposit.Proof = nil
|
||||
}
|
||||
}
|
||||
|
||||
// PrunePendingDeposits removes any deposit which is older than the given deposit merkle tree index.
|
||||
func (c *Cache) PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PrunePendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
if merkleTreeIndex == 0 {
|
||||
log.Debug("Ignoring 0 deposit removal")
|
||||
return
|
||||
}
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
cleanDeposits := make([]*ethpb.DepositContainer, 0, len(c.pendingDeposits))
|
||||
for _, dp := range c.pendingDeposits {
|
||||
if dp.Index >= merkleTreeIndex {
|
||||
cleanDeposits = append(cleanDeposits, dp)
|
||||
}
|
||||
}
|
||||
|
||||
c.pendingDeposits = cleanDeposits
|
||||
pendingDepositsCount.Set(float64(len(c.pendingDeposits)))
|
||||
}
|
||||
|
||||
// PruneAllPendingDeposits removes all pending deposits from the cache.
|
||||
// As EIP-6110 applies and the legacy deposit mechanism is deprecated,
|
||||
// pending deposits in deposit snapshot are no longer needed.
|
||||
// See: https://eips.ethereum.org/EIPS/eip-6110#eth1data-poll-deprecation
|
||||
func (c *Cache) PruneAllPendingDeposits(ctx context.Context) {
|
||||
_, span := trace.StartSpan(ctx, "Cache.PruneAllPendingDeposits")
|
||||
defer span.End()
|
||||
|
||||
c.depositsLock.Lock()
|
||||
defer c.depositsLock.Unlock()
|
||||
|
||||
c.pendingDeposits = make([]*ethpb.DepositContainer, 0)
|
||||
pendingDepositsCount.Set(float64(0))
|
||||
}
|
||||
beacon-chain/cache/depositsnapshot/deposit_pruner_test.go (new file, vendored, 323 lines)
@@ -0,0 +1,323 @@
|
||||
package depositsnapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestPrunePendingDeposits_ZeroMerkleIndex(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 0)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPrunePendingDeposits_OK(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 6)
|
||||
expected := []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PrunePendingDeposits(context.Background(), 10)
|
||||
expected = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPruneAllPendingDeposits(t *testing.T) {
|
||||
dc := Cache{}
|
||||
|
||||
dc.pendingDeposits = []*ethpb.DepositContainer{
|
||||
{Eth1BlockHeight: 2, Index: 2},
|
||||
{Eth1BlockHeight: 4, Index: 4},
|
||||
{Eth1BlockHeight: 6, Index: 6},
|
||||
{Eth1BlockHeight: 8, Index: 8},
|
||||
{Eth1BlockHeight: 10, Index: 10},
|
||||
{Eth1BlockHeight: 12, Index: 12},
|
||||
}
|
||||
|
||||
dc.PruneAllPendingDeposits(context.Background())
|
||||
expected := []*ethpb.DepositContainer{}
|
||||
|
||||
assert.DeepEqual(t, expected, dc.pendingDeposits)
|
||||
}
|
||||
|
||||
func TestPruneProofs_Ok(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 1))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[2].Deposit.Proof)
|
||||
assert.NotNil(t, dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_SomeAlreadyPruned(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: nil, Data: ðpb.Deposit_Data{
|
||||
PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}}, index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(), Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 2))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 99))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
require.NoError(t, dc.PruneProofs(context.Background(), 4))
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
|
||||
func TestPruneAllProofs(t *testing.T) {
|
||||
dc, err := New()
|
||||
require.NoError(t, err)
|
||||
|
||||
deposits := []struct {
|
||||
blkNum uint64
|
||||
deposit *ethpb.Deposit
|
||||
index int64
|
||||
}{
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk0"), 48)}},
|
||||
index: 0,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk1"), 48)}},
|
||||
index: 1,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk2"), 48)}},
|
||||
index: 2,
|
||||
},
|
||||
{
|
||||
blkNum: 0,
|
||||
deposit: ðpb.Deposit{Proof: makeDepositProof(),
|
||||
Data: ðpb.Deposit_Data{PublicKey: bytesutil.PadTo([]byte("pk3"), 48)}},
|
||||
index: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, ins := range deposits {
|
||||
assert.NoError(t, dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{}))
|
||||
}
|
||||
|
||||
dc.PruneAllProofs(context.Background())
|
||||
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[0].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[1].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[2].Deposit.Proof)
|
||||
assert.DeepEqual(t, [][]byte(nil), dc.deposits[3].Deposit.Proof)
|
||||
}
|
||||
beacon-chain/cache/interfaces.go (vendored, 11 lines)
@@ -12,6 +12,7 @@ import (
|
||||
type DepositCache interface {
|
||||
DepositFetcher
|
||||
DepositInserter
|
||||
DepositPruner
|
||||
}
|
||||
|
||||
// DepositFetcher defines a struct which can retrieve deposit information from a store.
|
||||
@@ -23,8 +24,6 @@ type DepositFetcher interface {
|
||||
InsertPendingDeposit(ctx context.Context, d *ethpb.Deposit, blockNum uint64, index int64, depositRoot [32]byte)
|
||||
PendingDeposits(ctx context.Context, untilBlk *big.Int) []*ethpb.Deposit
|
||||
PendingContainers(ctx context.Context, untilBlk *big.Int) []*ethpb.DepositContainer
|
||||
PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
|
||||
PruneProofs(ctx context.Context, untilDepositIndex int64) error
|
||||
FinalizedFetcher
|
||||
}
|
||||
|
||||
@@ -42,6 +41,14 @@ type FinalizedFetcher interface {
|
||||
NonFinalizedDeposits(ctx context.Context, lastFinalizedIndex int64, untilBlk *big.Int) []*ethpb.Deposit
|
||||
}
|
||||
|
||||
// DepositPruner is an interface for pruning deposits and proofs.
|
||||
type DepositPruner interface {
|
||||
PrunePendingDeposits(ctx context.Context, merkleTreeIndex int64)
|
||||
PruneAllPendingDeposits(ctx context.Context)
|
||||
PruneProofs(ctx context.Context, untilDepositIndex int64) error
|
||||
PruneAllProofs(ctx context.Context)
|
||||
}
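
Since the deposit snapshot cache is expected to satisfy this interface, a compile-time assertion is a cheap way to keep the two in sync. A hedged sketch: the assertion below is not part of the diff, and it assumes the depositsnapshot package can import this cache package without a cycle.

package depositsnapshot

import "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"

// Compile-time check that *Cache implements cache.DepositPruner.
var _ cache.DepositPruner = (*Cache)(nil)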
|
||||
|
||||
// FinalizedDeposits defines a method to access a merkle tree containing deposits and their indexes.
|
||||
type FinalizedDeposits interface {
|
||||
Deposits() MerkleTree
|
||||
|
||||
beacon-chain/cache/payload_attestation.go (vendored, 132 lines)
@@ -1,132 +0,0 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
var errNilPayloadAttestationMessage = errors.New("nil Payload Attestation Message")
|
||||
|
||||
// PayloadAttestationCache keeps a map of all the PTC votes that were seen,
|
||||
// already aggregated. The key is the beacon block root.
|
||||
type PayloadAttestationCache struct {
|
||||
root [32]byte
|
||||
attestations [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// Seen returns true if a vote for the given Beacon Block Root has already been processed
|
||||
// for this Payload Timeliness Committee index. This will return true even if
|
||||
// the Payload status differs.
|
||||
func (p *PayloadAttestationCache) Seen(root [32]byte, idx uint64) bool {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
if p.root != root {
|
||||
return false
|
||||
}
|
||||
for _, agg := range p.attestations {
|
||||
if agg == nil {
|
||||
continue
|
||||
}
|
||||
if agg.AggregationBits.BitAt(idx) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// messageToPayloadAttestation creates a PayloadAttestation with a single
|
||||
// aggregated bit from the passed PayloadAttestationMessage
|
||||
func messageToPayloadAttestation(att *eth.PayloadAttestationMessage, idx uint64) *eth.PayloadAttestation {
|
||||
bits := primitives.NewPayloadAttestationAggregationBits()
|
||||
bits.SetBitAt(idx, true)
|
||||
data := ð.PayloadAttestationData{
|
||||
BeaconBlockRoot: bytesutil.SafeCopyBytes(att.Data.BeaconBlockRoot),
|
||||
Slot: att.Data.Slot,
|
||||
PayloadStatus: att.Data.PayloadStatus,
|
||||
}
|
||||
return ð.PayloadAttestation{
|
||||
AggregationBits: bits,
|
||||
Data: data,
|
||||
Signature: bytesutil.SafeCopyBytes(att.Signature),
|
||||
}
|
||||
}
|
||||
|
||||
// aggregateSigFromMessage returns the aggregated signature from a Payload
|
||||
// Attestation by adding the passed signature in the PayloadAttestationMessage,
|
||||
// no signature validation is performed.
|
||||
func aggregateSigFromMessage(aggregated *eth.PayloadAttestation, message *eth.PayloadAttestationMessage) ([]byte, error) {
|
||||
aggSig, err := bls.SignatureFromBytesNoValidation(aggregated.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sig, err := bls.SignatureFromBytesNoValidation(message.Signature)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bls.AggregateSignatures([]bls.Signature{aggSig, sig}).Marshal(), nil
|
||||
}
|
||||
|
||||
// Add adds a PayloadAttestationMessage to the internal cache of aggregated
|
||||
// PayloadAttestations.
|
||||
// If the index has already been seen for this attestation status the function does nothing.
|
||||
// If the root is not the cached root, the function will clear the previous cache
|
||||
// This function assumes that the message has already been validated. In
|
||||
// particular that the signature is valid and that the block root corresponds to
|
||||
// the given slot in the attestation data.
|
||||
func (p *PayloadAttestationCache) Add(att *eth.PayloadAttestationMessage, idx uint64) error {
|
||||
if att == nil || att.Data == nil || att.Data.BeaconBlockRoot == nil {
|
||||
return errNilPayloadAttestationMessage
|
||||
}
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
root := [32]byte(att.Data.BeaconBlockRoot)
|
||||
if p.root != root {
|
||||
p.root = root
|
||||
p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
|
||||
}
|
||||
agg := p.attestations[att.Data.PayloadStatus]
|
||||
if agg == nil {
|
||||
p.attestations[att.Data.PayloadStatus] = messageToPayloadAttestation(att, idx)
|
||||
return nil
|
||||
}
|
||||
if agg.AggregationBits.BitAt(idx) {
|
||||
return nil
|
||||
}
|
||||
sig, err := aggregateSigFromMessage(agg, att)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
agg.Signature = sig
|
||||
agg.AggregationBits.SetBitAt(idx, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the aggregated PayloadAttestation for the given root and status
|
||||
// if the root doesn't exist or status is invalid, the function returns nil.
|
||||
func (p *PayloadAttestationCache) Get(root [32]byte, status primitives.PTCStatus) *eth.PayloadAttestation {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
if p.root != root {
|
||||
return nil
|
||||
}
|
||||
if status >= primitives.PAYLOAD_INVALID_STATUS {
|
||||
return nil
|
||||
}
|
||||
|
||||
return eth.CopyPayloadAttestation(p.attestations[status])
|
||||
}
|
||||
|
||||
// Clear clears the internal map
|
||||
func (p *PayloadAttestationCache) Clear() {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
p.root = [32]byte{}
|
||||
p.attestations = [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{}
|
||||
}
|
||||
beacon-chain/cache/payload_attestation_test.go (vendored, 143 lines)
@@ -1,143 +0,0 @@
|
||||
package cache

import (
    "testing"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/crypto/bls"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestPayloadAttestationCache(t *testing.T) {
    p := &PayloadAttestationCache{}

    // Test Seen before adding
    root := [32]byte{'r'}
    idx := uint64(5)
    require.Equal(t, false, p.Seen(root, idx))

    // Test Add
    msg := &eth.PayloadAttestationMessage{
        Signature: bls.NewAggregateSignature().Marshal(),
        Data: &eth.PayloadAttestationData{
            BeaconBlockRoot: root[:],
            Slot:            1,
            PayloadStatus:   primitives.PAYLOAD_PRESENT,
        },
    }

    // Add new root
    require.NoError(t, p.Add(msg, idx))
    require.Equal(t, true, p.Seen(root, idx))
    require.Equal(t, root, p.root)
    att := p.attestations[primitives.PAYLOAD_PRESENT]
    indices := att.AggregationBits.BitIndices()
    require.DeepEqual(t, []int{int(idx)}, indices)
    singleSig := bytesutil.SafeCopyBytes(msg.Signature)
    require.DeepEqual(t, singleSig, att.Signature)

    // Test Seen
    require.Equal(t, true, p.Seen(root, idx))
    require.Equal(t, false, p.Seen(root, idx+1))

    // Add another attestation on the same data
    msg2 := &eth.PayloadAttestationMessage{
        Signature: bls.NewAggregateSignature().Marshal(),
        Data:      att.Data,
    }
    idx2 := uint64(7)
    require.NoError(t, p.Add(msg2, idx2))
    att = p.attestations[primitives.PAYLOAD_PRESENT]
    indices = att.AggregationBits.BitIndices()
    require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
    require.DeepNotEqual(t, att.Signature, msg.Signature)

    // Try again the same index
    require.NoError(t, p.Add(msg2, idx2))
    att2 := p.attestations[primitives.PAYLOAD_PRESENT]
    indices = att.AggregationBits.BitIndices()
    require.DeepEqual(t, []int{int(idx), int(idx2)}, indices)
    require.DeepEqual(t, att, att2)

    // Test Seen
    require.Equal(t, true, p.Seen(root, idx2))
    require.Equal(t, false, p.Seen(root, idx2+1))

    // Add another attestation for a different payload status
    msg3 := &eth.PayloadAttestationMessage{
        Signature: bls.NewAggregateSignature().Marshal(),
        Data: &eth.PayloadAttestationData{
            BeaconBlockRoot: root[:],
            Slot:            1,
            PayloadStatus:   primitives.PAYLOAD_WITHHELD,
        },
    }
    idx3 := uint64(17)

    require.NoError(t, p.Add(msg3, idx3))
    att3 := p.attestations[primitives.PAYLOAD_WITHHELD]
    indices3 := att3.AggregationBits.BitIndices()
    require.DeepEqual(t, []int{int(idx3)}, indices3)
    require.DeepEqual(t, singleSig, att3.Signature)

    // Add a different root
    root2 := [32]byte{'s'}
    msg.Data.BeaconBlockRoot = root2[:]
    require.NoError(t, p.Add(msg, idx))
    require.Equal(t, root2, p.root)
    require.Equal(t, true, p.Seen(root2, idx))
    require.Equal(t, false, p.Seen(root, idx))
    att = p.attestations[primitives.PAYLOAD_PRESENT]
    indices = att.AggregationBits.BitIndices()
    require.DeepEqual(t, []int{int(idx)}, indices)
}

func TestPayloadAttestationCache_Get(t *testing.T) {
    root := [32]byte{1, 2, 3}
    wrongRoot := [32]byte{4, 5, 6}
    status := primitives.PAYLOAD_PRESENT
    invalidStatus := primitives.PAYLOAD_INVALID_STATUS

    cache := &PayloadAttestationCache{
        root: root,
        attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{
            {
                Signature: []byte{1},
            },
            {
                Signature: []byte{2},
            },
            {
                Signature: []byte{3},
            },
        },
    }

    t.Run("valid root and status", func(t *testing.T) {
        result := cache.Get(root, status)
        require.NotNil(t, result, "Expected a non-nil result")
        require.DeepEqual(t, cache.attestations[status], result)
    })

    t.Run("invalid root", func(t *testing.T) {
        result := cache.Get(wrongRoot, status)
        require.IsNil(t, result)
    })

    t.Run("status out of bound", func(t *testing.T) {
        result := cache.Get(root, invalidStatus)
        require.IsNil(t, result)
    })

    t.Run("no attestation", func(t *testing.T) {
        emptyCache := &PayloadAttestationCache{
            root:         root,
            attestations: [primitives.PAYLOAD_INVALID_STATUS]*eth.PayloadAttestation{},
        }

        result := emptyCache.Get(root, status)
        require.IsNil(t, result)
    })
}
76 beacon-chain/cache/signed_execution_header.go vendored
@@ -1,76 +0,0 @@
package cache

import (
    "bytes"
    "sync"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// ExecutionPayloadHeaders is used by the sync service to store signed execution payload headers after they pass validation,
// and to filter out subsequent headers with a lower value.
// The signed header from this cache could be used by the proposer when proposing the next slot.
type ExecutionPayloadHeaders struct {
    headers map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader
    sync.RWMutex
}

func NewExecutionPayloadHeaders() *ExecutionPayloadHeaders {
    return &ExecutionPayloadHeaders{
        headers: make(map[primitives.Slot][]*enginev1.SignedExecutionPayloadHeader),
    }
}

// SaveSignedExecutionPayloadHeader saves the signed execution payload header to the cache.
// The cache stores headers for up to two slots. If the input slot is higher than the lowest slot
// currently in the cache, the lowest slot is removed to make space for the new header.
// Only the highest-value header for a given parent block hash will be stored.
// This function assumes the caller has already checked that the header's slot is the current or next slot; it does not perform slot validation.
func (c *ExecutionPayloadHeaders) SaveSignedExecutionPayloadHeader(header *enginev1.SignedExecutionPayloadHeader) {
    c.Lock()
    defer c.Unlock()

    for s := range c.headers {
        if s+1 < header.Message.Slot {
            delete(c.headers, s)
        }
    }

    // Add or update the header in the map
    if _, ok := c.headers[header.Message.Slot]; !ok {
        c.headers[header.Message.Slot] = []*enginev1.SignedExecutionPayloadHeader{header}
    } else {
        found := false
        for i, h := range c.headers[header.Message.Slot] {
            if bytes.Equal(h.Message.ParentBlockHash, header.Message.ParentBlockHash) && bytes.Equal(h.Message.ParentBlockRoot, header.Message.ParentBlockRoot) {
                if header.Message.Value > h.Message.Value {
                    c.headers[header.Message.Slot][i] = header
                }
                found = true
                break
            }
        }
        if !found {
            c.headers[header.Message.Slot] = append(c.headers[header.Message.Slot], header)
        }
    }
}

// SignedExecutionPayloadHeader returns the signed payload header for the given slot, parent block hash, and parent block root.
// Returns nil if the header is not found.
// This should be used when the caller wants the header to match the parent block hash and parent block root, such as a proposer choosing a header to propose.
func (c *ExecutionPayloadHeaders) SignedExecutionPayloadHeader(slot primitives.Slot, parentBlockHash []byte, parentBlockRoot []byte) *enginev1.SignedExecutionPayloadHeader {
    c.RLock()
    defer c.RUnlock()

    if headers, ok := c.headers[slot]; ok {
        for _, header := range headers {
            if bytes.Equal(header.Message.ParentBlockHash, parentBlockHash) && bytes.Equal(header.Message.ParentBlockRoot, parentBlockRoot) {
                return header
            }
        }
    }

    return nil
}
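
As a rough illustration of how the two methods above were meant to combine (validated bids saved as they arrive, only the highest-value header kept per parent, at most two slots retained), here is a hypothetical proposer-side sketch. The helper bestBidForParent, the example package, and the import paths are assumptions, not repository code.

package example

import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// bestBidForParent is a hypothetical proposer-side helper: every validated bid is
// handed to the cache, which keeps only the highest-value header per parent block
// and evicts older slots; the proposer then asks for the header matching its
// parent block hash and root.
func bestBidForParent(c *cache.ExecutionPayloadHeaders, bids []*enginev1.SignedExecutionPayloadHeader, slot primitives.Slot, parentHash, parentRoot []byte) *enginev1.SignedExecutionPayloadHeader {
    for _, bid := range bids {
        c.SaveSignedExecutionPayloadHeader(bid)
    }
    // Returns nil if nothing matched or the slot was already evicted.
    return c.SignedExecutionPayloadHeader(slot, parentHash, parentRoot)
}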
243 beacon-chain/cache/signed_execution_header_test.go vendored
@@ -1,243 +0,0 @@
package cache

import (
    "testing"

    enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func Test_SaveSignedExecutionPayloadHeader(t *testing.T) {
    t.Run("First header should be added to cache", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header)
        require.Equal(t, 1, len(c.headers))
        require.Equal(t, header, c.headers[1][0])
    })

    t.Run("Second header with higher slot should be added, and both slots should be in cache", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)
        require.Equal(t, 2, len(c.headers))
        require.Equal(t, header1, c.headers[1][0])
        require.Equal(t, header2, c.headers[2][0])
    })

    t.Run("Third header with higher slot should replace the oldest slot", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           100,
            },
        }
        header3 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            3,
                ParentBlockHash: []byte("parent3"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)
        c.SaveSignedExecutionPayloadHeader(header3)
        require.Equal(t, 2, len(c.headers))
        require.Equal(t, header2, c.headers[2][0])
        require.Equal(t, header3, c.headers[3][0])
    })

    t.Run("Header with same slot but higher value should replace the existing one", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           200,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)
        require.Equal(t, 1, len(c.headers[2]))
        require.Equal(t, header2, c.headers[2][0])
    })

    t.Run("Header with different parent block hash should be appended to the same slot", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           200,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)
        require.Equal(t, 2, len(c.headers[2]))
        require.Equal(t, header1, c.headers[2][0])
        require.Equal(t, header2, c.headers[2][1])
    })
}

func TestSignedExecutionPayloadHeader(t *testing.T) {
    t.Run("Return header when slot and parentBlockHash match", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                ParentBlockRoot: []byte("root1"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header)
        result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte("root1"))
        require.NotNil(t, result)
        require.Equal(t, header, result)
    })

    t.Run("Return nil when no matching slot and parentBlockHash", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                ParentBlockRoot: []byte("root1"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header)
        result := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte("root1"))
        require.IsNil(t, result)
    })

    t.Run("Return nil when no matching slot and parentBlockRoot", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                ParentBlockRoot: []byte("root1"),
                Value:           100,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header)
        result := c.SignedExecutionPayloadHeader(2, []byte("parent1"), []byte("root2"))
        require.IsNil(t, result)
    })

    t.Run("Return header when there are two slots in the cache and a match is found", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           200,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)

        // Check for the first header
        result1 := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
        require.NotNil(t, result1)
        require.Equal(t, header1, result1)

        // Check for the second header
        result2 := c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
        require.NotNil(t, result2)
        require.Equal(t, header2, result2)
    })

    t.Run("Return nil when slot is evicted from cache", func(t *testing.T) {
        c := NewExecutionPayloadHeaders()
        header1 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            1,
                ParentBlockHash: []byte("parent1"),
                Value:           100,
            },
        }
        header2 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            2,
                ParentBlockHash: []byte("parent2"),
                Value:           200,
            },
        }
        header3 := &enginev1.SignedExecutionPayloadHeader{
            Message: &enginev1.ExecutionPayloadHeaderEPBS{
                Slot:            3,
                ParentBlockHash: []byte("parent3"),
                Value:           300,
            },
        }
        c.SaveSignedExecutionPayloadHeader(header1)
        c.SaveSignedExecutionPayloadHeader(header2)
        c.SaveSignedExecutionPayloadHeader(header3)

        // The first slot should be evicted, so result should be nil
        result := c.SignedExecutionPayloadHeader(1, []byte("parent1"), []byte{})
        require.IsNil(t, result)

        // The second slot should still be present
        result = c.SignedExecutionPayloadHeader(2, []byte("parent2"), []byte{})
        require.NotNil(t, result)
        require.Equal(t, header2, result)

        // The third slot should be present
        result = c.SignedExecutionPayloadHeader(3, []byte("parent3"), []byte{})
        require.NotNil(t, result)
        require.Equal(t, header3, result)
    })
}
138 beacon-chain/cache/tracked_validators.go vendored
@@ -1,49 +1,139 @@
 package cache
 
 import (
-    "sync"
+    "strconv"
+    "time"
 
+    "github.com/patrickmn/go-cache"
+    "github.com/pkg/errors"
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promauto"
     "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
+    "github.com/sirupsen/logrus"
 )
 
-type TrackedValidator struct {
-    Active       bool
-    FeeRecipient primitives.ExecutionAddress
-    Index        primitives.ValidatorIndex
-}
+const (
+    defaultExpiration = 1 * time.Hour
+    cleanupInterval   = 15 * time.Minute
+)
 
-type TrackedValidatorsCache struct {
-    sync.Mutex
-    trackedValidators map[primitives.ValidatorIndex]TrackedValidator
-}
+type (
+    TrackedValidator struct {
+        Active       bool
+        FeeRecipient primitives.ExecutionAddress
+        Index        primitives.ValidatorIndex
+    }
+
+    TrackedValidatorsCache struct {
+        trackedValidators *cache.Cache
+    }
+)
+
+var (
+    // Metrics.
+    trackedValidatorsCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "tracked_validators_cache_miss",
+        Help: "The number of tracked validators requests that are not present in the cache.",
+    })
+
+    trackedValidatorsCacheTotal = promauto.NewCounter(prometheus.CounterOpts{
+        Name: "tracked_validators_cache_total",
+        Help: "The total number of tracked validators requests in the cache.",
+    })
+
+    trackedValidatorsCacheCount = promauto.NewGauge(prometheus.GaugeOpts{
+        Name: "tracked_validators_cache_count",
+        Help: "The number of tracked validators in the cache.",
+    })
+)
 
+// NewTrackedValidatorsCache creates a new cache for tracking validators.
 func NewTrackedValidatorsCache() *TrackedValidatorsCache {
     return &TrackedValidatorsCache{
-        trackedValidators: make(map[primitives.ValidatorIndex]TrackedValidator),
+        trackedValidators: cache.New(defaultExpiration, cleanupInterval),
     }
 }
 
+// Validator retrieves a tracked validator from the cache (if present).
 func (t *TrackedValidatorsCache) Validator(index primitives.ValidatorIndex) (TrackedValidator, bool) {
-    t.Lock()
-    defer t.Unlock()
-    val, ok := t.trackedValidators[index]
-    return val, ok
+    trackedValidatorsCacheTotal.Inc()
+
+    key := toCacheKey(index)
+    item, ok := t.trackedValidators.Get(key)
+    if !ok {
+        trackedValidatorsCacheMiss.Inc()
+        return TrackedValidator{}, false
+    }
+
+    val, ok := item.(TrackedValidator)
+    if !ok {
+        logrus.Errorf("Failed to cast tracked validator from cache, got unexpected item type %T", item)
+        return TrackedValidator{}, false
+    }
+
+    return val, true
 }
 
+// Set adds a tracked validator to the cache.
 func (t *TrackedValidatorsCache) Set(val TrackedValidator) {
-    t.Lock()
-    defer t.Unlock()
-    t.trackedValidators[val.Index] = val
+    key := toCacheKey(val.Index)
+    t.trackedValidators.Set(key, val, cache.DefaultExpiration)
 }
 
+// Prune removes all tracked validators from the cache.
 func (t *TrackedValidatorsCache) Prune() {
-    t.Lock()
-    defer t.Unlock()
-    t.trackedValidators = make(map[primitives.ValidatorIndex]TrackedValidator)
+    t.trackedValidators.Flush()
+    trackedValidatorsCacheCount.Set(0)
 }
 
+// Validating returns true if there is at least one tracked validator in the cache.
 func (t *TrackedValidatorsCache) Validating() bool {
-    t.Lock()
-    defer t.Unlock()
-    return len(t.trackedValidators) > 0
+    count := t.trackedValidators.ItemCount()
+    trackedValidatorsCacheCount.Set(float64(count))
+
+    return count > 0
 }
+
+// ItemCount returns the number of tracked validators in the cache.
+func (t *TrackedValidatorsCache) ItemCount() int {
+    count := t.trackedValidators.ItemCount()
+    trackedValidatorsCacheCount.Set(float64(count))
+
+    return count
+}
+
+// Indices returns a map of validator indices that are being tracked.
+func (t *TrackedValidatorsCache) Indices() map[primitives.ValidatorIndex]bool {
+    items := t.trackedValidators.Items()
+    count := len(items)
+    trackedValidatorsCacheCount.Set(float64(count))
+
+    indices := make(map[primitives.ValidatorIndex]bool, count)
+
+    for cacheKey := range items {
+        index, err := fromCacheKey(cacheKey)
+        if err != nil {
+            logrus.WithError(err).Error("Failed to get validator index from cache key")
+            continue
+        }
+
+        indices[index] = true
+    }
+
+    return indices
+}
+
+// toCacheKey creates a cache key from the validator index.
+func toCacheKey(validatorIndex primitives.ValidatorIndex) string {
+    return strconv.FormatUint(uint64(validatorIndex), 10)
+}
+
+// fromCacheKey gets the validator index from the cache key.
+func fromCacheKey(key string) (primitives.ValidatorIndex, error) {
+    validatorIndex, err := strconv.ParseUint(key, 10, 64)
+    if err != nil {
+        return 0, errors.Wrapf(err, "parse Uint: %s", key)
+    }
+
+    return primitives.ValidatorIndex(validatorIndex), nil
+}
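
The right-hand side of this hunk swaps the mutex-guarded map for patrickmn/go-cache, so tracked validators now expire on their own (a one-hour default TTL with a fifteen-minute cleanup sweep) while Prometheus counters record lookups and misses. A small hypothetical sketch of the resulting API follows; the helper refreshTrackedValidators, the example package, and the import paths are assumptions, not repository code.

package example

import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// refreshTrackedValidators is a hypothetical caller that re-registers the validators
// it still cares about; entries that are not re-Set within the cache's expiration
// window age out on their own.
func refreshTrackedValidators(c *cache.TrackedValidatorsCache, vals []cache.TrackedValidator) map[primitives.ValidatorIndex]bool {
    for _, v := range vals {
        c.Set(v) // keyed internally by the validator index, stored with the default TTL
    }
    if !c.Validating() {
        return nil
    }
    return c.Indices() // indices currently tracked and not yet expired
}

Under this TTL-based design, periodic re-registration (rather than explicit Prune calls) becomes the usual way entries stay alive, and the new counters make hit and miss rates observable.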
Some files were not shown because too many files have changed in this diff.