Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-10 13:58:09 -05:00.

Compare commits: v5.2.0-rc. ... check_epoc (96 commits)
| SHA1 |
|---|
| ef626a4648 |
| 910609a75f |
| f9c202190a |
| 4a63a194b1 |
| d887536eb7 |
| 1069da1cd2 |
| 4a487ba3bc |
| bf81cd4449 |
| 00337fe005 |
| bb3fba4d8e |
| 89967fe209 |
| 56712b5e49 |
| 0be9391e62 |
| 4a9c60f75f |
| 9cf6b93356 |
| b4220e35c4 |
| 536cded4cc |
| 86fc64c917 |
| 5d6a406829 |
| 2c78e501b3 |
| c8cb0f37b2 |
| 78722239da |
| 3ffef024c7 |
| a1eef44492 |
| 2845ab9365 |
| 4f43c15ebb |
| e473d7cc4d |
| 794a05af26 |
| 15df13c7e6 |
| b76f7fed2f |
| e263687ea5 |
| 0b16c79c35 |
| dc002c2806 |
| e7e48dcaf9 |
| 8f43f6cc84 |
| e07341e1d5 |
| ef293e52f8 |
| 72cc63a6a3 |
| 34ff4c3ea9 |
| e8c968326a |
| 2000ef457b |
| e36564c4d3 |
| e5784d09f0 |
| e577bb0dcf |
| 153d1872ae |
| 80aa811ab9 |
| 39cf2c8f06 |
| e99df5e489 |
| 393e63e8e5 |
| c48d40907c |
| a21f544219 |
| 705a9e8dcd |
| 2dcb015470 |
| 211e1a4b7c |
| 1efca9c28d |
| 97d7ca828b |
| 31df250496 |
| 8a439a6f5d |
| 8cff9356f1 |
| afeb05c9a1 |
| fa16232924 |
| 1f720bdbf4 |
| 79ea77ff57 |
| d35cd0788e |
| 093e3df80a |
| 699a3b07a7 |
| 937d441e2e |
| ead08d56d0 |
| f55e62287a |
| c7b2838873 |
| 9e7c1d6af6 |
| 6ce6b869e5 |
| dbd53bd70d |
| d04b361cc3 |
| 96b31a9f64 |
| a7c3004115 |
| 30d5749ef6 |
| bc69ab8a44 |
| ed7b511949 |
| 0b7c005d7d |
| 65e8c37b48 |
| 689015ff01 |
| 08c14f02f6 |
| 4bb0b44f16 |
| 29237cb0bc |
| 2b25ede641 |
| b7de64a340 |
| 11aa51e033 |
| fa0dc09ce0 |
| d93a1b671c |
| 1d8ffadd4f |
| ac1717f1e4 |
| 6e6012b12f |
| 008f157e17 |
| 7afb8c3c86 |
| e925d35d55 |
@@ -1 +1 @@
-7.1.0
+7.4.1
.github/PULL_REQUEST_TEMPLATE.md (vendored): 2 changed lines
@@ -33,5 +33,5 @@ Fixes #
**Acknowledgements**

- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
-- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
+- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
.github/actions/gomodtidy/Dockerfile (vendored): 2 changed lines
@@ -1,4 +1,4 @@
-FROM golang:1.22-alpine
+FROM golang:1.23-alpine

COPY entrypoint.sh /entrypoint.sh
.github/workflows/changelog.yml (vendored): 47 changed lines
@@ -1,33 +1,34 @@
-name: CI
-# This workflow will build a golang project
-# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
+name: changelog

on:
  pull_request:
-    branches:
-      - develop
+    branches: [ "develop" ]

jobs:
-  changed_files:
-    runs-on: ubuntu-latest
-    name: Check CHANGELOG.md
+  run-changelog-check:
+    runs-on: ubuntu-latest
    steps:
-      - uses: actions/checkout@v4
-      - name: changelog modified
-        id: changelog-modified
+      - name: Checkout source code
+        uses: actions/checkout@v3
+
+      - name: Download unclog binary
+        uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
+        with:
+          repo: OffchainLabs/unclog
+          version: "tags/v0.1.3"
+          file: "unclog"
+
+      - name: Get new changelog files
+        id: new-changelog-files
        uses: tj-actions/changed-files@v45
        with:
-          files: CHANGELOG.md
+          files: |
+            changelog/**.md

-      - name: List all changed files
+      - name: Run lint command
        env:
-          ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
-        run: |
-          if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
-          then
-            echo "CHANGELOG.md was modified.";
-            exit 0;
-          else
-            echo "CHANGELOG.md was not modified.";
-            echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
-            echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
-            exit 1;
-          fi
+          ALL_ADDED_MARKDOWN: ${{ steps.new-changelog-files.outputs.added_files }}
+        run: chmod +x unclog && ./unclog check -fragment-env=ALL_ADDED_MARKDOWN
.github/workflows/clang-format.yml (vendored, new file): 21 lines
@@ -0,0 +1,21 @@
+name: Protobuf Format
+
+on:
+  push:
+    branches: [ '*' ]
+  pull_request:
+    branches: [ '*' ]
+  merge_group:
+    types: [checks_requested]
+
+jobs:
+  clang-format-checking:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      # Is this step failing for you?
+      # Run: clang-format -i proto/**/*.proto
+      # See: https://clang.llvm.org/docs/ClangFormat.html
+      - uses: RafikFarhad/clang-format-github-action@v3
+        with:
+          sources: "proto/**/*.proto"
.github/workflows/fuzz.yml (vendored): 4 changed lines
@@ -16,7 +16,7 @@ jobs:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
-          go-version: '1.22.3'
+          go-version: '1.23.5'
      - id: list
        uses: shogo82148/actions-go-fuzz/list@v0
        with:
@@ -36,7 +36,7 @@ jobs:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
-          go-version: '1.22.3'
+          go-version: '1.23.5'
      - uses: shogo82148/actions-go-fuzz/run@v0
        with:
          packages: ${{ matrix.package }}
.github/workflows/go.yml (vendored): 12 changed lines
@@ -28,10 +28,10 @@ jobs:
    steps:
      - name: Checkout
        uses: actions/checkout@v4
-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v4
        with:
-          go-version: '1.22.6'
+          go-version: '1.23.5'
      - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
@@ -45,16 +45,16 @@ jobs:
      - name: Checkout
        uses: actions/checkout@v4

-      - name: Set up Go 1.22
+      - name: Set up Go 1.23
        uses: actions/setup-go@v4
        with:
-          go-version: '1.22.6'
+          go-version: '1.23.5'
        id: go

      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v5
        with:
-          version: v1.56.1
+          version: v1.63.4
          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

  build:
@@ -64,7 +64,7 @@ jobs:
      - name: Set up Go 1.x
        uses: actions/setup-go@v4
        with:
-          go-version: '1.22.6'
+          go-version: '1.23.5'
        id: go

      - name: Check out code into the Go module directory
@@ -1,28 +1,20 @@
run:
-  skip-files:
+  timeout: 10m
+  go: '1.23.5'
+
+issues:
+  exclude-files:
    - validator/web/site_data.go
    - .*_test.go
-  skip-dirs:
+  exclude-dirs:
    - proto
    - tools/analyzers
-  timeout: 10m
-  go: '1.22.6'

linters:
  enable-all: true
  disable:
-    # Deprecated linters:
-    - deadcode
-    - exhaustivestruct
-    - golint
    - govet
-    - ifshort
-    - interfacer
-    - maligned
-    - nosnakecase
-    - scopelint
-    - structcheck
-    - varcheck

    # Disabled for now:
    - asasalint
@@ -34,6 +26,8 @@ linters:
    - dogsled
    - dupl
    - durationcheck
    - errname
+    - err113
    - exhaustive
    - exhaustruct
    - forbidigo
@@ -47,17 +41,17 @@ linters:
    - gocyclo
    - godot
    - godox
-    - goerr113
    - gofumpt
-    - gomnd
    - gomoddirectives
    - gosec
    - inamedparam
    - interfacebloat
+    - intrange
    - ireturn
    - lll
    - maintidx
    - makezero
+    - mnd
    - musttag
    - nakedret
    - nestif
@@ -72,6 +66,7 @@ linters:
    - predeclared
    - promlinter
    - protogetter
+    - recvcheck
    - revive
    - spancheck
    - staticcheck
CHANGELOG.md: 143 changed lines
@@ -4,7 +4,23 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

-## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...HEAD)
+## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)
+
+Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.
+This release is **mandatory** for all validator clients using mev-boost with a gas limit increase.
+Without upgrading to this release, validator clients will default to using local execution blocks
+when the gas limit starts to increase.
+
+This release has several fixes and new features. In this release, we have enabled QUIC protocol by
+default, which uses port 13000 for `--p2p-quic-port`. This may be a [breaking change](https://github.com/prysmaticlabs/prysm/pull/14688#issuecomment-2516713826)
+if you're using port 13000 already. This release has some improvements for raising the gas limit,
+but there are [known issues](https://hackmd.io/@ttsao/prysm-gas-limit) with the proposer settings
+file provided gas limit not being respected for mev-boost outsourced blocks. Signalling an increase
+for the gas limit works perfectly for local block production as of this release. See [pumpthegas.org](https://pumpthegas.org) for more info on raising the gas limit on L1.
+
+Notable features:
+- Prysm can reuse blobs from the EL via engine_getBlobsV1, [potentially saving bandwidth](https://hackmd.io/@ttsao/get-blobs-early-results).
+- QUIC is enabled by default. This is a UDP based networking protocol with default port 13000.

### Added
@@ -33,6 +49,8 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Added a Prometheus error counter metric for SSE requests.
- Save light client updates and bootstraps in DB.
- Added more comprehensive tests for `BlockToLightClientHeader`. [PR](https://github.com/prysmaticlabs/prysm/pull/14699)
+- Added light client feature flag check to RPC handlers. [PR](https://github.com/prysmaticlabs/prysm/pull/14736)
+- Light client: Add better error handling. [PR](https://github.com/prysmaticlabs/prysm/pull/14749)

### Changed

@@ -42,7 +60,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
- Switch to compounding when consolidating with source==target.
- Revert block db save when saving state fails.
-- Return false from HasBlock if the block is being synced.
+- Return false from HasBlock if the block is being synced.
- Cleanup forkchoice on failed insertions.
- Use read only validator for core processing to avoid unnecessary copying.
- Use ROBlock across block processing pipeline.
@@ -55,7 +73,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Simplified `EjectedValidatorIndices`.
- `engine_newPayloadV4`,`engine_getPayloadV4` are changes due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
- Fixed various small things in state-native code.
-- Use ROBlock earlier in block syncing pipeline.
+- Use ROBlock earlier in block syncing pipeline.
- Changed the signature of `ProcessPayload`.
- Only Build the Protobuf state once during serialization.
- Capella blocks are execution.
@@ -75,8 +93,8 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Updated `Blobs` endpoint to return additional metadata fields.
- Made QUIC the default method to connect with peers.
- Check kzg commitments align with blobs and proofs for beacon api end point.
- Increase Max Payload Size in Gossip.
- Revert "Proposer checks gas limit before accepting builder's bid".
- Updated quic-go to v0.48.2 .

### Deprecated

@@ -88,7 +106,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Removed finalized validator index cache, no longer needed.
- Removed validator queue position log on key reload and wait for activation.
- Removed outdated spectest exclusions for EIP-6110.
-- Removed support for starting a beacon node with a deterministic interop genesis state via interop flags. Alteratively, create a genesis state with prysmctl and use `--genesis-state`. This removes about 9Mb (~11%) of unnecessary code and dependencies from the final production binary.
+- Removed support for starting a beacon node with a deterministic interop genesis state via interop flags. Alternatively, create a genesis state with prysmctl and use `--genesis-state`. This removes about 9Mb (~11%) of unnecessary code and dependencies from the final production binary.
- Removed kzg proof check from blob reconstructor.

### Fixed

@@ -102,7 +120,7 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- Fix `--backfill-oldest-slot` handling - this flag was totally broken, the code would always backfill to the default slot [pr](https://github.com/prysmaticlabs/prysm/pull/14584)
- Fix keymanager API should return corrected error format for malformed tokens
- Fix keymanager API so that get keys returns an empty response instead of a 500 error when using an unsupported keystore.
-- Small log imporvement, removing some redundant or duplicate logs
+- Small log improvement, removing some redundant or duplicate logs
- EIP7521 - Fixes withdrawal bug by accounting for pending partial withdrawals and deducting already withdrawn amounts from the sweep balance. [PR](https://github.com/prysmaticlabs/prysm/pull/14578)
- unskip electra merkle spec test
- Fix panic in validator REST mode when checking status after removing all keys
@@ -114,15 +132,16 @@ The format is based on Keep a Changelog, and this project adheres to Semantic Ve
- P2P: Avoid infinite loop when looking for peers in small networks.
- Fixed another rollback bug due to a context deadline.
- Fix checkpoint sync bug on holesky. [pr](https://github.com/prysmaticlabs/prysm/pull/14689)
- Fix proposer boost spec tests being flakey by adjusting start time from 3 to 2s into slot.
- Fix segmentation fault in E2E when light-client feature flag is enabled. [PR](https://github.com/prysmaticlabs/prysm/pull/14699)
- Fix `searchForPeers` infinite loop in small networks.
+- Fix slashing pool behavior to enforce MaxAttesterSlashings limit in Electra version.

### Security

-## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
+## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16

-This is a hotfix release with one change.
+This is a hotfix release with one change.

Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
new implementation contains a bug that can cause a panic in certain conditions. The issue is
@@ -131,23 +150,23 @@ meantime we are issuing a patch that recovers from the panic to prevent the node

This only impacts the v5.1.1 release beacon api event stream endpoints. This endpoint is used by the
prysm REST mode validator (a feature which requires the validator to be configured to use the beacon
-api intead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
+api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
like https://github.com/ethpandaops/ethereum-metrics-exporter

-### Fixed
+### Fixed

- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)

## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15

-This release has a number of features and improvements. Most notably, the feature flag
-`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
+This release has a number of features and improvements. Most notably, the feature flag
+`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
The experimental state management design has shown significant improvements in memory usage at
runtime. Updates to libp2p's gossipsub have some bandwidith stability improvements with support for
-IDONTWANT control messages.
+IDONTWANT control messages.

The gRPC gateway has been deprecated from Prysm in this release. If you need JSON data, consider the
-standardized beacon-APIs.
+standardized beacon-APIs.

Updating to this release is recommended at your convenience.
@@ -189,7 +208,7 @@ Updating to this release is recommended at your convenience.
- `grpc-gateway-corsdomain` is renamed to http-cors-domain. The old name can still be used as an alias.
- `api-timeout` is changed from int flag to duration flag, default value updated.
- Light client support: abstracted out the light client headers with different versions.
-- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
+- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
- Removed gorilla mux library and replaced it with net/http updates in go 1.22.
- Clean up `ProposeBlock` for validator client to reduce cognitive scoring and enable further changes.
- Updated k8s-io/client-go to v0.30.4 and k8s-io/apimachinery to v0.30.4
@@ -200,7 +219,7 @@ Updating to this release is recommended at your convenience.
- Updated Sepolia bootnodes.
- Make committee aware packing the default by deprecating `--enable-committee-aware-packing`.
- Moved `ConvertKzgCommitmentToVersionedHash` to the `primitives` package.
-- Updated correlation penalty for EIP-7251.
+- Updated correlation penalty for EIP-7251.

### Deprecated
- `--disable-grpc-gateway` flag is deprecated due to grpc gateway removal.
@@ -374,7 +393,7 @@ Updating to this release is recommended at your earliest convenience, especially
- use time.NewTimer() to avoid possible memory leaks
- paranoid underflow protection without error handling
- Fix CommitteeAssignments to not return every validator
-- Fix dependent root retrival genesis case
+- Fix dependent root retrieval genesis case
- Restrict Dials From Discovery
- Always close cache warm chan to prevent blocking
- Keep only the latest value in the health channel
@@ -526,7 +545,7 @@ block profit. If you want to preserve the existing behavior, set --local-block-v
- handle special case of batch size=1
- Always Set Inprogress Boolean In Cache
- Builder APIs: adding headers to post endpoint
-- Rename mispelled variable
+- Rename misspelled variable
- allow blob by root within da period
- Rewrite Pruning Implementation To Handle EIP 7045
- Set default fee recipient if tracked val fails
@@ -596,7 +615,7 @@ Known Issues
- Support beacon_committee_selections
- /eth/v1/beacon/deposit_snapshot
- Docker images now have coreutils pre-installed
-- da_waited_time_milliseconds tracks total time waiting for data availablity check in ReceiveBlock
+- da_waited_time_milliseconds tracks total time waiting for data availability check in ReceiveBlock
- blob_written, blob_disk_count, blob_disk_bytes new metrics for tracking blobs on disk
- Backfill supports blob backfilling
- Add mainnet deneb fork epoch config
@@ -674,34 +693,34 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.

- Linter: Wastedassign linter enabled to improve code quality.
- API Enhancements:
-  - Added payload return in Wei for /eth/v3/validator/blocks.
-  - Added Holesky Deneb Epoch for better epoch management.
+  - Added payload return in Wei for /eth/v3/validator/blocks.
+  - Added Holesky Deneb Epoch for better epoch management.
- Testing Enhancements:
-  - Clear cache in tests of core helpers to ensure test reliability.
-  - Added Debug State Transition Method for improved debugging.
-  - Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
+  - Clear cache in tests of core helpers to ensure test reliability.
+  - Added Debug State Transition Method for improved debugging.
+  - Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
- API Updates: Re-enabled jwt on keymanager API for enhanced security.
- Logging Improvements: Enhanced block by root log for better traceability.
- Validator Client Improvements:
-  - Added Spans to Core Validator Methods for enhanced monitoring.
-  - Improved readability in validator client code for better maintenance (various commits).
+  - Added Spans to Core Validator Methods for enhanced monitoring.
+  - Improved readability in validator client code for better maintenance (various commits).

### Changed

- Optimizations and Refinements:
-  - Lowered resource usage in certain processes for efficiency.
-  - Moved blob rpc validation closer to peer read for optimized processing.
-  - Cleaned up validate beacon block code for clarity and efficiency.
-  - Updated Sepolia Deneb fork epoch for alignment with network changes.
-  - Changed blob latency metrics to milliseconds for more precise measurement.
-  - Altered getLegacyDatabaseLocation message for better clarity.
-  - Improved wait for activation method for enhanced performance.
-  - Capitalized Aggregated Unaggregated Attestations Log for consistency.
-  - Modified HistoricalRoots usage for accuracy.
-  - Adjusted checking of attribute emptiness for efficiency.
+  - Lowered resource usage in certain processes for efficiency.
+  - Moved blob rpc validation closer to peer read for optimized processing.
+  - Cleaned up validate beacon block code for clarity and efficiency.
+  - Updated Sepolia Deneb fork epoch for alignment with network changes.
+  - Changed blob latency metrics to milliseconds for more precise measurement.
+  - Altered getLegacyDatabaseLocation message for better clarity.
+  - Improved wait for activation method for enhanced performance.
+  - Capitalized Aggregated Unaggregated Attestations Log for consistency.
+  - Modified HistoricalRoots usage for accuracy.
+  - Adjusted checking of attribute emptiness for efficiency.
- Database Management:
-  - Moved --db-backup-output-dir as a deprecated flag for database management simplification.
-  - Added the Ability to Defragment the Beacon State for improved database performance.
+  - Moved --db-backup-output-dir as a deprecated flag for database management simplification.
+  - Added the Ability to Defragment the Beacon State for improved database performance.
- Dependency Update: Bumped quic-go version from 0.39.3 to 0.39.4 for up-to-date dependencies.

### Removed

@@ -712,12 +731,12 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.
### Fixed

- Bug Fixes:
-  - Fixed off by one error for improved accuracy.
-  - Resolved small typo in error messages for clarity.
-  - Addressed minor issue in blsToExecChange validator for better validation.
-  - Corrected blobsidecar json tag for commitment inclusion proof.
-  - Fixed ssz post-requests content type check.
-  - Resolved issue with port logging in bootnode.
+  - Fixed off by one error for improved accuracy.
+  - Resolved small typo in error messages for clarity.
+  - Addressed minor issue in blsToExecChange validator for better validation.
+  - Corrected blobsidecar json tag for commitment inclusion proof.
+  - Fixed ssz post-requests content type check.
+  - Resolved issue with port logging in bootnode.
- Test Fixes: Re-enabled Slasher E2E Test for more comprehensive testing.

### Security
@@ -771,7 +790,7 @@ and Raspberry Pi users.
- Add Goerli Deneb Fork Epoch
- Use deneb key for deneb state in saveStatesEfficientInternal
- Initialize Inactivity Scores Correctly
-- Excluse DA wait time for chain processing time
+- Excludes DA wait time for chain processing time
- Initialize sig cache for verification.Initializer
- Verify roblobs
- KZG Commitment inclusion proof verifier
@@ -804,7 +823,7 @@ and Raspberry Pi users.
- Exit early if blob by root request is empty
- Request missing blobs while processing pending queue
- Check blob exists before requesting from peer
-- Passing block as arugment for sidecar validation
+- Passing block as argument for sidecar validation

#### Blob Management

@@ -1101,13 +1120,13 @@ _Most of the PRs here involve shifting our http endpoints to using vanilla http
- Remove no-op cancel func
- Update Terms of Service
- fix head slot in log
-- DEPRECTATION: Remove exchange transition configuration call
+- DEPRECATION: Remove exchange transition configuration call
- fix segmentation fork when Capella for epoch is MaxUint64
- Return Error Gracefully When Removing 4881 Flag
- Add zero length check on indices during NextSyncCommitteeIndices
- Replace Empty Slice Literals with Nil Slices
- Refactor Error String Formatting According to Go Best Practices
-- Fix redundant type converstion
+- Fix redundant type conversion
- docs: fix typo
- Add Clarification To Sync Committee Cache
- Fix typos
@@ -1129,7 +1148,7 @@ small set of users.

### Security

-No security issues in thsi release.
+No security issues in this release.

## [v4.1.0](https://github.com/prysmaticlabs/prysm/compare/v4.0.8...v4.1.0) - 2023-08-22

@@ -1144,9 +1163,9 @@ No security issues in thsi release.
now features runtime detection, automatically enabling optimized code paths if your CPU supports it.
- **Multiarch Containers Preview Available**: multiarch (:wave: arm64 support :wave:) containers will be offered for
  preview at the following locations:
-  - Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
-  - Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
-  - Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.
+  - Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
+  - Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
+  - Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.

### Added
@@ -1645,7 +1664,7 @@ notes [here](https://hackmd.io/TtyFurRJRKuklG3n8lMO9Q). This release is **strong
Note: The released docker images are using the portable version of the blst cryptography library. The Prysm team will
release docker images with the non-portable blst library as the default image. In the meantime, you can compile docker
images with blst non-portable locally with the `--define=blst_modern=true` bazel flag, use the "-modern-" assets
-attached to releases, or set environment varaible USE_PRYSM_MODERN=true when using prysm.sh.
+attached to releases, or set environment variable USE_PRYSM_MODERN=true when using prysm.sh.

### Added

@@ -1994,7 +2013,7 @@ There are some known issues with this release.
- Beacon node can bootstrap from non-genesis state (i.e bellatrix state)
- Refactor bytesutil, add support for go1.20 slice to array conversions
- Add Span information for attestation record save request
-- Matric addition
+- Metric addition
- Identify invalid signature within batch verification
- Support for getting consensus values from beacon config
- EIP-4881: Spec implementation
@@ -2060,7 +2079,7 @@ See [flashbots/mev-boost#404](https://github.com/flashbots/mev-boost/issues/404)
- Added more histogram metrics for block arrival latency times block_arrival_latency_milliseconds
- Priority queue RetrieveByKey now uses read lock instead of write lock
- Use custom types for certain ethclient requests. Fixes an issue when using prysm on gnosis chain.
-- Updted forkchoice endpoint /eth/v1/debug/forkchoice (was /eth/v1/debug/beacon/forkchoice)
+- Updated forkchoice endpoint /eth/v1/debug/forkchoice (was /eth/v1/debug/beacon/forkchoice)
- Include empty fields in builder json client.
- Computing committee assignments for slots older than the oldest historical root in the beacon state is now forbidden
@@ -2292,7 +2311,7 @@ There are no security updates in this release.
  removed: `GetBeaconState`, `ProduceBlock`, `ListForkChoiceHeads`, `ListBlocks`, `SubmitValidatorRegistration`, `GetBlock`, `ProposeBlock`
- API: Forkchoice method `GetForkChoice` has been removed.
- All previously deprecated feature flags have been
-  removed. `--enable-active-balance-cache`, `--correctly-prune-canonical-atts`, `--correctly-insert-orphaned-atts`, `--enable-next-slot-state-cache`, `--enable-batch-gossip-verification`, `--enable-get-block-optimizations`, `--enable-balance-trie-computation`, `--disable-next-slot-state-cache`, `--attestation-aggregation-strategy`, `--attestation-aggregation-force-opt-maxcover`, `--pyrmont`, `--disable-get-block-optimizations`, `--disable-proposer-atts-selection-using-max-cover`, `--disable-optimized-balance-update`, `--disable-active-balance-cache`, `--disable-balance-trie-computation`, `--disable-batch-gossip-verification`, `--disable-correctly-prune-canonical-atts`, `--disable-correctly-insert-orphaned-atts`, `--enable-native-state`, `--enable-peer-scorer`, `--enable-gossip-batch-aggregation`, `--experimental-disable-boundry-checks`
+  removed. `--enable-active-balance-cache`, `--correctly-prune-canonical-atts`, `--correctly-insert-orphaned-atts`, `--enable-next-slot-state-cache`, `--enable-batch-gossip-verification`, `--enable-get-block-optimizations`, `--enable-balance-trie-computation`, `--disable-next-slot-state-cache`, `--attestation-aggregation-strategy`, `--attestation-aggregation-force-opt-maxcover`, `--pyrmont`, `--disable-get-block-optimizations`, `--disable-proposer-atts-selection-using-max-cover`, `--disable-optimized-balance-update`, `--disable-active-balance-cache`, `--disable-balance-trie-computation`, `--disable-batch-gossip-verification`, `--disable-correctly-prune-canonical-atts`, `--disable-correctly-insert-orphaned-atts`, `--enable-native-state`, `--enable-peer-scorer`, `--enable-gossip-batch-aggregation`, `--experimental-disable-boundary-checks`
- Validator Web API: Removed unused ImportAccounts and DeleteAccounts rpc options

### Fixed

@@ -2508,7 +2527,7 @@ There are two known issues with this release:
- Bellatrix support. See [kiln testnet instructions](https://hackmd.io/OqIoTiQvS9KOIataIFksBQ?view)
- Weak subjectivity sync / checkpoint sync. This is an experimental feature and may have unintended side effects for
  certain operators serving historical data. See
-  the [documentation](https://docs.prylabs.network/docs/next/prysm-usage/checkpoint-sync) for more details.
+  the [documentation](https://docs.prylabs.network/docs/prysm-usage/checkpoint-sync) for more details.
- A faster build of blst for beacon chain on linux amd64. Use the environment variable `USE_PRYSM_MODERN=true` with
  prysm.sh, use the "modern" binary, or bazel build with `--define=blst_modern=true`.
- Vectorized sha256. This may have performance improvements with use of the new flag `--enable-vectorized-htr`.
@@ -2689,7 +2708,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
- Added uint64 overflow protection
- Sync committee pool returns empty slice instead of nil on cache miss
- Improved description of datadir flag
-- Simplied web password requirements
+- Simplified web password requirements
- Web JWT tokens no longer expire.
- Updated keymanager protos
- Watch and update jwt secret when auth token file updated on disk.
@@ -2700,7 +2719,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
- Refactor for weak subjectivity sync implementation
- Update naming for Atlair previous epoch attester
- Remove duplicate MerkleizeTrieLeaves method.
-- Add explict error for validator flag checks on out of bound positions
+- Add explicit error for validator flag checks on out of bound positions
- Simplify method to check if the beacon chain client should update the justified epoch value.
- Rename web UI performance endpoint to "summary"
- Refactor powchain service to be more functional
@@ -2726,7 +2745,7 @@ notes [here](https://github.com/prysmaticlabs/prysm-web-ui/releases/tag/v1.0.0)
  Upstream go-ethereum is now used with familiar go.mod tooling.
- Removed duplicate aggergation validation p2p pipelines.
- Metrics calculation removed extra condition
-- Removed superflous errors from peer scoring parameters registration
+- Removed superfluous errors from peer scoring parameters registration

### Fixed

@@ -2968,4 +2987,4 @@ There are no security updates in this release.

# Older than v2.0.0

-For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
+For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
@@ -125,11 +125,11 @@ Navigate to your fork of the repo on GitHub. On the upper left where the current

**16. Add an entry to CHANGELOG.md.**

-If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
+All PRs must must include a changelog fragment file in the `changelog` directory. If your change is not user-facing or should not be mentioned in the changelog for some other reason, you may use the `Ignored` changelog section in your fragment's header to satisfy this requirement without altering the final release changelog. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.

**17. Create a pull request.**

-Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base master”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
+Navigate your browser to https://github.com/prysmaticlabs/prysm and click on the new pull request button. In the “base” box on the left, leave the default selection “base develop”, the branch that you want your changes to be applied to. In the “compare” box on the right, select feature-in-progress-branch, the branch containing the changes you want to apply. You will then be asked to answer a few questions about your pull request. After you complete the questionnaire, the pull request will appear in the list of pull requests at https://github.com/prysmaticlabs/prysm/pulls. Ensure that you have added an entry to CHANGELOG.md if your PR is a user-facing change. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.

**18. Respond to comments by Core Contributors.**

@@ -177,16 +177,10 @@ $ git push myrepo feature-in-progress-branch -f

## Maintaining CHANGELOG.md

-This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).
-
-All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:
-
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
+This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/). In order to minimize conflicts and workflow headaches, we chose to implement a changelog management
+strategy that uses changelog "fragment" files, managed by our changelog management tool called `unclog`. Each PR must include a new changelog fragment file in the `changelog` directory, as specified by unclog's
+[README.md](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#what-is-a-changelog-fragment). As the `unclog` README suggests in the [Best Practices](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#best-practices) section,
+the standard naming convention for your PR's fragment file, to avoid conflicting with another fragment file, is `changelog/<github user name>_<PR branch name>.md`.

### Releasing
@@ -4,7 +4,7 @@ This README details how to setup Prysm for interop testing for usage with other

> [!IMPORTANT]
> This guide is likely to be outdated. The Prysm team does not have capacity to troubleshoot
-> outdated interop guides or instructions. If you experience issues with this guide, please file and
+> outdated interop guides or instructions. If you experience issues with this guide, please file an
> issue for visibility and propose fixes, if possible.

## Installation & Setup

@@ -74,19 +74,19 @@ bazel run //cmd/beacon-chain --config=minimal -- \
This will start the system with 256 validators. The flags used can be explained as such:

- `bazel run //cmd/beacon-chain --config=minimal` builds and runs the beacon node in minimal build configuration.
-- `--` is a flag divider to distingish between bazel flags and flags that should be passed to the application. All flags and arguments after this divider are passed to the beacon chain.
+- `--` is a flag divider to distinguish between bazel flags and flags that should be passed to the application. All flags and arguments after this divider are passed to the beacon chain.
- `--minimal-config` tells the beacon node to use minimal network configuration. This is different from the compile time state configuration flag `--config=minimal` and both are required.
- `--bootstrap-node=` disables the default bootstrap nodes. This prevents the client from attempting to peer with mainnet nodes.
- `--datadir=/tmp/beacon-chain-minimal-devnet` sets the data directory in a temporary location. Change this to your preferred destination.
- `--force-clear-db` will delete the beaconchain.db file without confirming with the user. This is helpful for iteratively running local devnets without changing the datadir, but less helpful for one off runs where there was no database in the data directory.
-- `--min-sync-peers=0` allows the beacon node to skip initial sync without peers. This is essential because Prysm expects at least a few peers to start start the blockchain.
+- `--min-sync-peers=0` allows the beacon node to skip initial sync without peers. This is essential because Prysm expects at least a few peers to start the blockchain.
- `--genesis-state=/tmp/genesis.ssz` defines the path to the generated genesis ssz file. The beacon node will use this as the initial genesis state.
- `--chain-config-file=/tmp/minimal.yaml` defines the path to the yaml file with the chain configuration.

As soon as the beacon node has started, start the validator in the other terminal window.

```
-bazel run //cmd/validator --config=minimal -- --datadir=/tmp/validator --interopt-num-validators=256 --minimal-config --suggested-fee-recipient=0x8A04d14125D0FDCDc742F4A05C051De07232EDa4
+bazel run //cmd/validator --config=minimal -- --datadir=/tmp/validator --interop-num-validators=256 --minimal-config --suggested-fee-recipient=0x8A04d14125D0FDCDc742F4A05C051De07232EDa4
```

This will launch and kickstart the system with your 256 validators performing their duties accordingly.
MODULE.bazel.lock (generated): 1689 changed lines (file diff suppressed because it is too large)
WORKSPACE: 46 changed lines
@@ -16,6 +16,34 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+    name = "toolchains_protoc",
+    sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
+    strip_prefix = "toolchains_protoc-0.3.6",
+    url = "https://github.com/aspect-build/toolchains_protoc/releases/download/v0.3.6/toolchains_protoc-v0.3.6.tar.gz",
+)
+
+load("@toolchains_protoc//protoc:repositories.bzl", "rules_protoc_dependencies")
+
+rules_protoc_dependencies()
+
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
+
+rules_proto_dependencies()
+
+load("@bazel_features//:deps.bzl", "bazel_features_deps")
+
+bazel_features_deps()
+
+load("@toolchains_protoc//protoc:toolchain.bzl", "protoc_toolchains")
+
+protoc_toolchains(
+    name = "protoc_toolchains",
+    version = "v25.3",
+)
+
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"

http_archive(
@@ -137,10 +165,10 @@ http_archive(
        # Expose internals of go_test for custom build transitions.
        "//third_party:io_bazel_rules_go_test.patch",
    ],
-    sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
+    sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
    urls = [
-        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
-        "https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
+        "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
+        "https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
    ],
)

@@ -182,7 +210,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe

go_rules_dependencies()

go_register_toolchains(
-    go_version = "1.22.4",
+    go_version = "1.23.5",
    nogo = "@//:nogo",
)

@@ -227,7 +255,7 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

-consensus_spec_version = "v1.5.0-alpha.9"
+consensus_spec_version = "v1.5.0-beta.1"

bls_test_version = "v0.1.1"

@@ -243,7 +271,7 @@ filegroup(
        visibility = ["//visibility:public"],
    )
    """,
-    integrity = "sha256-gHbvlnErUeJGWzW8/8JiVlk28JwmXSMhOzkynEIz+8g=",
+    integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
)

@@ -259,7 +287,7 @@ filegroup(
        visibility = ["//visibility:public"],
    )
    """,
-    integrity = "sha256-hQkQdpm5ng4miGYa5WsOKWa0q8WtZu99Oqbv9QtBeJM=",
+    integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
)

@@ -275,7 +303,7 @@ filegroup(
        visibility = ["//visibility:public"],
    )
    """,
-    integrity = "sha256-33sBsmApnJpcyYfR3olKaPB+WC1q00ZKNzHa2TczIxk=",
+    integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
    url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
)

@@ -290,7 +318,7 @@ filegroup(
        visibility = ["//visibility:public"],
    )
    """,
-    integrity = "sha256-GQulBKLc2khpql2K/MxV+NG/d2kAhLXl+gLnKIg7rt4=",
+    integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -13,8 +13,10 @@ go_library(
    deps = [
        "//api:go_default_library",
        "//api/client:go_default_library",
        "//api/server:go_default_library",
        "//api/server/structs:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
@@ -26,6 +28,7 @@ go_library(
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prysmaticlabs_fastssz//:go_default_library",
@@ -1,12 +1,12 @@
package builder

import (
	"github.com/pkg/errors"
	ssz "github.com/prysmaticlabs/fastssz"
	consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)
@@ -22,7 +22,6 @@ type SignedBid interface {

// Bid is an interface describing the method set of a builder bid.
type Bid interface {
	Header() (interfaces.ExecutionData, error)
-	BlobKzgCommitments() ([][]byte, error)
	Value() primitives.Wei
	Pubkey() []byte
	Version() int
@@ -31,6 +30,18 @@ type Bid interface {
	HashTreeRootWith(hh *ssz.Hasher) error
}

+// BidDeneb is an interface that exposes newly added kzg commitments on top of builder bid
+type BidDeneb interface {
+	Bid
+	BlobKzgCommitments() [][]byte
+}
+
+// BidElectra is an interface that exposes the newly added execution requests on top of the builder bid
+type BidElectra interface {
+	BidDeneb
+	ExecutionRequests() *v1.ExecutionRequests
+}
+
type signedBuilderBid struct {
	p *ethpb.SignedBuilderBid
}
@@ -115,11 +126,6 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
	return blocks.WrappedExecutionPayloadHeader(b.p.Header)
}

-// BlobKzgCommitments --
-func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
-	return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
-}
-
// Version --
func (b builderBid) Version() int {
	return version.Bellatrix
@@ -169,11 +175,6 @@ func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
	return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
}

-// BlobKzgCommitments --
-func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
-	return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
-}
-
// Version --
func (b builderBidCapella) Version() int {
	return version.Capella
@@ -254,8 +255,8 @@ func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
}

// BlobKzgCommitments --
-func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
-	return b.p.BlobKzgCommitments, nil
+func (b builderBidDeneb) BlobKzgCommitments() [][]byte {
+	return b.p.BlobKzgCommitments
}

type signedBuilderBidDeneb struct {
@@ -290,3 +291,95 @@ func (b signedBuilderBidDeneb) Version() int {
func (b signedBuilderBidDeneb) IsNil() bool {
	return b.p == nil
}
+
+type builderBidElectra struct {
+	p *ethpb.BuilderBidElectra
+}
+
+// WrappedBuilderBidElectra is a constructor which wraps a protobuf bid into an interface.
+func WrappedBuilderBidElectra(p *ethpb.BuilderBidElectra) (Bid, error) {
+	w := builderBidElectra{p: p}
+	if w.IsNil() {
+		return nil, consensus_types.ErrNilObjectWrapped
+	}
+	return w, nil
+}
+
+// Version --
+func (b builderBidElectra) Version() int {
+	return version.Electra
+}
+
+// Value --
+func (b builderBidElectra) Value() primitives.Wei {
+	return primitives.LittleEndianBytesToWei(b.p.Value)
+}
+
+// Pubkey --
+func (b builderBidElectra) Pubkey() []byte {
+	return b.p.Pubkey
+}
+
+// IsNil --
+func (b builderBidElectra) IsNil() bool {
+	return b.p == nil
+}
+
+// HashTreeRoot --
+func (b builderBidElectra) HashTreeRoot() ([32]byte, error) {
+	return b.p.HashTreeRoot()
+}
+
+// HashTreeRootWith --
+func (b builderBidElectra) HashTreeRootWith(hh *ssz.Hasher) error {
+	return b.p.HashTreeRootWith(hh)
+}
+
+// Header --
+func (b builderBidElectra) Header() (interfaces.ExecutionData, error) {
+	// We have to convert big endian to little endian because the value is coming from the execution layer.
+	return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
+}
+
+// ExecutionRequests --
+func (b builderBidElectra) ExecutionRequests() *v1.ExecutionRequests {
+	return b.p.ExecutionRequests // does not copy
+}
+
+// BlobKzgCommitments --
+func (b builderBidElectra) BlobKzgCommitments() [][]byte {
+	return b.p.BlobKzgCommitments
+}
+
+type signedBuilderBidElectra struct {
+	p *ethpb.SignedBuilderBidElectra
+}
+
+// WrappedSignedBuilderBidElectra is a constructor which wraps a protobuf signed bit into an interface.
+func WrappedSignedBuilderBidElectra(p *ethpb.SignedBuilderBidElectra) (SignedBid, error) {
+	w := signedBuilderBidElectra{p: p}
+	if w.IsNil() {
+		return nil, consensus_types.ErrNilObjectWrapped
+	}
+	return w, nil
+}
+
+// Message --
+func (b signedBuilderBidElectra) Message() (Bid, error) {
+	return WrappedBuilderBidElectra(b.p.Message)
+}
+
+// Signature --
+func (b signedBuilderBidElectra) Signature() []byte {
+	return b.p.Signature
+}
+
+// Version --
+func (b signedBuilderBidElectra) Version() int {
+	return version.Electra
+}
+
+// IsNil --
+func (b signedBuilderBidElectra) IsNil() bool {
+	return b.p == nil
+}
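The diff above removes `BlobKzgCommitments` from the base `Bid` interface and moves it, together with `ExecutionRequests`, onto the narrower `BidDeneb` and `BidElectra` interfaces, so callers now have to check the fork version before asserting. The following is a minimal caller-side sketch of that pattern, assuming only the `Bid`, `BidDeneb` and `BidElectra` interfaces and the `runtime/version` constants shown in this diff; the helper name and error messages are illustrative, not part of the PR.

```go
package builder

import (
	"github.com/pkg/errors"

	v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// bidBlobData is a hypothetical helper showing how a caller can recover the
// version-gated fields from a Bid after the BidDeneb/BidElectra split.
func bidBlobData(bid Bid) ([][]byte, *v1.ExecutionRequests, error) {
	var commitments [][]byte
	var requests *v1.ExecutionRequests

	// Pre-Deneb bids simply do not implement BidDeneb, so there is nothing to return for them.
	if bid.Version() >= version.Deneb {
		db, ok := bid.(BidDeneb)
		if !ok {
			return nil, nil, errors.New("bid reports Deneb or later but does not implement BidDeneb")
		}
		commitments = db.BlobKzgCommitments()
	}

	// Execution requests are only exposed by Electra-style bids.
	if bid.Version() >= version.Electra {
		eb, ok := bid.(BidElectra)
		if !ok {
			return nil, nil, errors.New("bid reports Electra or later but does not implement BidElectra")
		}
		requests = eb.ExecutionRequests()
	}

	return commitments, requests, nil
}
```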
@@ -219,8 +219,23 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
	if err := json.Unmarshal(hb, v); err != nil {
		return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
	}
-	switch strings.ToLower(v.Version) {
-	case strings.ToLower(version.String(version.Deneb)):
+
+	ver, err := version.FromString(strings.ToLower(v.Version))
+	if err != nil {
+		return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", strings.ToLower(v.Version)))
+	}
+	if ver >= version.Electra {
+		hr := &ExecHeaderResponseElectra{}
+		if err := json.Unmarshal(hb, hr); err != nil {
+			return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
+		}
+		p, err := hr.ToProto()
+		if err != nil {
+			return nil, errors.Wrapf(err, "could not extract proto message from header")
+		}
+		return WrappedSignedBuilderBidElectra(p)
+	}
+	if ver >= version.Deneb {
		hr := &ExecHeaderResponseDeneb{}
		if err := json.Unmarshal(hb, hr); err != nil {
@@ -230,7 +245,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
			return nil, errors.Wrapf(err, "could not extract proto message from header")
		}
		return WrappedSignedBuilderBidDeneb(p)
-	case strings.ToLower(version.String(version.Capella)):
+	}
+	if ver >= version.Capella {
		hr := &ExecHeaderResponseCapella{}
		if err := json.Unmarshal(hb, hr); err != nil {
			return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -240,7 +256,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
			return nil, errors.Wrapf(err, "could not extract proto message from header")
		}
		return WrappedSignedBuilderBidCapella(p)
-	case strings.ToLower(version.String(version.Bellatrix)):
+	}
+	if ver >= version.Bellatrix {
		hr := &ExecHeaderResponse{}
		if err := json.Unmarshal(hb, hr); err != nil {
			return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
@@ -250,9 +267,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
			return nil, errors.Wrap(err, "could not extract proto message from header")
		}
		return WrappedSignedBuilderBid(p)
-	default:
-		return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
	}
+	return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
}

// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
@@ -282,7 +298,7 @@ func (c *Client) RegisterValidator(ctx context.Context, svr []*ethpb.SignedValid
	if err != nil {
		return err
	}
-	log.WithField("num_registrations", len(svr)).Info("successfully registered validator(s) on builder")
+	log.WithField("registrationCount", len(svr)).Debug("Successfully registered validator(s) on builder")
	return nil
}
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -124,7 +123,7 @@ func TestClient_RegisterValidator(t *testing.T) {
|
||||
func TestClient_GetHeader(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
var slot types.Slot = 23
|
||||
var slot primitives.Slot = 23
|
||||
parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
|
||||
t.Run("server error", func(t *testing.T) {
|
||||
@@ -267,9 +266,9 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
dbid, ok := bid.(builderBidDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := dbid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
@@ -293,6 +292,50 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseElectra)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
ebid, ok := bid.(builderBidElectra)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := ebid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
requests := ebid.ExecutionRequests()
|
||||
require.Equal(t, 1, len(requests.Deposits))
|
||||
require.Equal(t, 1, len(requests.Withdrawals))
|
||||
require.Equal(t, 1, len(requests.Consolidations))
|
||||
|
||||
})
|
||||
t.Run("unsupported version", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
@@ -370,7 +413,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
})
|
||||
@@ -409,7 +452,7 @@ func TestSubmitBlindedBlock(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(withdrawals))
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Index)
|
||||
assert.Equal(t, types.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.Equal(t, primitives.ValidatorIndex(1), withdrawals[0].ValidatorIndex)
|
||||
assert.DeepEqual(t, ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943"), withdrawals[0].Address)
|
||||
assert.Equal(t, uint64(1), withdrawals[0].Amount)
|
||||
require.NotNil(t, blobBundle)
|
||||
|
||||
@@ -5,12 +5,15 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
@@ -413,54 +416,10 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
|
||||
|
||||
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
|
||||
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
|
||||
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
|
||||
pb := ed.Proto()
|
||||
var data interface{}
|
||||
var err error
|
||||
var ver string
|
||||
switch pbStruct := pb.(type) {
|
||||
case *v1.ExecutionPayload:
|
||||
ver = version.String(version.Bellatrix)
|
||||
data, err = FromProto(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadCapella:
|
||||
ver = version.String(version.Capella)
|
||||
data, err = FromProtoCapella(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadDeneb:
|
||||
ver = version.String(version.Deneb)
|
||||
payloadStruct, err := FromProtoDeneb(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
|
||||
}
|
||||
data = &ExecutionPayloadDenebAndBlobsBundle{
|
||||
ExecutionPayload: &payloadStruct,
|
||||
BlobsBundle: FromBundleProto(bundle),
|
||||
}
|
||||
default:
|
||||
return nil, errInvalidTypeConversion
|
||||
}
|
||||
encoded, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
|
||||
}
|
||||
return &ExecutionPayloadResponse{
|
||||
Version: ver,
|
||||
Data: encoded,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *BuilderBidCapella `json:"message"`
|
||||
} `json:"data"`
|
||||
@@ -604,17 +563,25 @@ type BlobBundler interface {
|
||||
BundleProto() (*v1.BlobsBundle, error)
|
||||
}
|
||||
|
||||
// ParsedExecutionRequests can retrieve the underlying execution requests for the given execution payload response.
|
||||
type ParsedExecutionRequests interface {
|
||||
ExecutionRequestsProto() (*v1.ExecutionRequests, error)
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
|
||||
var toProto ParsedPayload
|
||||
switch r.Version {
|
||||
case version.String(version.Bellatrix):
|
||||
toProto = &ExecutionPayload{}
|
||||
case version.String(version.Capella):
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
case version.String(version.Deneb):
|
||||
v, err := version.FromString(strings.ToLower(r.Version))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported version %s", strings.ToLower(r.Version)))
|
||||
}
|
||||
if v >= version.Deneb {
|
||||
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
|
||||
default:
|
||||
return nil, consensusblocks.ErrUnsupportedVersion
|
||||
} else if v >= version.Capella {
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
} else if v >= version.Bellatrix {
|
||||
toProto = &ExecutionPayload{}
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported version %s", strings.ToLower(r.Version))
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(r.Data, toProto); err != nil {
|
||||
@@ -989,7 +956,8 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
|
||||
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
|
||||
type ExecHeaderResponseDeneb struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *BuilderBidDeneb `json:"message"`
|
||||
} `json:"data"`
|
||||
@@ -1013,7 +981,7 @@ func (bb *BuilderBidDeneb) ToProto() (*eth.BuilderBidDeneb, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(bb.BlobKzgCommitments) > fieldparams.MaxBlobsPerBlock {
|
||||
if len(bb.BlobKzgCommitments) > params.BeaconConfig().DeprecatedMaxBlobsPerBlock {
|
||||
return nil, fmt.Errorf("too many blob commitments: %d", len(bb.BlobKzgCommitments))
|
||||
}
|
||||
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
|
||||
@@ -1306,6 +1274,208 @@ func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseElectra is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
type ExecHeaderResponseElectra struct {
    Version string `json:"version"`
    Data    struct {
        Signature hexutil.Bytes      `json:"signature"`
        Message   *BuilderBidElectra `json:"message"`
    } `json:"data"`
}

// ToProto creates a SignedBuilderBidElectra Proto from ExecHeaderResponseElectra.
func (ehr *ExecHeaderResponseElectra) ToProto() (*eth.SignedBuilderBidElectra, error) {
    bb, err := ehr.Data.Message.ToProto()
    if err != nil {
        return nil, err
    }
    return &eth.SignedBuilderBidElectra{
        Message:   bb,
        Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
    }, nil
}

// ToProto creates a BuilderBidElectra Proto from BuilderBidElectra.
func (bb *BuilderBidElectra) ToProto() (*eth.BuilderBidElectra, error) {
    header, err := bb.Header.ToProto()
    if err != nil {
        return nil, err
    }
    if len(bb.BlobKzgCommitments) > params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra) {
        return nil, fmt.Errorf("blob commitment count %d exceeds the maximum %d", len(bb.BlobKzgCommitments), params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
    }
    kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
    for i, commit := range bb.BlobKzgCommitments {
        if len(commit) != fieldparams.BLSPubkeyLength {
            return nil, fmt.Errorf("commitment length %d is not %d", len(commit), fieldparams.BLSPubkeyLength)
        }
        kzgCommitments[i] = bytesutil.SafeCopyBytes(commit)
    }
    // Post-Electra execution requests must not be nil; builders with no requests are expected to send an empty requests object.
    if bb.ExecutionRequests == nil {
        return nil, errors.New("bid contains nil execution requests")
    }
    executionRequests, err := bb.ExecutionRequests.ToProto()
    if err != nil {
        return nil, errors.Wrap(err, "failed to convert ExecutionRequests")
    }
    return &eth.BuilderBidElectra{
        Header:             header,
        BlobKzgCommitments: kzgCommitments,
        ExecutionRequests:  executionRequests,
        // Note that SSZBytes() reverses byte order for the little-endian representation.
        // Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
        Value:  bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
        Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
    }, nil
}
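
The endianness note in the bid conversion above amounts to a 32-byte reversal; a standalone sketch using only math/big, with an illustrative helper name that is not part of the builder package:

package example

import "math/big"

// uint256ToSSZLittleEndian left-pads a big-endian big.Int to 32 bytes and
// reverses it into the little-endian layout SSZ expects for the bid value.
// It assumes v is non-negative and fits in 256 bits.
func uint256ToSSZLittleEndian(v *big.Int) []byte {
    be := v.FillBytes(make([]byte, 32)) // big-endian, zero-padded on the left
    le := make([]byte, 32)
    for i := range be {
        le[i] = be[len(be)-1-i] // reverse byte order
    }
    return le
}
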
// ExecutionRequestsV1 is a wrapper for the different execution request types.
type ExecutionRequestsV1 struct {
    Deposits       []*DepositRequestV1       `json:"deposits"`
    Withdrawals    []*WithdrawalRequestV1    `json:"withdrawals"`
    Consolidations []*ConsolidationRequestV1 `json:"consolidations"`
}

func (er *ExecutionRequestsV1) ToProto() (*v1.ExecutionRequests, error) {
    if uint64(len(er.Deposits)) > params.BeaconConfig().MaxDepositRequestsPerPayload {
        return nil, fmt.Errorf("deposit requests count %d exceeds the maximum %d", len(er.Deposits), params.BeaconConfig().MaxDepositRequestsPerPayload)
    }
    deposits := make([]*v1.DepositRequest, len(er.Deposits))
    for i, dep := range er.Deposits {
        d, err := dep.ToProto()
        if err != nil {
            return nil, err
        }
        deposits[i] = d
    }
    if uint64(len(er.Withdrawals)) > params.BeaconConfig().MaxWithdrawalRequestsPerPayload {
        return nil, fmt.Errorf("withdrawal requests count %d exceeds the maximum %d", len(er.Withdrawals), params.BeaconConfig().MaxWithdrawalRequestsPerPayload)
    }
    withdrawals := make([]*v1.WithdrawalRequest, len(er.Withdrawals))
    for i, wr := range er.Withdrawals {
        w, err := wr.ToProto()
        if err != nil {
            return nil, err
        }
        withdrawals[i] = w
    }
    if uint64(len(er.Consolidations)) > params.BeaconConfig().MaxConsolidationsRequestsPerPayload {
        return nil, fmt.Errorf("consolidation requests count %d exceeds the maximum %d", len(er.Consolidations), params.BeaconConfig().MaxConsolidationsRequestsPerPayload)
    }
    consolidations := make([]*v1.ConsolidationRequest, len(er.Consolidations))
    for i, con := range er.Consolidations {
        c, err := con.ToProto()
        if err != nil {
            return nil, err
        }
        consolidations[i] = c
    }
    return &v1.ExecutionRequests{
        Deposits:       deposits,
        Withdrawals:    withdrawals,
        Consolidations: consolidations,
    }, nil
}
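
A hedged round-trip sketch for the wrapper above, assuming ExecutionRequestsV1 is used from prysm's api/client/builder package; the JSON field names follow the struct tags, and parseExecutionRequests is an illustrative helper, not part of this change:

package example

import (
    "encoding/json"

    "github.com/prysmaticlabs/prysm/v5/api/client/builder"
    v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// parseExecutionRequests decodes a raw execution_requests JSON object and
// converts it to the engine proto, letting ToProto enforce the per-payload maximums.
func parseExecutionRequests(raw []byte) (*v1.ExecutionRequests, error) {
    er := &builder.ExecutionRequestsV1{}
    if err := json.Unmarshal(raw, er); err != nil {
        return nil, err
    }
    return er.ToProto()
}
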
// BuilderBidElectra is a field of ExecHeaderResponseElectra.
type BuilderBidElectra struct {
    Header             *ExecutionPayloadHeaderDeneb `json:"header"`
    BlobKzgCommitments []hexutil.Bytes              `json:"blob_kzg_commitments"`
    ExecutionRequests  *ExecutionRequestsV1         `json:"execution_requests"`
    Value              Uint256                      `json:"value"`
    Pubkey             hexutil.Bytes                `json:"pubkey"`
}

// WithdrawalRequestV1 is a field of ExecutionRequestsV1.
type WithdrawalRequestV1 struct {
    SourceAddress   hexutil.Bytes `json:"source_address"`
    ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"`
    Amount          Uint256       `json:"amount"`
}

func (wr *WithdrawalRequestV1) ToProto() (*v1.WithdrawalRequest, error) {
    srcAddress, err := bytesutil.DecodeHexWithLength(wr.SourceAddress.String(), common.AddressLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "source_address")
    }
    pubkey, err := bytesutil.DecodeHexWithLength(wr.ValidatorPubkey.String(), fieldparams.BLSPubkeyLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "validator_pubkey")
    }

    return &v1.WithdrawalRequest{
        SourceAddress:   srcAddress,
        ValidatorPubkey: pubkey,
        Amount:          wr.Amount.Uint64(),
    }, nil
}

// DepositRequestV1 is a field of ExecutionRequestsV1.
type DepositRequestV1 struct {
    PubKey hexutil.Bytes `json:"pubkey"`
    // withdrawalCredentials: DATA, 32 Bytes
    WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
    // amount: QUANTITY, 64 Bits
    Amount Uint256 `json:"amount"`
    // signature: DATA, 96 Bytes
    Signature hexutil.Bytes `json:"signature"`
    // index: QUANTITY, 64 Bits
    Index Uint256 `json:"index"`
}

func (dr *DepositRequestV1) ToProto() (*v1.DepositRequest, error) {
    pubkey, err := bytesutil.DecodeHexWithLength(dr.PubKey.String(), fieldparams.BLSPubkeyLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "pubkey")
    }
    wc, err := bytesutil.DecodeHexWithLength(dr.WithdrawalCredentials.String(), fieldparams.RootLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "withdrawal_credentials")
    }
    sig, err := bytesutil.DecodeHexWithLength(dr.Signature.String(), fieldparams.BLSSignatureLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "signature")
    }
    return &v1.DepositRequest{
        Pubkey:                pubkey,
        WithdrawalCredentials: wc,
        Amount:                dr.Amount.Uint64(),
        Signature:             sig,
        Index:                 dr.Index.Uint64(),
    }, nil
}

// ConsolidationRequestV1 is a field of ExecutionRequestsV1.
type ConsolidationRequestV1 struct {
    // sourceAddress: DATA, 20 Bytes
    SourceAddress hexutil.Bytes `json:"source_address"`
    // sourcePubkey: DATA, 48 Bytes
    SourcePubkey hexutil.Bytes `json:"source_pubkey"`
    // targetPubkey: DATA, 48 Bytes
    TargetPubkey hexutil.Bytes `json:"target_pubkey"`
}

func (cr *ConsolidationRequestV1) ToProto() (*v1.ConsolidationRequest, error) {
    srcAddress, err := bytesutil.DecodeHexWithLength(cr.SourceAddress.String(), common.AddressLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "source_address")
    }
    sourcePubkey, err := bytesutil.DecodeHexWithLength(cr.SourcePubkey.String(), fieldparams.BLSPubkeyLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "source_pubkey")
    }
    targetPubkey, err := bytesutil.DecodeHexWithLength(cr.TargetPubkey.String(), fieldparams.BLSPubkeyLength)
    if err != nil {
        return nil, server.NewDecodeError(err, "target_pubkey")
    }
    return &v1.ConsolidationRequest{
        SourceAddress: srcAddress,
        SourcePubkey:  sourcePubkey,
        TargetPubkey:  targetPubkey,
    }, nil
}

// ErrorMessage is a JSON representation of the builder API's returned error message.
type ErrorMessage struct {
    Code int `json:"code"`
@@ -154,6 +154,64 @@ var testExampleHeaderResponseDeneb = `{
|
||||
}
|
||||
}`
|
||||
|
||||
var testExampleHeaderResponseElectra = `{
|
||||
"version": "electra",
|
||||
"data": {
|
||||
"message": {
|
||||
"header": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "1",
|
||||
"blob_gas_used": "1",
|
||||
"excess_blob_gas": "1",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"blob_kzg_commitments": [
|
||||
"0xa94170080872584e54a1cf092d845703b13907f2e6b3b1c0ad573b910530499e3bcd48c6378846b80d2bfa58c81cf3d5"
|
||||
],
|
||||
"execution_requests": {
|
||||
"deposits": [
|
||||
{
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"withdrawal_credentials": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"amount": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
"index": "1"
|
||||
}
|
||||
],
|
||||
"withdrawals": [
|
||||
{
|
||||
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"validator_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"amount": "1"
|
||||
}
|
||||
],
|
||||
"consolidations": [
|
||||
{
|
||||
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"source_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"target_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
}
|
||||
]
|
||||
},
|
||||
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
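
The fixture above decodes the same way the electra GetHeader test exercises it end to end; a hypothetical extra test, assuming it sits in the same package as the existing builder client tests and reuses that package's encoding/json and require imports:

// TestDecodeExampleHeaderResponseElectra decodes the fixture directly and
// converts it to the signed-bid proto, checking one of the embedded execution requests.
func TestDecodeExampleHeaderResponseElectra(t *testing.T) {
    hr := &ExecHeaderResponseElectra{}
    require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), hr))
    sb, err := hr.ToProto()
    require.NoError(t, err)
    require.Equal(t, 1, len(sb.Message.ExecutionRequests.Deposits))
}
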
|
||||
|
||||
var testExampleHeaderResponseDenebNoBundle = `{
|
||||
"version": "deneb",
|
||||
"data": {
|
||||
@@ -1924,9 +1982,9 @@ func TestEmptyResponseBody(t *testing.T) {
|
||||
emptyResponse := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(empty, emptyResponse))
|
||||
_, err := emptyResponse.ParsePayload()
|
||||
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
|
||||
require.ErrorContains(t, "unsupported version", err)
|
||||
})
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb, version.Electra}
|
||||
for i := range versions {
|
||||
vstr := version.String(versions[i])
|
||||
t.Run("populated version without payload"+vstr, func(t *testing.T) {
|
||||
|
||||
@@ -74,7 +74,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
|
||||
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
|
||||
continue
|
||||
}
|
||||
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
|
||||
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
|
||||
}
|
||||
}
|
||||
return parent
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
|
||||
@@ -14,6 +14,10 @@ type SignedMessageJsoner interface {
|
||||
SigString() string
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Phase 0
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlock struct {
|
||||
Message *BeaconBlock `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
@@ -48,6 +52,29 @@ type BeaconBlockBody struct {
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeaderContainer struct {
|
||||
Header *SignedBeaconBlockHeader `json:"header"`
|
||||
Root string `json:"root"`
|
||||
Canonical bool `json:"canonical"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeader struct {
|
||||
Message *BeaconBlockHeader `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type BeaconBlockHeader struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
BodyRoot string `json:"body_root"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Altair
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockAltair struct {
|
||||
Message *BeaconBlockAltair `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
@@ -83,6 +110,10 @@ type BeaconBlockBodyAltair struct {
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Bellatrix
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockBellatrix struct {
|
||||
Message *BeaconBlockBellatrix `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
@@ -155,6 +186,44 @@ type BlindedBeaconBlockBodyBellatrix struct {
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeader `json:"execution_payload_header"`
|
||||
}
|
||||
|
||||
type ExecutionPayload struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Capella
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockCapella struct {
|
||||
Message *BeaconBlockCapella `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
@@ -229,6 +298,46 @@ type BlindedBeaconBlockBodyCapella struct {
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Deneb
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockContentsDeneb struct {
|
||||
SignedBlock *SignedBeaconBlockDeneb `json:"signed_block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
@@ -317,6 +426,50 @@ type BlindedBeaconBlockBodyDeneb struct {
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Electra
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockContentsElectra struct {
|
||||
SignedBlock *SignedBeaconBlockElectra `json:"signed_block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
@@ -362,7 +515,7 @@ type BeaconBlockBodyElectra struct {
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadElectra `json:"execution_payload"`
|
||||
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
@@ -392,156 +545,119 @@ func (s *SignedBlindedBeaconBlockElectra) SigString() string {
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyElectra struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeaderContainer struct {
|
||||
Header *SignedBeaconBlockHeader `json:"header"`
|
||||
Root string `json:"root"`
|
||||
Canonical bool `json:"canonical"`
|
||||
type (
|
||||
ExecutionRequests struct {
|
||||
Deposits []*DepositRequest `json:"deposits"`
|
||||
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
|
||||
Consolidations []*ConsolidationRequest `json:"consolidations"`
|
||||
}
|
||||
)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// Fulu
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type SignedBeaconBlockContentsFulu struct {
|
||||
SignedBlock *SignedBeaconBlockFulu `json:"signed_block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
Blobs []string `json:"blobs"`
|
||||
}
|
||||
|
||||
type SignedBeaconBlockHeader struct {
|
||||
Message *BeaconBlockHeader `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
type BeaconBlockContentsFulu struct {
|
||||
Block *BeaconBlockFulu `json:"block"`
|
||||
KzgProofs []string `json:"kzg_proofs"`
|
||||
Blobs []string `json:"blobs"`
|
||||
}
|
||||
|
||||
type BeaconBlockHeader struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
BodyRoot string `json:"body_root"`
|
||||
type SignedBeaconBlockFulu struct {
|
||||
Message *BeaconBlockFulu `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type ExecutionPayload struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
var _ SignedMessageJsoner = &SignedBeaconBlockFulu{}
|
||||
|
||||
func (s *SignedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeader struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
func (s *SignedBeaconBlockFulu) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type ExecutionPayloadCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
type BeaconBlockFulu struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BeaconBlockBodyFulu `json:"body"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderCapella struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
type BeaconBlockBodyFulu struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayload *ExecutionPayloadDeneb `json:"execution_payload"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
Transactions []string `json:"transactions"`
|
||||
Withdrawals []*Withdrawal `json:"withdrawals"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
type BlindedBeaconBlockFulu struct {
|
||||
Slot string `json:"slot"`
|
||||
ProposerIndex string `json:"proposer_index"`
|
||||
ParentRoot string `json:"parent_root"`
|
||||
StateRoot string `json:"state_root"`
|
||||
Body *BlindedBeaconBlockBodyFulu `json:"body"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadElectra = ExecutionPayloadDeneb
|
||||
|
||||
type ExecutionPayloadHeaderDeneb struct {
|
||||
ParentHash string `json:"parent_hash"`
|
||||
FeeRecipient string `json:"fee_recipient"`
|
||||
StateRoot string `json:"state_root"`
|
||||
ReceiptsRoot string `json:"receipts_root"`
|
||||
LogsBloom string `json:"logs_bloom"`
|
||||
PrevRandao string `json:"prev_randao"`
|
||||
BlockNumber string `json:"block_number"`
|
||||
GasLimit string `json:"gas_limit"`
|
||||
GasUsed string `json:"gas_used"`
|
||||
Timestamp string `json:"timestamp"`
|
||||
ExtraData string `json:"extra_data"`
|
||||
BaseFeePerGas string `json:"base_fee_per_gas"`
|
||||
BlockHash string `json:"block_hash"`
|
||||
TransactionsRoot string `json:"transactions_root"`
|
||||
WithdrawalsRoot string `json:"withdrawals_root"`
|
||||
BlobGasUsed string `json:"blob_gas_used"`
|
||||
ExcessBlobGas string `json:"excess_blob_gas"`
|
||||
type SignedBlindedBeaconBlockFulu struct {
|
||||
Message *BlindedBeaconBlockFulu `json:"message"`
|
||||
Signature string `json:"signature"`
|
||||
}
|
||||
|
||||
type ExecutionPayloadHeaderElectra = ExecutionPayloadHeaderDeneb
|
||||
var _ SignedMessageJsoner = &SignedBlindedBeaconBlockFulu{}
|
||||
|
||||
type ExecutionRequests struct {
|
||||
Deposits []*DepositRequest `json:"deposits"`
|
||||
Withdrawals []*WithdrawalRequest `json:"withdrawals"`
|
||||
Consolidations []*ConsolidationRequest `json:"consolidations"`
|
||||
func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
|
||||
return json.Marshal(s.Message)
|
||||
}
|
||||
|
||||
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
|
||||
return s.Signature
|
||||
}
|
||||
|
||||
type BlindedBeaconBlockBodyFulu struct {
|
||||
RandaoReveal string `json:"randao_reveal"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Graffiti string `json:"graffiti"`
|
||||
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
|
||||
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
|
||||
Attestations []*AttestationElectra `json:"attestations"`
|
||||
Deposits []*Deposit `json:"deposits"`
|
||||
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
|
||||
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
|
||||
ExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"execution_payload_header"`
|
||||
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
|
||||
BlobKzgCommitments []string `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequests `json:"execution_requests"`
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
@@ -243,7 +244,7 @@ func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, 96)
|
||||
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -330,7 +331,7 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
}
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -366,7 +367,7 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
}
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -432,6 +433,32 @@ func (a *AttestationElectra) ToConsensus() (*eth.AttestationElectra, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (a *SingleAttestation) ToConsensus() (*eth.SingleAttestation, error) {
|
||||
ci, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "CommitteeIndex")
|
||||
}
|
||||
ai, err := strconv.ParseUint(a.AttesterIndex, 10, 64)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AttesterIndex")
|
||||
}
|
||||
data, err := a.Data.ToConsensus()
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Data")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(a.Signature, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Signature")
|
||||
}
|
||||
|
||||
return &eth.SingleAttestation{
|
||||
CommitteeId: primitives.CommitteeIndex(ci),
|
||||
AttesterIndex: primitives.ValidatorIndex(ai),
|
||||
Data: data,
|
||||
Signature: sig,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func AttElectraFromConsensus(a *eth.AttestationElectra) *AttestationElectra {
|
||||
return &AttestationElectra{
|
||||
AggregationBits: hexutil.Encode(a.AggregationBits),
|
||||
@@ -708,6 +735,10 @@ func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, e
|
||||
}
|
||||
|
||||
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
if err := slice.VerifyMaxLength(a.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
for i, ix := range a.AttestingIndices {
|
||||
@@ -733,6 +764,13 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
}
|
||||
|
||||
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
|
||||
if err := slice.VerifyMaxLength(
|
||||
a.AttestingIndices,
|
||||
params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
for i, ix := range a.AttestingIndices {
|
||||
@@ -1163,7 +1201,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 2)
|
||||
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1184,7 +1222,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
|
||||
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
|
||||
}
|
||||
@@ -1204,7 +1242,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
|
||||
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
|
||||
}
|
||||
|
||||
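
For context on the limit changes above, the Electra cap on attesting indices is the committee size times the committee count rather than the committee size alone; a quick sketch of the arithmetic, assuming mainnet presets (2048 validators per committee, 64 committees per slot), with an illustrative helper name:

package example

import "github.com/prysmaticlabs/prysm/v5/config/params"

// electraAttestingIndicesCap returns the maximum attesting indices allowed in an
// Electra indexed attestation: MaxValidatorsPerCommittee * MaxCommitteesPerSlot,
// i.e. 2048 * 64 = 131072 with mainnet presets, replacing the old hard-coded 2048.
func electraAttestingIndicesCap() uint64 {
    cfg := params.BeaconConfig()
    return cfg.MaxValidatorsPerCommittee * cfg.MaxCommitteesPerSlot
}
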
File diff suppressed because it is too large
@@ -176,9 +176,9 @@ func lightClientHeaderToJSON(header interfaces.LightClientHeader) (json.RawMessa
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
|
||||
ex, ok := exInterface.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderElectra{})
|
||||
return nil, fmt.Errorf("execution data is not %T", &enginev1.ExecutionPayloadHeaderDeneb{})
|
||||
}
|
||||
execution, err := ExecutionPayloadHeaderElectraFromConsensus(ex)
|
||||
if err != nil {
|
||||
|
||||
@@ -11,6 +11,10 @@ import (

var errPayloadHeaderNotFound = errors.New("expected payload header not found")

// ----------------------------------------------------------------------------
// Phase 0
// ----------------------------------------------------------------------------

func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -97,6 +101,10 @@ func BeaconStateFromConsensus(st beaconState.BeaconState) (*BeaconState, error)
    }, nil
}

// ----------------------------------------------------------------------------
// Altair
// ----------------------------------------------------------------------------

func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAltair, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -202,6 +210,10 @@ func BeaconStateAltairFromConsensus(st beaconState.BeaconState) (*BeaconStateAlt
    }, nil
}

// ----------------------------------------------------------------------------
// Bellatrix
// ----------------------------------------------------------------------------

func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconStateBellatrix, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -320,6 +332,10 @@ func BeaconStateBellatrixFromConsensus(st beaconState.BeaconState) (*BeaconState
    }, nil
}

// ----------------------------------------------------------------------------
// Capella
// ----------------------------------------------------------------------------

func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCapella, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -457,6 +473,10 @@ func BeaconStateCapellaFromConsensus(st beaconState.BeaconState) (*BeaconStateCa
    }, nil
}

// ----------------------------------------------------------------------------
// Deneb
// ----------------------------------------------------------------------------

func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDeneb, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -594,6 +614,10 @@ func BeaconStateDenebFromConsensus(st beaconState.BeaconState) (*BeaconStateDene
    }, nil
}

// ----------------------------------------------------------------------------
// Electra
// ----------------------------------------------------------------------------

func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateElectra, error) {
    srcBr := st.BlockRoots()
    br := make([]string, len(srcBr))
@@ -775,3 +799,189 @@ func BeaconStateElectraFromConsensus(st beaconState.BeaconState) (*BeaconStateEl
        PendingConsolidations: PendingConsolidationsFromConsensus(pc),
    }, nil
}

// ----------------------------------------------------------------------------
// Fulu
// ----------------------------------------------------------------------------

func BeaconStateFuluFromConsensus(st beaconState.BeaconState) (*BeaconStateFulu, error) {
|
||||
srcBr := st.BlockRoots()
|
||||
br := make([]string, len(srcBr))
|
||||
for i, r := range srcBr {
|
||||
br[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcSr := st.StateRoots()
|
||||
sr := make([]string, len(srcSr))
|
||||
for i, r := range srcSr {
|
||||
sr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcHr, err := st.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hr := make([]string, len(srcHr))
|
||||
for i, r := range srcHr {
|
||||
hr[i] = hexutil.Encode(r)
|
||||
}
|
||||
srcVotes := st.Eth1DataVotes()
|
||||
votes := make([]*Eth1Data, len(srcVotes))
|
||||
for i, e := range srcVotes {
|
||||
votes[i] = Eth1DataFromConsensus(e)
|
||||
}
|
||||
srcVals := st.Validators()
|
||||
vals := make([]*Validator, len(srcVals))
|
||||
for i, v := range srcVals {
|
||||
vals[i] = ValidatorFromConsensus(v)
|
||||
}
|
||||
srcBals := st.Balances()
|
||||
bals := make([]string, len(srcBals))
|
||||
for i, b := range srcBals {
|
||||
bals[i] = fmt.Sprintf("%d", b)
|
||||
}
|
||||
srcRm := st.RandaoMixes()
|
||||
rm := make([]string, len(srcRm))
|
||||
for i, m := range srcRm {
|
||||
rm[i] = hexutil.Encode(m)
|
||||
}
|
||||
srcSlashings := st.Slashings()
|
||||
slashings := make([]string, len(srcSlashings))
|
||||
for i, s := range srcSlashings {
|
||||
slashings[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
srcPrevPart, err := st.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevPart := make([]string, len(srcPrevPart))
|
||||
for i, p := range srcPrevPart {
|
||||
prevPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcCurrPart, err := st.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currPart := make([]string, len(srcCurrPart))
|
||||
for i, p := range srcCurrPart {
|
||||
currPart[i] = fmt.Sprintf("%d", p)
|
||||
}
|
||||
srcIs, err := st.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is := make([]string, len(srcIs))
|
||||
for i, s := range srcIs {
|
||||
is[i] = fmt.Sprintf("%d", s)
|
||||
}
|
||||
currSc, err := st.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSc, err := st.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
execData, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcPayload, ok := execData.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
if !ok {
|
||||
return nil, errPayloadHeaderNotFound
|
||||
}
|
||||
payload, err := ExecutionPayloadHeaderElectraFromConsensus(srcPayload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
srcHs, err := st.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hs := make([]*HistoricalSummary, len(srcHs))
|
||||
for i, s := range srcHs {
|
||||
hs[i] = HistoricalSummaryFromConsensus(s)
|
||||
}
|
||||
nwi, err := st.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nwvi, err := st.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
drsi, err := st.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbtc, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ebtc, err := st.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eee, err := st.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cbtc, err := st.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ece, err := st.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pbd, err := st.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ppw, err := st.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pc, err := st.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BeaconStateFulu{
|
||||
GenesisTime: fmt.Sprintf("%d", st.GenesisTime()),
|
||||
GenesisValidatorsRoot: hexutil.Encode(st.GenesisValidatorsRoot()),
|
||||
Slot: fmt.Sprintf("%d", st.Slot()),
|
||||
Fork: ForkFromConsensus(st.Fork()),
|
||||
LatestBlockHeader: BeaconBlockHeaderFromConsensus(st.LatestBlockHeader()),
|
||||
BlockRoots: br,
|
||||
StateRoots: sr,
|
||||
HistoricalRoots: hr,
|
||||
Eth1Data: Eth1DataFromConsensus(st.Eth1Data()),
|
||||
Eth1DataVotes: votes,
|
||||
Eth1DepositIndex: fmt.Sprintf("%d", st.Eth1DepositIndex()),
|
||||
Validators: vals,
|
||||
Balances: bals,
|
||||
RandaoMixes: rm,
|
||||
Slashings: slashings,
|
||||
PreviousEpochParticipation: prevPart,
|
||||
CurrentEpochParticipation: currPart,
|
||||
JustificationBits: hexutil.Encode(st.JustificationBits()),
|
||||
PreviousJustifiedCheckpoint: CheckpointFromConsensus(st.PreviousJustifiedCheckpoint()),
|
||||
CurrentJustifiedCheckpoint: CheckpointFromConsensus(st.CurrentJustifiedCheckpoint()),
|
||||
FinalizedCheckpoint: CheckpointFromConsensus(st.FinalizedCheckpoint()),
|
||||
InactivityScores: is,
|
||||
CurrentSyncCommittee: SyncCommitteeFromConsensus(currSc),
|
||||
NextSyncCommittee: SyncCommitteeFromConsensus(nextSc),
|
||||
LatestExecutionPayloadHeader: payload,
|
||||
NextWithdrawalIndex: fmt.Sprintf("%d", nwi),
|
||||
NextWithdrawalValidatorIndex: fmt.Sprintf("%d", nwvi),
|
||||
HistoricalSummaries: hs,
|
||||
DepositRequestsStartIndex: fmt.Sprintf("%d", drsi),
|
||||
DepositBalanceToConsume: fmt.Sprintf("%d", dbtc),
|
||||
ExitBalanceToConsume: fmt.Sprintf("%d", ebtc),
|
||||
EarliestExitEpoch: fmt.Sprintf("%d", eee),
|
||||
ConsolidationBalanceToConsume: fmt.Sprintf("%d", cbtc),
|
||||
EarliestConsolidationEpoch: fmt.Sprintf("%d", ece),
|
||||
PendingDeposits: PendingDepositsFromConsensus(pbd),
|
||||
PendingPartialWithdrawals: PendingPartialWithdrawalsFromConsensus(ppw),
|
||||
PendingConsolidations: PendingConsolidationsFromConsensus(pc),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -36,6 +36,13 @@ type AttestationElectra struct {
    CommitteeBits string `json:"committee_bits"`
}

type SingleAttestation struct {
    CommitteeIndex string           `json:"committee_index"`
    AttesterIndex  string           `json:"attester_index"`
    Data           *AttestationData `json:"data"`
    Signature      string           `json:"signature"`
}

type AttestationData struct {
    Slot           string `json:"slot"`
    CommitteeIndex string `json:"index"`

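As a usage note, the string-typed fields and json tags shown above define the REST representation of the new SingleAttestation message. A small, hedged sketch of how a struct with just these visible fields (a reduced stand-in, not the full API type) marshals with encoding/json:

package main

import (
    "encoding/json"
    "fmt"
)

// Reduced stand-ins for the API types above; only the fields visible in the
// diff are included here.
type AttestationData struct {
    Slot           string `json:"slot"`
    CommitteeIndex string `json:"index"`
}

type SingleAttestation struct {
    CommitteeIndex string           `json:"committee_index"`
    AttesterIndex  string           `json:"attester_index"`
    Data           *AttestationData `json:"data"`
    Signature      string           `json:"signature"`
}

func main() {
    att := SingleAttestation{
        CommitteeIndex: "3",
        AttesterIndex:  "17",
        Data:           &AttestationData{Slot: "123", CommitteeIndex: "3"},
        Signature:      "0xabc...",
    }
    out, _ := json.MarshalIndent(att, "", "  ")
    fmt.Println(string(out))
}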
@@ -142,41 +142,81 @@ type BeaconStateDeneb struct {
|
||||
}
|
||||
|
||||
type BeaconStateElectra struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderElectra `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
}
|
||||
|
||||
type BeaconStateFulu struct {
|
||||
GenesisTime string `json:"genesis_time"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root"`
|
||||
Slot string `json:"slot"`
|
||||
Fork *Fork `json:"fork"`
|
||||
LatestBlockHeader *BeaconBlockHeader `json:"latest_block_header"`
|
||||
BlockRoots []string `json:"block_roots"`
|
||||
StateRoots []string `json:"state_roots"`
|
||||
HistoricalRoots []string `json:"historical_roots"`
|
||||
Eth1Data *Eth1Data `json:"eth1_data"`
|
||||
Eth1DataVotes []*Eth1Data `json:"eth1_data_votes"`
|
||||
Eth1DepositIndex string `json:"eth1_deposit_index"`
|
||||
Validators []*Validator `json:"validators"`
|
||||
Balances []string `json:"balances"`
|
||||
RandaoMixes []string `json:"randao_mixes"`
|
||||
Slashings []string `json:"slashings"`
|
||||
PreviousEpochParticipation []string `json:"previous_epoch_participation"`
|
||||
CurrentEpochParticipation []string `json:"current_epoch_participation"`
|
||||
JustificationBits string `json:"justification_bits"`
|
||||
PreviousJustifiedCheckpoint *Checkpoint `json:"previous_justified_checkpoint"`
|
||||
CurrentJustifiedCheckpoint *Checkpoint `json:"current_justified_checkpoint"`
|
||||
FinalizedCheckpoint *Checkpoint `json:"finalized_checkpoint"`
|
||||
InactivityScores []string `json:"inactivity_scores"`
|
||||
CurrentSyncCommittee *SyncCommittee `json:"current_sync_committee"`
|
||||
NextSyncCommittee *SyncCommittee `json:"next_sync_committee"`
|
||||
LatestExecutionPayloadHeader *ExecutionPayloadHeaderDeneb `json:"latest_execution_payload_header"`
|
||||
NextWithdrawalIndex string `json:"next_withdrawal_index"`
|
||||
NextWithdrawalValidatorIndex string `json:"next_withdrawal_validator_index"`
|
||||
HistoricalSummaries []*HistoricalSummary `json:"historical_summaries"`
|
||||
DepositRequestsStartIndex string `json:"deposit_requests_start_index"`
|
||||
DepositBalanceToConsume string `json:"deposit_balance_to_consume"`
|
||||
ExitBalanceToConsume string `json:"exit_balance_to_consume"`
|
||||
EarliestExitEpoch string `json:"earliest_exit_epoch"`
|
||||
ConsolidationBalanceToConsume string `json:"consolidation_balance_to_consume"`
|
||||
EarliestConsolidationEpoch string `json:"earliest_consolidation_epoch"`
|
||||
PendingDeposits []*PendingDeposit `json:"pending_deposits"`
|
||||
PendingPartialWithdrawals []*PendingPartialWithdrawal `json:"pending_partial_withdrawals"`
|
||||
PendingConsolidations []*PendingConsolidation `json:"pending_consolidations"`
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@ go_library(
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/altair:go_default_library",
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/electra:go_default_library",
        "//beacon-chain/core/epoch/precompute:go_default_library",
        "//beacon-chain/core/feed:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
@@ -140,6 +141,7 @@ go_test(
        "//beacon-chain/core/blocks:go_default_library",
        "//beacon-chain/core/feed/state:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/light-client:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/das:go_default_library",

@@ -2,6 +2,7 @@ package blockchain

import (
    "io"
    "os"
    "testing"

    "github.com/sirupsen/logrus"
@@ -11,5 +12,5 @@ func TestMain(m *testing.M) {
    logrus.SetLevel(logrus.DebugLevel)
    logrus.SetOutput(io.Discard)

    m.Run()
    os.Exit(m.Run())
}

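The TestMain change above is about exit-code propagation: calling m.Run() without os.Exit discards the result, so the test binary reports success even when tests fail. A minimal sketch of the corrected pattern (package name and logger setup mirror the hunk above):

package blockchain

import (
    "io"
    "os"
    "testing"

    "github.com/sirupsen/logrus"
)

// TestMain configures logging once for the package and then propagates the
// test result as the process exit code; without os.Exit the binary would
// always exit 0, even when tests fail.
func TestMain(m *testing.M) {
    logrus.SetLevel(logrus.DebugLevel)
    logrus.SetOutput(io.Discard)

    os.Exit(m.Run())
}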
@@ -268,6 +268,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
        if err != nil {
            return false, errors.Wrap(err, "could not get execution requests")
        }
        if requests == nil {
            return false, errors.New("nil execution requests")
        }
    }
    lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

@@ -362,15 +365,16 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
        return emptyAttri
    }

    var attr payloadattribute.Attributer
    switch st.Version() {
    case version.Deneb, version.Electra:
    v := st.Version()

    if v >= version.Deneb {
        withdrawals, _, err := st.ExpectedWithdrawals()
        if err != nil {
            log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
            return emptyAttri
        }
        attr, err = payloadattribute.New(&enginev1.PayloadAttributesV3{

        attr, err := payloadattribute.New(&enginev1.PayloadAttributesV3{
            Timestamp:             uint64(t.Unix()),
            PrevRandao:            prevRando,
            SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -381,13 +385,18 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
            log.WithError(err).Error("Could not get payload attribute")
            return emptyAttri
        }
    case version.Capella:

        return attr
    }

    if v >= version.Capella {
        withdrawals, _, err := st.ExpectedWithdrawals()
        if err != nil {
            log.WithError(err).Error("Could not get expected withdrawals to get payload attribute")
            return emptyAttri
        }
        attr, err = payloadattribute.New(&enginev1.PayloadAttributesV2{

        attr, err := payloadattribute.New(&enginev1.PayloadAttributesV2{
            Timestamp:             uint64(t.Unix()),
            PrevRandao:            prevRando,
            SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -397,8 +406,12 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
            log.WithError(err).Error("Could not get payload attribute")
            return emptyAttri
        }
    case version.Bellatrix:
        attr, err = payloadattribute.New(&enginev1.PayloadAttributes{

        return attr
    }

    if v >= version.Bellatrix {
        attr, err := payloadattribute.New(&enginev1.PayloadAttributes{
            Timestamp:             uint64(t.Unix()),
            PrevRandao:            prevRando,
            SuggestedFeeRecipient: val.FeeRecipient[:],
@@ -407,12 +420,12 @@ func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState,
            log.WithError(err).Error("Could not get payload attribute")
            return emptyAttri
        }
    default:
        log.WithField("version", st.Version()).Error("Could not get payload attribute due to unknown state version")
        return emptyAttri

        return attr
    }

    return attr
    log.WithField("version", version.String(st.Version())).Error("Could not get payload attribute due to unknown state version")
    return emptyAttri
}

// removeInvalidBlockAndState removes the invalid block, blob and its corresponding state from the cache and DB.

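The getPayloadAttribute refactor above replaces a per-fork switch with ordered `if v >= version.X` checks that each build the newest attribute shape the state supports and return immediately. A simplified, self-contained sketch of that control flow, using placeholder version constants and attribute names rather than Prysm's real packages:

package main

import "fmt"

// Placeholder fork identifiers, ordered oldest to newest, mirroring how the
// real version package orders forks.
const (
    Bellatrix = iota
    Capella
    Deneb
)

// payloadAttributesFor returns the newest attribute shape a state version
// supports. Checks run from newest to oldest and return immediately, so a
// post-Deneb fork (e.g. Electra) falls into the Deneb branch automatically.
func payloadAttributesFor(v int) string {
    if v >= Deneb {
        return "PayloadAttributesV3"
    }
    if v >= Capella {
        return "PayloadAttributesV2"
    }
    if v >= Bellatrix {
        return "PayloadAttributesV1"
    }
    return "unknown"
}

func main() {
    for _, v := range []int{Bellatrix, Capella, Deneb} {
        fmt.Println(v, "=>", payloadAttributesFor(v))
    }
}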
@@ -11,6 +11,7 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
@@ -404,13 +405,19 @@ func (s *Service) saveOrphanedOperations(ctx context.Context, orphanedRoot [32]b
        if a.GetData().Slot+params.BeaconConfig().SlotsPerEpoch < s.CurrentSlot() {
            continue
        }
        if helpers.IsAggregated(a) {
            if err := s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
        if features.Get().EnableExperimentalAttestationPool {
            if err = s.cfg.AttestationCache.Add(a); err != nil {
                return err
            }
        } else {
            if err := s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
                return err
            if a.IsAggregated() {
                if err = s.cfg.AttPool.SaveAggregatedAttestation(a); err != nil {
                    return err
                }
            } else {
                if err = s.cfg.AttPool.SaveUnaggregatedAttestation(a); err != nil {
                    return err
                }
            }
        }
        saveOrphanedAttCount.Inc()

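Because the hunk above interleaves old and new branches, the resulting logic is easier to see in isolation: with the experimental pool flag enabled, orphaned attestations go to the attestation cache; otherwise aggregated and unaggregated attestations keep going to their respective pool methods. A rough, self-contained sketch of that branch (placeholder types, not Prysm's interfaces):

package main

import "fmt"

// Minimal stand-ins for the attestation sinks used in the hunk above.
type Attestation struct{ aggregated bool }

func (a *Attestation) IsAggregated() bool { return a.aggregated }

type attCache struct{}

func (c *attCache) Add(a *Attestation) error {
    fmt.Println("cache: add")
    return nil
}

type attPool struct{}

func (p *attPool) SaveAggregatedAttestation(a *Attestation) error {
    fmt.Println("pool: aggregated")
    return nil
}

func (p *attPool) SaveUnaggregatedAttestation(a *Attestation) error {
    fmt.Println("pool: unaggregated")
    return nil
}

// saveOrphanedAtt routes an orphaned attestation either to the experimental
// cache or to the legacy pool, mirroring the feature-flag branch above.
func saveOrphanedAtt(a *Attestation, experimentalPool bool, cache *attCache, pool *attPool) error {
    if experimentalPool {
        return cache.Add(a)
    }
    if a.IsAggregated() {
        return pool.SaveAggregatedAttestation(a)
    }
    return pool.SaveUnaggregatedAttestation(a)
}

func main() {
    _ = saveOrphanedAtt(&Attestation{aggregated: true}, false, &attCache{}, &attPool{})
    _ = saveOrphanedAtt(&Attestation{aggregated: false}, true, &attCache{}, &attPool{})
}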
@@ -20,16 +20,17 @@ func Verify(sidecars ...blocks.ROBlob) error {
    cmts := make([]GoKZG.KZGCommitment, len(sidecars))
    proofs := make([]GoKZG.KZGProof, len(sidecars))
    for i, sidecar := range sidecars {
        blobs[i] = bytesToBlob(sidecar.Blob)
        blobs[i] = *bytesToBlob(sidecar.Blob)
        cmts[i] = bytesToCommitment(sidecar.KzgCommitment)
        proofs[i] = bytesToKZGProof(sidecar.KzgProof)
    }
    return kzgContext.VerifyBlobKZGProofBatch(blobs, cmts, proofs)
}

func bytesToBlob(blob []byte) (ret GoKZG.Blob) {
func bytesToBlob(blob []byte) *GoKZG.Blob {
    var ret GoKZG.Blob
    copy(ret[:], blob)
    return
    return &ret
}

func bytesToCommitment(commitment []byte) (ret GoKZG.KZGCommitment) {

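The bytesToBlob change above switches from returning a GoKZG.Blob by value to returning a pointer, so the large fixed-size array (on the order of 128 KiB per blob) is not copied on return; callers such as Verify dereference only where a value is required. A standalone sketch of the same idea with a placeholder blob type and an assumed size:

package main

import "fmt"

// Blob is a stand-in for GoKZG.Blob; the size used here (4096 * 32 bytes) is
// an assumption for illustration, not taken from the library itself.
type Blob [131072]byte

// bytesToBlob writes into a local array once and returns its address, so the
// array is not copied again when the function returns.
func bytesToBlob(b []byte) *Blob {
    var ret Blob
    copy(ret[:], b)
    return &ret
}

func main() {
    blobs := make([]Blob, 1)
    // Callers that need a value (for example to store into a []Blob, as
    // Verify does above) dereference at the assignment site.
    blobs[0] = *bytesToBlob([]byte{0x01, 0x02})
    fmt.Println(blobs[0][:2])
}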
@@ -10,11 +10,11 @@ import (
)

func GenerateCommitmentAndProof(blob GoKZG.Blob) (GoKZG.KZGCommitment, GoKZG.KZGProof, error) {
    commitment, err := kzgContext.BlobToKZGCommitment(blob, 0)
    commitment, err := kzgContext.BlobToKZGCommitment(&blob, 0)
    if err != nil {
        return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
    }
    proof, err := kzgContext.ComputeBlobKZGProof(blob, commitment, 0)
    proof, err := kzgContext.ComputeBlobKZGProof(&blob, commitment, 0)
    if err != nil {
        return GoKZG.KZGCommitment{}, GoKZG.KZGProof{}, err
    }
@@ -31,7 +31,7 @@ func TestBytesToAny(t *testing.T) {
    blob := GoKZG.Blob{0x01, 0x02}
    commitment := GoKZG.KZGCommitment{0x01, 0x02}
    proof := GoKZG.KZGProof{0x01, 0x02}
    require.DeepEqual(t, blob, bytesToBlob(bytes))
    require.DeepEqual(t, blob, *bytesToBlob(bytes))
    require.DeepEqual(t, commitment, bytesToCommitment(bytes))
    require.DeepEqual(t, proof, bytesToKZGProof(bytes))
}

@@ -85,6 +85,14 @@ func WithTrackedValidatorsCache(c *cache.TrackedValidatorsCache) Option {
    }
}

// WithAttestationCache for attestation lifecycle after chain inclusion.
func WithAttestationCache(c *cache.AttestationCache) Option {
    return func(s *Service) error {
        s.cfg.AttestationCache = c
        return nil
    }
}

// WithAttestationPool for attestation lifecycle after chain inclusion.
func WithAttestationPool(p attestations.Pool) Option {
    return func(s *Service) error {

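WithAttestationCache follows the same functional-options pattern as the neighboring constructors: each option is a closure that mutates the service configuration and may return an error. A generic, self-contained sketch of the pattern with placeholder types (not the real Service or cache):

package main

import "fmt"

type AttestationCache struct{ name string }

type config struct {
    AttestationCache *AttestationCache
}

type Service struct{ cfg *config }

// Option mutates a Service during construction and may fail.
type Option func(*Service) error

// WithAttestationCache wires a cache into the service configuration.
func WithAttestationCache(c *AttestationCache) Option {
    return func(s *Service) error {
        s.cfg.AttestationCache = c
        return nil
    }
}

// NewService applies each option in order and stops at the first error.
func NewService(opts ...Option) (*Service, error) {
    s := &Service{cfg: &config{}}
    for _, o := range opts {
        if err := o(s); err != nil {
            return nil, err
        }
    }
    return s, nil
}

func main() {
    s, err := NewService(WithAttestationCache(&AttestationCache{name: "experimental"}))
    fmt.Println(s.cfg.AttestationCache.name, err)
}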
@@ -15,7 +15,6 @@ import (
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/features"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
@@ -70,6 +69,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
|
||||
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
|
||||
defer s.processLightClientUpdates(cfg)
|
||||
defer s.saveLightClientUpdate(cfg)
|
||||
defer s.saveLightClientBootstrap(cfg)
|
||||
}
|
||||
defer s.sendStateFeedOnBlock(cfg)
|
||||
defer reportProcessingTime(startTime)
|
||||
@@ -379,7 +379,11 @@ func (s *Service) handleBlockAttestations(ctx context.Context, blk interfaces.Re
|
||||
r := bytesutil.ToBytes32(a.GetData().BeaconBlockRoot)
|
||||
if s.cfg.ForkChoiceStore.HasNode(r) {
|
||||
s.cfg.ForkChoiceStore.ProcessAttestation(ctx, indices, r, a.GetData().Target.Epoch)
|
||||
} else if err := s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||
} else if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.cfg.AttestationCache.Add(a); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if err = s.cfg.AttPool.SaveBlockAttestation(a); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -419,7 +423,11 @@ func (s *Service) savePostStateInfo(ctx context.Context, r [32]byte, b interface
|
||||
func (s *Service) pruneAttsFromPool(headBlock interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
atts := headBlock.Block().Body().Attestations()
|
||||
for _, att := range atts {
|
||||
if helpers.IsAggregated(att) {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := s.cfg.AttestationCache.DeleteCovered(att); err != nil {
|
||||
return errors.Wrap(err, "could not delete attestation")
|
||||
}
|
||||
} else if att.IsAggregated() {
|
||||
if err := s.cfg.AttPool.DeleteAggregatedAttestation(att); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -496,14 +504,15 @@ func (s *Service) runLateBlockTasks() {
|
||||
// It returns a map where each key represents a missing BlobSidecar index.
|
||||
// An empty map means we have all indices; a non-empty map can be used to compare incoming
|
||||
// BlobSidecars against the set of known missing sidecars.
|
||||
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte) (map[uint64]struct{}, error) {
|
||||
func missingIndices(bs *filesystem.BlobStorage, root [32]byte, expected [][]byte, slot primitives.Slot) (map[uint64]struct{}, error) {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(expected) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
if len(expected) > fieldparams.MaxBlobsPerBlock {
|
||||
if len(expected) > maxBlobsPerBlock {
|
||||
return nil, errMaxBlobsExceeded
|
||||
}
|
||||
indices, err := bs.Indices(root)
|
||||
indices, err := bs.Indices(root, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -552,7 +561,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
return nil
|
||||
}
|
||||
// get a map of BlobSidecar indices that are not currently available.
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments)
|
||||
missing, err := missingIndices(s.blobStorage, root, kzgCommitments, block.Slot())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -563,7 +572,7 @@ func (s *Service) isDataAvailable(ctx context.Context, root [32]byte, signed int
|
||||
|
||||
// The gossip handler for blobs writes the index of each verified blob referencing the given
|
||||
// root to the channel returned by blobNotifiers.forRoot.
|
||||
nc := s.blobNotifiers.forRoot(root)
|
||||
nc := s.blobNotifiers.forRoot(root, block.Slot())
|
||||
|
||||
// Log for DA checks that cross over into the next slot; helpful for debugging.
|
||||
nextSlot := slots.BeginsAt(signed.Block().Slot()+1, s.genesisTime)
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
lightclient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
@@ -128,7 +130,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
|
||||
attestedRoot := cfg.roblock.Block().ParentRoot()
|
||||
attestedBlock, err := s.getBlock(cfg.ctx, attestedRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Saving light client update failed: Could not get attested block")
|
||||
log.WithError(err).Errorf("Saving light client update failed: Could not get attested block for root %#x", attestedRoot)
|
||||
return
|
||||
}
|
||||
if attestedBlock == nil || attestedBlock.IsNil() {
|
||||
@@ -137,7 +139,7 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(cfg.ctx, attestedRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Saving light client update failed: Could not get attested state")
|
||||
log.WithError(err).Errorf("Saving light client update failed: Could not get attested state for root %#x", attestedRoot)
|
||||
return
|
||||
}
|
||||
if attestedState == nil || attestedState.IsNil() {
|
||||
@@ -148,7 +150,11 @@ func (s *Service) saveLightClientUpdate(cfg *postBlockProcessConfig) {
|
||||
finalizedRoot := attestedState.FinalizedCheckpoint().Root
|
||||
finalizedBlock, err := s.getBlock(cfg.ctx, [32]byte(finalizedRoot))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Saving light client update failed: Could not get finalized block")
|
||||
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
|
||||
log.Debugf("Skipping saving light client update: Finalized block is nil for root %#x", finalizedRoot)
|
||||
} else {
|
||||
log.WithError(err).Errorf("Saving light client update failed: Could not get finalized block for root %#x", finalizedRoot)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -223,21 +229,28 @@ func (s *Service) processLightClientFinalityUpdate(
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attested block")
|
||||
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attested state")
|
||||
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
|
||||
}
|
||||
|
||||
var finalizedBlock interfaces.ReadOnlySignedBeaconBlock
|
||||
finalizedCheckPoint := attestedState.FinalizedCheckpoint()
|
||||
if finalizedCheckPoint != nil {
|
||||
finalizedRoot := bytesutil.ToBytes32(finalizedCheckPoint.Root)
|
||||
finalizedBlock, err = s.cfg.BeaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
finalizedBlock = nil
|
||||
finalizedCheckpoint := attestedState.FinalizedCheckpoint()
|
||||
|
||||
// Check if the finalized checkpoint has changed
|
||||
if finalizedCheckpoint == nil || bytes.Equal(finalizedCheckpoint.GetRoot(), postState.FinalizedCheckpoint().Root) {
|
||||
return nil
|
||||
}
|
||||
|
||||
finalizedRoot := bytesutil.ToBytes32(finalizedCheckpoint.Root)
|
||||
finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, finalizedRoot)
|
||||
if err != nil {
|
||||
if errors.Is(err, errBlockNotFoundInCacheOrDB) {
|
||||
log.Debugf("Skipping processing light client finality update: Finalized block is nil for root %#x", finalizedRoot)
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "could not get finalized block for root %#x", finalizedRoot)
|
||||
}
|
||||
|
||||
update, err := lightclient.NewLightClientFinalityUpdateFromBeaconState(
|
||||
@@ -266,11 +279,11 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
|
||||
attestedRoot := signed.Block().ParentRoot()
|
||||
attestedBlock, err := s.cfg.BeaconDB.Block(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attested block")
|
||||
return errors.Wrapf(err, "could not get attested block for root %#x", attestedRoot)
|
||||
}
|
||||
attestedState, err := s.cfg.StateGen.StateByRoot(ctx, attestedRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get attested state")
|
||||
return errors.Wrapf(err, "could not get attested state for root %#x", attestedRoot)
|
||||
}
|
||||
|
||||
update, err := lightclient.NewLightClientOptimisticUpdateFromBeaconState(
|
||||
@@ -283,6 +296,10 @@ func (s *Service) processLightClientOptimisticUpdate(ctx context.Context, signed
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), lightclient.ErrNotEnoughSyncCommitteeBits) {
|
||||
log.WithError(err).Debug("Skipping processing light client optimistic update")
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "could not create light client optimistic update")
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/transition"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/das"
|
||||
@@ -1075,6 +1076,48 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestService_insertSlashingsToForkChoiceStoreElectra(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
|
||||
att1 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
slashings := []*ethpb.AttesterSlashingElectra{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
b := util.NewBeaconBlockElectra()
|
||||
b.Block.Body.AttesterSlashings = slashings
|
||||
wb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
@@ -2205,23 +2248,23 @@ func TestMissingIndices(t *testing.T) {
|
||||
},
|
||||
{
|
||||
name: "expected exceeds max",
|
||||
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock + 1),
|
||||
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
|
||||
err: errMaxBlobsExceeded,
|
||||
},
|
||||
{
|
||||
name: "first missing",
|
||||
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
|
||||
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
|
||||
present: []uint64{1, 2, 3, 4, 5},
|
||||
result: fakeResult([]uint64{0}),
|
||||
},
|
||||
{
|
||||
name: "all missing",
|
||||
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
|
||||
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
|
||||
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
|
||||
},
|
||||
{
|
||||
name: "none missing",
|
||||
expected: fakeCommitments(fieldparams.MaxBlobsPerBlock),
|
||||
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
|
||||
present: []uint64{0, 1, 2, 3, 4, 5},
|
||||
result: fakeResult([]uint64{}),
|
||||
},
|
||||
@@ -2255,7 +2298,7 @@ func TestMissingIndices(t *testing.T) {
|
||||
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
require.NoError(t, bm.CreateFakeIndices(c.root, c.present...))
|
||||
missing, err := missingIndices(bs, c.root, c.expected)
|
||||
missing, err := missingIndices(bs, c.root, c.expected, 0)
|
||||
if c.err != nil {
|
||||
require.ErrorIs(t, err, c.err)
|
||||
return
|
||||
@@ -2505,173 +2548,500 @@ func fakeResult(missing []uint64) map[uint64]struct{} {
|
||||
}
|
||||
|
||||
func TestSaveLightClientUpdate(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
|
||||
s, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
t.Run("Altair", func(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
reset()
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
|
||||
require.NoError(t, err)
|
||||
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), version.Altair)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Capella", func(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
t.Run("New update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
|
||||
reset()
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
|
||||
t.Run("Old update is better", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
|
||||
// create and save old update
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
|
||||
require.NoError(t, err)
|
||||
|
||||
scb := make([]byte, 64)
|
||||
for i := 0; i < 5; i++ {
|
||||
scb[i] = 0x01
|
||||
}
|
||||
oldUpdate.SetSyncAggregate(ðpb.SyncAggregate{
|
||||
SyncCommitteeBits: scb,
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
})
|
||||
|
||||
err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
require.DeepEqual(t, oldUpdate, u)
|
||||
require.Equal(t, u.Version(), version.Capella)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("Deneb", func(t *testing.T) {
|
||||
featCfg := &features.Flags{}
|
||||
featCfg.EnableLightClient = true
|
||||
reset := features.InitWithReset(featCfg)
|
||||
t.Run("No old update", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false)
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
|
||||
s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
|
||||
require.NoError(t, err)
|
||||
attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
|
||||
require.NoError(t, err)
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
currentBlockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
|
||||
require.NoError(t, err)
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
|
||||
require.NoError(t, err)
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
|
||||
cfg := &postBlockProcessConfig{
|
||||
ctx: ctx,
|
||||
roblock: roblock,
|
||||
postState: l.State,
|
||||
isValidPayload: true,
|
||||
}
|
||||
s.saveLightClientUpdate(cfg)
|
||||
|
||||
s.saveLightClientUpdate(cfg)
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Deneb)
|
||||
})
|
||||
|
||||
// Check that the light client update is saved
|
||||
period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))
|
||||
u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, u)
|
||||
attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
|
||||
require.Equal(t, u.Version(), version.Deneb)
|
||||
    t.Run("New update is better", func(t *testing.T) {
        l := util.NewTestLightClient(t).SetupTestDeneb(false)

        reset()
        s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

        err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
        require.NoError(t, err)
        attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)
        err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
        require.NoError(t, err)

        currentBlockRoot, err := l.Block.Block().HashTreeRoot()
        require.NoError(t, err)
        roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
        require.NoError(t, err)

        err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
        require.NoError(t, err)
        err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
        require.NoError(t, err)

        err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
        require.NoError(t, err)

        cfg := &postBlockProcessConfig{
            ctx:            ctx,
            roblock:        roblock,
            postState:      l.State,
            isValidPayload: true,
        }

        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

        // create and save old update
        oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
        require.NoError(t, err)

        err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
        require.NoError(t, err)

        s.saveLightClientUpdate(cfg)

        u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
        require.NoError(t, err)
        require.NotNil(t, u)
        attestedStateRoot, err := l.AttestedState.HashTreeRoot(ctx)
        require.NoError(t, err)
        require.Equal(t, attestedStateRoot, [32]byte(u.AttestedHeader().Beacon().StateRoot))
        require.Equal(t, u.Version(), version.Deneb)
    })

    t.Run("Old update is better", func(t *testing.T) {
        l := util.NewTestLightClient(t).SetupTestDeneb(false)

        s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

        err := s.cfg.BeaconDB.SaveBlock(ctx, l.AttestedBlock)
        require.NoError(t, err)
        attestedBlockRoot, err := l.AttestedBlock.Block().HashTreeRoot()
        require.NoError(t, err)
        err = s.cfg.BeaconDB.SaveState(ctx, l.AttestedState, attestedBlockRoot)
        require.NoError(t, err)

        currentBlockRoot, err := l.Block.Block().HashTreeRoot()
        require.NoError(t, err)
        roblock, err := consensusblocks.NewROBlockWithRoot(l.Block, currentBlockRoot)
        require.NoError(t, err)

        err = s.cfg.BeaconDB.SaveBlock(ctx, roblock)
        require.NoError(t, err)
        err = s.cfg.BeaconDB.SaveState(ctx, l.State, currentBlockRoot)
        require.NoError(t, err)

        err = s.cfg.BeaconDB.SaveBlock(ctx, l.FinalizedBlock)
        require.NoError(t, err)

        cfg := &postBlockProcessConfig{
            ctx:            ctx,
            roblock:        roblock,
            postState:      l.State,
            isValidPayload: true,
        }

        period := slots.SyncCommitteePeriod(slots.ToEpoch(l.AttestedState.Slot()))

        // create and save old update
        oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(s.CurrentSlot(), l.AttestedState)
        require.NoError(t, err)

        scb := make([]byte, 64)
        for i := 0; i < 5; i++ {
            scb[i] = 0x01
        }
        oldUpdate.SetSyncAggregate(&ethpb.SyncAggregate{
            SyncCommitteeBits:      scb,
            SyncCommitteeSignature: make([]byte, 96),
        })

        err = s.cfg.BeaconDB.SaveLightClientUpdate(ctx, period, oldUpdate)
        require.NoError(t, err)

        s.saveLightClientUpdate(cfg)

        u, err := s.cfg.BeaconDB.LightClientUpdate(ctx, period)
        require.NoError(t, err)
        require.NotNil(t, u)
        require.DeepEqual(t, oldUpdate, u)
        require.Equal(t, u.Version(), version.Deneb)
    })
    })

    reset()
}
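The two subtests above exercise the ranking between a stored update and a newly derived one: the old update only wins after its sync aggregate is given more participation bits. The sketch below shows just that participation-count criterion; the production ranking follows the light client spec's full "is better update" rules, and the helper names here are illustrative, not part of the diff.

// syncParticipants counts the set bits in a sync committee bitfield.
// Illustrative helper only.
func syncParticipants(bits []byte) int {
    count := 0
    for _, b := range bits {
        for b != 0 {
            count += int(b & 1)
            b >>= 1
        }
    }
    return count
}

// storedUpdateIsBetter mirrors the single criterion the tests rely on:
// the stored update is kept when it has strictly more participants.
func storedUpdateIsBetter(storedBits, candidateBits []byte) bool {
    return syncParticipants(storedBits) > syncParticipants(candidateBits)
}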
func TestSaveLightClientBootstrap(t *testing.T) {
    featCfg := &features.Flags{}
    featCfg.EnableLightClient = true
    reset := features.InitWithReset(featCfg)

    s, tr := minimalTestService(t)
    ctx := tr.ctx

    t.Run("Altair", func(t *testing.T) {
        featCfg := &features.Flags{}
        featCfg.EnableLightClient = true
        reset := features.InitWithReset(featCfg)

        l := util.NewTestLightClient(t).SetupTestAltair()

        s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().AltairForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

@@ -2704,15 +3074,9 @@ func TestSaveLightClientBootstrap(t *testing.T) {
        require.NoError(t, err)
        require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
        require.Equal(t, b.Version(), version.Altair)

        reset()
    })

    t.Run("Capella", func(t *testing.T) {
        featCfg := &features.Flags{}
        featCfg.EnableLightClient = true
        reset := features.InitWithReset(featCfg)

        l := util.NewTestLightClient(t).SetupTestCapella(false)

        s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().CapellaForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

@@ -2745,15 +3109,9 @@ func TestSaveLightClientBootstrap(t *testing.T) {
        require.NoError(t, err)
        require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
        require.Equal(t, b.Version(), version.Capella)

        reset()
    })

    t.Run("Deneb", func(t *testing.T) {
        featCfg := &features.Flags{}
        featCfg.EnableLightClient = true
        reset := features.InitWithReset(featCfg)

        l := util.NewTestLightClient(t).SetupTestDeneb(false)

        s.genesisTime = time.Unix(time.Now().Unix()-(int64(params.BeaconConfig().DenebForkEpoch)*int64(params.BeaconConfig().SlotsPerEpoch)*int64(params.BeaconConfig().SecondsPerSlot)), 0)

@@ -2786,7 +3144,7 @@ func TestSaveLightClientBootstrap(t *testing.T) {
        require.NoError(t, err)
        require.Equal(t, stateRoot, [32]byte(b.Header().Beacon().StateRoot))
        require.Equal(t, b.Version(), version.Deneb)

        reset()
    })

    reset()
}
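Each subtest above backdates the service's genesis time so that "now" lands on the first slot of the fork it targets. A small sketch of that arithmetic, with illustrative parameter names (the tests inline the same expression with params.BeaconConfig() values):

// genesisTimeForFork returns a genesis timestamp such that the current wall
// clock corresponds to the start of forkEpoch. Sketch only.
func genesisTimeForFork(forkEpoch, slotsPerEpoch, secondsPerSlot uint64) time.Time {
    offset := int64(forkEpoch * slotsPerEpoch * secondsPerSlot)
    return time.Unix(time.Now().Unix()-offset, 0)
}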
@@ -9,6 +9,7 @@ import (
    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -166,7 +167,13 @@ func (s *Service) UpdateHead(ctx context.Context, proposingSlot primitives.Slot)

// This processes fork choice attestations from the pool to account for validator votes and fork choice.
func (s *Service) processAttestations(ctx context.Context, disparity time.Duration) {
    atts := s.cfg.AttPool.ForkchoiceAttestations()
    var atts []ethpb.Att
    if features.Get().EnableExperimentalAttestationPool {
        atts = s.cfg.AttestationCache.ForkchoiceAttestations()
    } else {
        atts = s.cfg.AttPool.ForkchoiceAttestations()
    }

    for _, a := range atts {
        // Based on the spec, don't process the attestation until the subsequent slot.
        // This delays consideration in the fork choice until their slot is in the past.
@@ -182,7 +189,11 @@ func (s *Service) processAttestations(ctx context.Context, disparity time.Durati
            continue
        }

        if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
        if features.Get().EnableExperimentalAttestationPool {
            if err := s.cfg.AttestationCache.DeleteForkchoiceAttestation(a); err != nil {
                log.WithError(err).Error("Could not delete fork choice attestation in pool")
            }
        } else if err := s.cfg.AttPool.DeleteForkchoiceAttestation(a); err != nil {
            log.WithError(err).Error("Could not delete fork choice attestation in pool")
        }
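The hunk above gates both the read and the delete on the EnableExperimentalAttestationPool feature flag. A minimal sketch of the selection it implements; the helper name is hypothetical, while the fields mirror the service config shown later in this diff:

// forkchoiceAttSource picks the structure that feeds fork choice processing.
func (s *Service) forkchoiceAttSource() []ethpb.Att {
    if features.Get().EnableExperimentalAttestationPool {
        return s.cfg.AttestationCache.ForkchoiceAttestations()
    }
    return s.cfg.AttPool.ForkchoiceAttestations()
}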
@@ -4,12 +4,13 @@ import (
    "context"

    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// SendNewBlobEvent sends a message to the BlobNotifier channel that the blob
// for the block root `root` is ready in the database
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64) {
    s.blobNotifiers.notifyIndex(root, index)
func (s *Service) sendNewBlobEvent(root [32]byte, index uint64, slot primitives.Slot) {
    s.blobNotifiers.notifyIndex(root, index, slot)
}

// ReceiveBlob saves the blob to database and sends the new event
@@ -18,6 +19,6 @@ func (s *Service) ReceiveBlob(ctx context.Context, b blocks.VerifiedROBlob) erro
        return err
    }

    s.sendNewBlobEvent(b.BlockRoot(), b.Index)
    s.sendNewBlobEvent(b.BlockRoot(), b.Index, b.Slot())
    return nil
}
@@ -7,6 +7,7 @@ import (
    "time"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
    statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
@@ -468,6 +469,9 @@ func (s *Service) validateStateTransition(ctx context.Context, preState state.Be
    stateTransitionStartTime := time.Now()
    postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
    if err != nil {
        if ctx.Err() != nil || electra.IsExecutionRequestError(err) {
            return nil, err
        }
        return nil, invalidBlock{error: err}
    }
    stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
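The added branch treats context cancellation and Electra execution-request failures as retryable rather than as proof of an invalid block. A sketch of that classification as a standalone helper, written under the same package assumptions as the hunk above (the helper name is hypothetical):

// classifyTransitionError returns the error unchanged when the failure is not
// attributable to the block itself, and wraps it as invalidBlock otherwise.
func classifyTransitionError(ctx context.Context, err error) error {
    if err == nil {
        return nil
    }
    if ctx.Err() != nil || electra.IsExecutionRequestError(err) {
        return err // transient or execution-layer issue; do not mark the block invalid
    }
    return invalidBlock{error: err}
}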
@@ -33,10 +33,10 @@ import (
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/v5/config/features"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
    "github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -75,6 +75,7 @@ type config struct {
    DepositCache            cache.DepositCache
    PayloadIDCache          *cache.PayloadIDCache
    TrackedValidatorsCache  *cache.TrackedValidatorsCache
    AttestationCache        *cache.AttestationCache
    AttPool                 attestations.Pool
    ExitPool                voluntaryexits.PoolManager
    SlashingPool            slashings.PoolManager
@@ -104,18 +105,22 @@ var ErrMissingClockSetter = errors.New("blockchain Service initialized without a
type blobNotifierMap struct {
    sync.RWMutex
    notifiers map[[32]byte]chan uint64
    seenIndex map[[32]byte][fieldparams.MaxBlobsPerBlock]bool
    seenIndex map[[32]byte][]bool
}

// notifyIndex notifies a blob by its index for a given root.
// It uses internal maps to keep track of seen indices and notifier channels.
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
    if idx >= fieldparams.MaxBlobsPerBlock {
func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64, slot primitives.Slot) {
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    if idx >= uint64(maxBlobsPerBlock) {
        return
    }

    bn.Lock()
    seen := bn.seenIndex[root]
    if seen == nil {
        seen = make([]bool, maxBlobsPerBlock)
    }
    if seen[idx] {
        bn.Unlock()
        return
@@ -126,7 +131,7 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
    // Retrieve or create the notifier channel for the given root.
    c, ok := bn.notifiers[root]
    if !ok {
        c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
        c = make(chan uint64, maxBlobsPerBlock)
        bn.notifiers[root] = c
    }

@@ -135,12 +140,13 @@ func (bn *blobNotifierMap) notifyIndex(root [32]byte, idx uint64) {
    c <- idx
}

func (bn *blobNotifierMap) forRoot(root [32]byte) chan uint64 {
func (bn *blobNotifierMap) forRoot(root [32]byte, slot primitives.Slot) chan uint64 {
    maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
    bn.Lock()
    defer bn.Unlock()
    c, ok := bn.notifiers[root]
    if !ok {
        c = make(chan uint64, fieldparams.MaxBlobsPerBlock)
        c = make(chan uint64, maxBlobsPerBlock)
        bn.notifiers[root] = c
    }
    return c
@@ -166,7 +172,7 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) {
    ctx, cancel := context.WithCancel(ctx)
    bn := &blobNotifierMap{
        notifiers: make(map[[32]byte]chan uint64),
        seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
        seenIndex: make(map[[32]byte][]bool),
    }
    srv := &Service{
        ctx: ctx,
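With the per-slot blob limit now looked up via MaxBlobsPerBlock(slot), both the seen-index slice and the notifier channel are sized dynamically rather than from a compile-time constant. A sketch of how a consumer might wait on the notifier, assuming the surrounding wiring; the loop blocks until the expected number of distinct indices has been observed:

// waitForBlobIndices drains the per-root channel until `want` distinct blob
// indices have been delivered. notifyIndex sends each index at most once per
// root, so a simple set is enough to track progress. Sketch only.
func waitForBlobIndices(bn *blobNotifierMap, root [32]byte, slot primitives.Slot, want int) {
    ch := bn.forRoot(root, slot)
    seen := make(map[uint64]struct{}, want)
    for len(seen) < want {
        idx := <-ch
        seen[idx] = struct{}{}
    }
}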
@@ -587,7 +587,7 @@ func (s *MockClockSetter) SetClock(g *startup.Clock) error {
func TestNotifyIndex(t *testing.T) {
    // Initialize a blobNotifierMap
    bn := &blobNotifierMap{
        seenIndex: make(map[[32]byte][fieldparams.MaxBlobsPerBlock]bool),
        seenIndex: make(map[[32]byte][]bool),
        notifiers: make(map[[32]byte]chan uint64),
    }

@@ -596,7 +596,7 @@ func TestNotifyIndex(t *testing.T) {
    copy(root[:], "exampleRoot")

    // Test notifying a new index
    bn.notifyIndex(root, 1)
    bn.notifyIndex(root, 1, 1)
    if !bn.seenIndex[root][1] {
        t.Errorf("Index was not marked as seen")
    }
@@ -607,13 +607,13 @@ func TestNotifyIndex(t *testing.T) {
    }

    // Test notifying an already seen index
    bn.notifyIndex(root, 1)
    bn.notifyIndex(root, 1, 1)
    if len(bn.notifiers[root]) > 1 {
        t.Errorf("Notifier channel should not receive multiple messages for the same index")
    }

    // Test notifying a new index again
    bn.notifyIndex(root, 2)
    bn.notifyIndex(root, 2, 1)
    if !bn.seenIndex[root][2] {
        t.Errorf("Index was not marked as seen")
    }
@@ -33,6 +33,7 @@ type MockBuilderService struct {
    Bid                  *ethpb.SignedBuilderBid
    BidCapella           *ethpb.SignedBuilderBidCapella
    BidDeneb             *ethpb.SignedBuilderBidDeneb
    BidElectra           *ethpb.SignedBuilderBidElectra
    RegistrationCache    *cache.RegistrationCache
    ErrGetHeader         error
    ErrRegisterValidator error
@@ -59,7 +60,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
            return nil, nil, errors.Wrap(err, "could not wrap capella payload")
        }
        return w, nil, s.ErrSubmitBlindedBlock
    case version.Deneb:
    case version.Deneb, version.Electra:
        w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
        if err != nil {
            return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
@@ -72,6 +73,9 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.

// GetHeader for mocking.
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
    if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch || s.BidElectra != nil {
        return builder.WrappedSignedBuilderBidElectra(s.BidElectra)
    }
    if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
        return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
    }
beacon-chain/cache/BUILD.bazel
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "active_balance.go",
        "active_balance_disabled.go", # keep
        "attestation.go",
        "attestation_data.go",
        "balance_cache_key.go",
        "checkpoint_state.go",
@@ -36,18 +37,21 @@ go_library(
    ],
    deps = [
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/operations/attestations/attmap:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//cache/lru:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//crypto/rand:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//runtime/version:go_default_library",
        "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
@@ -66,6 +70,7 @@ go_test(
    srcs = [
        "active_balance_test.go",
        "attestation_data_test.go",
        "attestation_test.go",
        "cache_test.go",
        "checkpoint_state_test.go",
        "committee_fuzz_test.go",
@@ -88,14 +93,17 @@ go_test(
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls/blst:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//proto/prysm/v1alpha1/attestation:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_hashicorp_golang_lru//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
beacon-chain/cache/attestation.go
@@ -0,0 +1,275 @@
package cache

import (
    "sync"

    "github.com/pkg/errors"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/attestations/attmap"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/crypto/bls"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
    log "github.com/sirupsen/logrus"
)

type attGroup struct {
    slot primitives.Slot
    atts []ethpb.Att
}

// AttestationCache holds a map of attGroup items that group together all attestations for a single slot.
// When we add an attestation to the cache by calling Add, we either create a new group with this attestation
// (if this is the first attestation for some slot) or two things can happen:
//
//   - If the attestation is unaggregated, we add its attestation bit to attestation bits of the first
//     attestation in the group.
//   - If the attestation is aggregated, we append it to the group. There should be no redundancy
//     in the list because we ignore redundant aggregates in gossip.
//
// The first bullet point above means that we keep one aggregate attestation to which we keep appending bits
// as new single-bit attestations arrive. This means that at any point during seconds 0-4 of a slot
// we will have only one attestation for this slot in the cache.
//
// NOTE: This design in principle can result in worse aggregates since we lose the ability to aggregate some
// single bit attestations in case of overlaps with incoming aggregates.
//
// The cache also keeps forkchoice attestations in a separate struct. These attestations are used for
// forkchoice-related operations.
type AttestationCache struct {
    atts map[attestation.Id]*attGroup
    sync.RWMutex
    forkchoiceAtts *attmap.Attestations
}

// NewAttestationCache creates a new cache instance.
func NewAttestationCache() *AttestationCache {
    return &AttestationCache{
        atts:           make(map[attestation.Id]*attGroup),
        forkchoiceAtts: attmap.New(),
    }
}

// Add does one of two things:
//
//   - For unaggregated attestations, it adds the attestation bit to attestation bits of the running aggregate,
//     which is the first aggregate for the slot.
//   - For aggregated attestations, it appends the attestation to the existing list of attestations for the slot.
func (c *AttestationCache) Add(att ethpb.Att) error {
    if att.IsNil() {
        log.Debug("Attempted to add a nil attestation to the attestation cache")
        return nil
    }
    if len(att.GetAggregationBits().BitIndices()) == 0 {
        log.Debug("Attempted to add an attestation with 0 bits set to the attestation cache")
        return nil
    }

    c.Lock()
    defer c.Unlock()

    id, err := attestation.NewId(att, attestation.Data)
    if err != nil {
        return errors.Wrapf(err, "could not create attestation ID")
    }

    group := c.atts[id]
    if group == nil {
        group = &attGroup{
            slot: att.GetData().Slot,
            atts: []ethpb.Att{att},
        }
        c.atts[id] = group
        return nil
    }

    if att.IsAggregated() {
        group.atts = append(group.atts, att.Clone())
        return nil
    }

    // This should never happen because we return early for a new group.
    if len(group.atts) == 0 {
        log.Error("Attestation group contains no attestations, skipping insertion")
        return nil
    }

    a := group.atts[0]

    // Indexing is safe because we have guarded against 0 bits set.
    bit := att.GetAggregationBits().BitIndices()[0]
    if a.GetAggregationBits().BitAt(uint64(bit)) {
        return nil
    }
    sig, err := aggregateSig(a, att)
    if err != nil {
        return errors.Wrapf(err, "could not aggregate signatures")
    }

    a.GetAggregationBits().SetBitAt(uint64(bit), true)
    a.SetSignature(sig)

    return nil
}

// GetAll returns all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) GetAll() []ethpb.Att {
    c.RLock()
    defer c.RUnlock()

    var result []ethpb.Att
    for _, group := range c.atts {
        result = append(result, group.atts...)
    }
    return result
}

// Count returns the number of all attestations in the cache, excluding forkchoice attestations.
func (c *AttestationCache) Count() int {
    c.RLock()
    defer c.RUnlock()

    count := 0
    for _, group := range c.atts {
        count += len(group.atts)
    }
    return count
}

// DeleteCovered removes all attestations whose attestation bits are a proper subset of the passed-in attestation.
func (c *AttestationCache) DeleteCovered(att ethpb.Att) error {
    if att.IsNil() {
        return nil
    }

    c.Lock()
    defer c.Unlock()

    id, err := attestation.NewId(att, attestation.Data)
    if err != nil {
        return errors.Wrapf(err, "could not create attestation ID")
    }

    group := c.atts[id]
    if group == nil {
        return nil
    }

    idx := 0
    for _, a := range group.atts {
        if covered, err := att.GetAggregationBits().Contains(a.GetAggregationBits()); err != nil {
            return err
        } else if !covered {
            group.atts[idx] = a
            idx++
        }
    }
    group.atts = group.atts[:idx]

    if len(group.atts) == 0 {
        delete(c.atts, id)
    }

    return nil
}

// PruneBefore removes all attestations whose slot is earlier than the passed-in slot.
func (c *AttestationCache) PruneBefore(slot primitives.Slot) uint64 {
    c.Lock()
    defer c.Unlock()

    var pruneCount int
    for id, group := range c.atts {
        if group.slot < slot {
            pruneCount += len(group.atts)
            delete(c.atts, id)
        }
    }
    return uint64(pruneCount)
}

// AggregateIsRedundant checks whether all attestation bits of the passed-in aggregate
// are already included by any aggregate in the cache.
func (c *AttestationCache) AggregateIsRedundant(att ethpb.Att) (bool, error) {
    if att.IsNil() {
        return true, nil
    }

    c.RLock()
    defer c.RUnlock()

    id, err := attestation.NewId(att, attestation.Data)
    if err != nil {
        return true, errors.Wrapf(err, "could not create attestation ID")
    }

    group := c.atts[id]
    if group == nil {
        return false, nil
    }

    for _, a := range group.atts {
        if redundant, err := a.GetAggregationBits().Contains(att.GetAggregationBits()); err != nil {
            return true, err
        } else if redundant {
            return true, nil
        }
    }

    return false, nil
}

// SaveForkchoiceAttestations saves forkchoice attestations.
func (c *AttestationCache) SaveForkchoiceAttestations(att []ethpb.Att) error {
    return c.forkchoiceAtts.SaveMany(att)
}

// ForkchoiceAttestations returns all forkchoice attestations.
func (c *AttestationCache) ForkchoiceAttestations() []ethpb.Att {
    return c.forkchoiceAtts.GetAll()
}

// DeleteForkchoiceAttestation deletes a forkchoice attestation.
func (c *AttestationCache) DeleteForkchoiceAttestation(att ethpb.Att) error {
    return c.forkchoiceAtts.Delete(att)
}

// GetBySlotAndCommitteeIndex returns all attestations in the cache that match the provided slot
// and committee index. Forkchoice attestations are not returned.
//
// NOTE: This function cannot be declared as a method on the AttestationCache because it is a generic function.
func GetBySlotAndCommitteeIndex[T ethpb.Att](c *AttestationCache, slot primitives.Slot, committeeIndex primitives.CommitteeIndex) []T {
    c.RLock()
    defer c.RUnlock()

    var result []T

    for _, group := range c.atts {
        if len(group.atts) > 0 {
            // We can safely compare the first attestation because all attestations in a group
            // must have the same slot and committee index, since they are under the same key.
            a, ok := group.atts[0].(T)
            if ok && a.GetData().Slot == slot && a.CommitteeBitsVal().BitAt(uint64(committeeIndex)) {
                for _, a := range group.atts {
                    a, ok := a.(T)
                    if ok {
                        result = append(result, a)
                    }
                }
            }
        }
    }

    return result
}

func aggregateSig(agg ethpb.Att, att ethpb.Att) ([]byte, error) {
    aggSig, err := bls.SignatureFromBytesNoValidation(agg.GetSignature())
    if err != nil {
        return nil, err
    }
    attSig, err := bls.SignatureFromBytesNoValidation(att.GetSignature())
    if err != nil {
        return nil, err
    }
    return bls.AggregateSignatures([]bls.Signature{aggSig, attSig}).Marshal(), nil
}
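Taken together, the methods above suggest the following call pattern for the new cache. This is a sketch of intended usage only, written as if inside the cache package, with imports elided and attestation construction assumed to happen elsewhere:

// addToCache folds a single-bit attestation into the running aggregate and
// stores a new aggregate unless every bit it carries is already covered.
func addToCache(c *AttestationCache, unaggregated, aggregate ethpb.Att) error {
    if err := c.Add(unaggregated); err != nil { // merged into the group's first aggregate
        return err
    }
    redundant, err := c.AggregateIsRedundant(aggregate)
    if err != nil {
        return err
    }
    if redundant {
        return nil
    }
    if err := c.Add(aggregate); err != nil { // appended to the group
        return err
    }
    // Remove stored attestations whose bits are a subset of the new aggregate.
    return c.DeleteCovered(aggregate)
}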
beacon-chain/cache/attestation_data.go
@@ -15,24 +15,24 @@ type AttestationConsensusData struct {
    Source forkchoicetypes.Checkpoint
}

// AttestationCache stores cached results of AttestationData requests.
type AttestationCache struct {
// AttestationDataCache stores cached results of AttestationData requests.
type AttestationDataCache struct {
    a *AttestationConsensusData
    sync.RWMutex
}

// NewAttestationCache creates a new instance of AttestationCache.
func NewAttestationCache() *AttestationCache {
    return &AttestationCache{}
// NewAttestationDataCache creates a new instance of AttestationDataCache.
func NewAttestationDataCache() *AttestationDataCache {
    return &AttestationDataCache{}
}

// Get retrieves cached attestation data, recording a cache hit or miss. This method is lock free.
func (c *AttestationCache) Get() *AttestationConsensusData {
func (c *AttestationDataCache) Get() *AttestationConsensusData {
    return c.a
}

// Put adds a response to the cache. This method is lock free.
func (c *AttestationCache) Put(a *AttestationConsensusData) error {
func (c *AttestationDataCache) Put(a *AttestationConsensusData) error {
    if a == nil {
        return errors.New("attestation cannot be nil")
    }
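A brief sketch of the renamed cache in use, mirroring the round-trip the test below performs (written as if inside the cache package, imports elided):

func exampleAttestationDataCache() *AttestationConsensusData {
    c := NewAttestationDataCache()
    if c.Get() == nil {
        // Put rejects nil input; anything else is stored as-is.
        _ = c.Put(&AttestationConsensusData{})
    }
    return c.Get()
}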
beacon-chain/cache/attestation_data_test.go
@@ -9,7 +9,7 @@ import (
)

func TestAttestationCache_RoundTrip(t *testing.T) {
    c := cache.NewAttestationCache()
    c := cache.NewAttestationDataCache()

    a := c.Get()
    require.Nil(t, a)
beacon-chain/cache/attestation_test.go
@@ -0,0 +1,353 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/go-bitfield"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls/blst"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/attestation"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestAdd(t *testing.T) {
|
||||
k, err := blst.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig := k.Sign([]byte{'X'})
|
||||
|
||||
t.Run("new ID", func(t *testing.T) {
|
||||
t.Run("first ID ever", func(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
ab := bitfield.NewBitlist(8)
|
||||
ab.SetBitAt(0, true)
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: ab,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.Add(att))
|
||||
|
||||
require.Equal(t, 1, len(c.atts))
|
||||
group, ok := c.atts[id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), group.slot)
|
||||
require.Equal(t, 1, len(group.atts))
|
||||
assert.DeepEqual(t, group.atts[0], att)
|
||||
})
|
||||
t.Run("other ID exists", func(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
ab := bitfield.NewBitlist(8)
|
||||
ab.SetBitAt(0, true)
|
||||
existingAtt := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: ab,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
existingId, err := attestation.NewId(existingAtt, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[existingId] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: ab,
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
id, err := attestation.NewId(att, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, c.Add(att))
|
||||
|
||||
require.Equal(t, 2, len(c.atts))
|
||||
group, ok := c.atts[id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), group.slot)
|
||||
require.Equal(t, 1, len(group.atts))
|
||||
assert.DeepEqual(t, group.atts[0], att)
|
||||
})
|
||||
})
|
||||
t.Run("aggregated", func(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
existingAtt := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
id, err := attestation.NewId(existingAtt, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
require.NoError(t, c.Add(att))
|
||||
|
||||
require.Equal(t, 1, len(c.atts))
|
||||
group, ok := c.atts[id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), group.slot)
|
||||
require.Equal(t, 2, len(group.atts))
|
||||
assert.DeepEqual(t, group.atts[0], existingAtt)
|
||||
assert.DeepEqual(t, group.atts[1], att)
|
||||
})
|
||||
t.Run("unaggregated - existing bit", func(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
existingAtt := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
existingAtt.AggregationBits.SetBitAt(0, true)
|
||||
id, err := attestation.NewId(existingAtt, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
require.NoError(t, c.Add(att))
|
||||
|
||||
require.Equal(t, 1, len(c.atts))
|
||||
group, ok := c.atts[id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), group.slot)
|
||||
require.Equal(t, 1, len(group.atts))
|
||||
assert.DeepEqual(t, []int{0}, group.atts[0].GetAggregationBits().BitIndices())
|
||||
})
|
||||
t.Run("unaggregated - new bit", func(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
existingAtt := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
existingAtt.AggregationBits.SetBitAt(0, true)
|
||||
id, err := attestation.NewId(existingAtt, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
|
||||
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
require.NoError(t, c.Add(att))
|
||||
|
||||
require.Equal(t, 1, len(c.atts))
|
||||
group, ok := c.atts[id]
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, primitives.Slot(123), group.slot)
|
||||
require.Equal(t, 1, len(group.atts))
|
||||
assert.DeepEqual(t, []int{0, 1}, group.atts[0].GetAggregationBits().BitIndices())
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetAll(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{ðpb.Attestation{}, ðpb.Attestation{}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{ðpb.Attestation{}}}
|
||||
|
||||
assert.Equal(t, 3, len(c.GetAll()))
|
||||
}
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{atts: []ethpb.Att{ðpb.Attestation{}, ðpb.Attestation{}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{atts: []ethpb.Att{ðpb.Attestation{}}}
|
||||
|
||||
assert.Equal(t, 3, c.Count())
|
||||
}
|
||||
|
||||
func TestDeleteCovered(t *testing.T) {
|
||||
k, err := blst.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig := k.Sign([]byte{'X'})
|
||||
|
||||
att1 := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att1.AggregationBits.SetBitAt(0, true)
|
||||
|
||||
att2 := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att2.AggregationBits.SetBitAt(1, true)
|
||||
att2.AggregationBits.SetBitAt(2, true)
|
||||
|
||||
att3 := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att3.AggregationBits.SetBitAt(1, true)
|
||||
att3.AggregationBits.SetBitAt(3, true)
|
||||
att3.AggregationBits.SetBitAt(4, true)
|
||||
|
||||
c := NewAttestationCache()
|
||||
id, err := attestation.NewId(att1, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[id] = &attGroup{slot: att1.Data.Slot, atts: []ethpb.Att{att1, att2, att3}}
|
||||
|
||||
t.Run("no matching group", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
att.AggregationBits.SetBitAt(2, true)
|
||||
att.AggregationBits.SetBitAt(3, true)
|
||||
att.AggregationBits.SetBitAt(4, true)
|
||||
require.NoError(t, c.DeleteCovered(att))
|
||||
|
||||
assert.Equal(t, 3, len(c.atts[id].atts))
|
||||
})
|
||||
t.Run("covered atts deleted", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
att.AggregationBits.SetBitAt(3, true)
|
||||
att.AggregationBits.SetBitAt(4, true)
|
||||
require.NoError(t, c.DeleteCovered(att))
|
||||
|
||||
atts := c.atts[id].atts
|
||||
require.Equal(t, 1, len(atts))
|
||||
assert.DeepEqual(t, att2, atts[0])
|
||||
})
|
||||
t.Run("last att in group deleted", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
att.AggregationBits.SetBitAt(2, true)
|
||||
att.AggregationBits.SetBitAt(3, true)
|
||||
att.AggregationBits.SetBitAt(4, true)
|
||||
require.NoError(t, c.DeleteCovered(att))
|
||||
|
||||
assert.Equal(t, 0, len(c.atts))
|
||||
})
|
||||
}
|
||||
|
||||
func TestPruneBefore(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{ðpb.Attestation{}, ðpb.Attestation{}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 3, atts: []ethpb.Att{ðpb.Attestation{}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 2, atts: []ethpb.Att{ðpb.Attestation{}}}
|
||||
|
||||
count := c.PruneBefore(3)
|
||||
|
||||
require.Equal(t, 1, len(c.atts))
|
||||
_, ok := c.atts[bytesutil.ToBytes32([]byte("id2"))]
|
||||
assert.Equal(t, true, ok)
|
||||
assert.Equal(t, uint64(3), count)
|
||||
}
|
||||
|
||||
func TestAggregateIsRedundant(t *testing.T) {
|
||||
k, err := blst.RandKey()
|
||||
require.NoError(t, err)
|
||||
sig := k.Sign([]byte{'X'})
|
||||
|
||||
c := NewAttestationCache()
|
||||
existingAtt := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 123, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
existingAtt.AggregationBits.SetBitAt(0, true)
|
||||
existingAtt.AggregationBits.SetBitAt(1, true)
|
||||
id, err := attestation.NewId(existingAtt, attestation.Data)
|
||||
require.NoError(t, err)
|
||||
c.atts[id] = &attGroup{slot: existingAtt.Data.Slot, atts: []ethpb.Att{existingAtt}}
|
||||
|
||||
t.Run("no matching group", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: 456, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
|
||||
redundant, err := c.AggregateIsRedundant(att)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, redundant)
|
||||
})
|
||||
t.Run("redundant", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
|
||||
redundant, err := c.AggregateIsRedundant(att)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, redundant)
|
||||
})
|
||||
t.Run("not redundant", func(t *testing.T) {
|
||||
t.Run("strictly better", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(1, true)
|
||||
att.AggregationBits.SetBitAt(2, true)
|
||||
|
||||
redundant, err := c.AggregateIsRedundant(att)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, redundant)
|
||||
})
|
||||
t.Run("overlapping and new bits", func(t *testing.T) {
|
||||
att := ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{Slot: existingAtt.Data.Slot, BeaconBlockRoot: make([]byte, 32), Source: ðpb.Checkpoint{Root: make([]byte, 32)}, Target: ðpb.Checkpoint{Root: make([]byte, 32)}},
|
||||
AggregationBits: bitfield.NewBitlist(8),
|
||||
Signature: sig.Marshal(),
|
||||
}
|
||||
att.AggregationBits.SetBitAt(0, true)
|
||||
att.AggregationBits.SetBitAt(2, true)
|
||||
|
||||
redundant, err := c.AggregateIsRedundant(att)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, false, redundant)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetBySlotAndCommitteeIndex(t *testing.T) {
|
||||
c := NewAttestationCache()
|
||||
c.atts[bytesutil.ToBytes32([]byte("id1"))] = &attGroup{slot: 1, atts: []ethpb.Att{ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 1}}, ðpb.Attestation{Data: ðpb.AttestationData{Slot: 1, CommitteeIndex: 1}}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id2"))] = &attGroup{slot: 2, atts: []ethpb.Att{ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
|
||||
c.atts[bytesutil.ToBytes32([]byte("id3"))] = &attGroup{slot: 1, atts: []ethpb.Att{ðpb.Attestation{Data: ðpb.AttestationData{Slot: 2, CommitteeIndex: 2}}}}
|
||||
|
||||
// committeeIndex has to be small enough to fit in the bitvector
|
||||
atts := GetBySlotAndCommitteeIndex[*ethpb.Attestation](c, 1, 1)
|
||||
require.Equal(t, 2, len(atts))
|
||||
assert.Equal(t, primitives.Slot(1), atts[0].Data.Slot)
|
||||
assert.Equal(t, primitives.Slot(1), atts[1].Data.Slot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(1), atts[0].Data.CommitteeIndex)
|
||||
assert.Equal(t, primitives.CommitteeIndex(1), atts[1].Data.CommitteeIndex)
|
||||
}
|
||||
beacon-chain/cache/cache_test.go
@@ -1,9 +1,10 @@
package cache

import (
    "os"
    "testing"
)

func TestMain(m *testing.M) {
    m.Run()
    os.Exit(m.Run())
}
@@ -105,10 +105,9 @@ func TestProcessSlashings_NotSlashed(t *testing.T) {
    }
    s, err := state_native.InitializeFromProtoAltair(base)
    require.NoError(t, err)
    newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
    require.NoError(t, err)
    require.NoError(t, epoch.ProcessSlashings(s))
    wanted := params.BeaconConfig().MaxEffectiveBalance
    assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
    assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
}

func TestProcessSlashings_SlashedLess(t *testing.T) {
@@ -176,9 +175,8 @@ func TestProcessSlashings_SlashedLess(t *testing.T) {
            original := proto.Clone(tt.state)
            s, err := state_native.InitializeFromProtoAltair(tt.state)
            require.NoError(t, err)
            newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
            require.NoError(t, err)
            assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
            require.NoError(t, epoch.ProcessSlashings(s))
            assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, s.Balances()[0])
        })
    }
}
@@ -192,6 +190,5 @@ func TestProcessSlashings_BadValue(t *testing.T) {
    }
    s, err := state_native.InitializeFromProtoAltair(base)
    require.NoError(t, err)
    _, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierAltair)
    require.ErrorContains(t, "addition overflows", err)
    require.ErrorContains(t, "addition overflows", epoch.ProcessSlashings(s))
}
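The rewritten assertions imply that ProcessSlashings now mutates the passed-in state and returns only an error, reading the proportional slashing multiplier internally instead of taking it as an argument. A sketch of the resulting call site, inferred from the tests above rather than from the full function signature:

// applySlashings shows the new in-place call pattern.
func applySlashings(st state.BeaconState) error {
    if err := epoch.ProcessSlashings(st); err != nil {
        return err
    }
    _ = st.Balances() // balances are read back from the same state object
    return nil
}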
@@ -2,6 +2,7 @@ package altair

import (
    "context"
    "encoding/binary"
    goErrors "errors"
    "fmt"
    "time"
@@ -22,8 +23,6 @@ import (
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

const maxRandomByte = uint64(1<<8 - 1)

var (
    ErrTooLate = errors.New("sync message is too late")
)
@@ -91,19 +90,22 @@ func NextSyncCommittee(ctx context.Context, s state.BeaconState) (*ethpb.SyncCom
// """
// epoch = Epoch(get_current_epoch(state) + 1)
//
// MAX_RANDOM_BYTE = 2**8 - 1
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
// active_validator_indices = get_active_validator_indices(state, epoch)
// active_validator_count = uint64(len(active_validator_indices))
// seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
// i = 0
// i = uint64(0)
// sync_committee_indices: List[ValidatorIndex] = []
// while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
//     shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
//     candidate_index = active_validator_indices[shuffled_index]
//     random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
//     # [Modified in Electra]
//     random_bytes = hash(seed + uint_to_bytes(i // 16))
//     offset = i % 16 * 2
//     random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
//     effective_balance = state.validators[candidate_index].effective_balance
//     # [Modified in Electra:EIP7251]
//     if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
//     if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
//         sync_committee_indices.append(candidate_index)
//     i += 1
// return sync_committee_indices
@@ -123,12 +125,11 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
    cIndices := make([]primitives.ValidatorIndex, 0, syncCommitteeSize)
    hashFunc := hash.CustomSHA256Hasher()

    maxEB := cfg.MaxEffectiveBalanceElectra
    if s.Version() < version.Electra {
        maxEB = cfg.MaxEffectiveBalance
    }
    // Preallocate buffers to avoid repeated allocations
    seedBuffer := make([]byte, len(seed)+8)
    copy(seedBuffer, seed[:])

    for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < params.BeaconConfig().SyncCommitteeSize; i++ {
    for i := primitives.ValidatorIndex(0); uint64(len(cIndices)) < syncCommitteeSize; i++ {
        if ctx.Err() != nil {
            return nil, ctx.Err()
        }
@@ -137,18 +138,30 @@ func NextSyncCommitteeIndices(ctx context.Context, s state.BeaconState) ([]primi
        if err != nil {
            return nil, err
        }

        b := append(seed[:], bytesutil.Bytes8(uint64(i.Div(32)))...)
        randomByte := hashFunc(b)[i%32]
        cIndex := indices[sIndex]
        v, err := s.ValidatorAtIndexReadOnly(cIndex)
        if err != nil {
            return nil, err
        }

        effectiveBal := v.EffectiveBalance()
        if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
            cIndices = append(cIndices, cIndex)

        if s.Version() >= version.Electra {
            // Use the preallocated seed buffer
            binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/16))
            randomByte := hashFunc(seedBuffer)
            offset := (i % 16) * 2
            randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8

            if effectiveBal*fieldparams.MaxRandomValueElectra >= cfg.MaxEffectiveBalanceElectra*randomValue {
                cIndices = append(cIndices, cIndex)
            }
        } else {
            // Use the preallocated seed buffer
            binary.LittleEndian.PutUint64(seedBuffer[len(seed):], uint64(i/32))
            randomByte := hashFunc(seedBuffer)[i%32]
            if effectiveBal*fieldparams.MaxRandomByte >= cfg.MaxEffectiveBalance*uint64(randomByte) {
                cIndices = append(cIndices, cIndex)
            }
        }
    }
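The Electra branch above replaces the single random byte with a 16-bit random value: two bytes of hash(seed ++ uint64(i/16)) are read at offset (i%16)*2 and combined little-endian. A standalone sketch of that computation, with the hasher passed in under the assumed func([]byte) [32]byte signature and an illustrative function name:

// electraRandomValue reproduces the per-candidate random value used in the
// Electra path of NextSyncCommitteeIndices. Sketch for illustration only.
func electraRandomValue(hashFn func([]byte) [32]byte, seed [32]byte, i uint64) uint64 {
    buf := make([]byte, len(seed)+8)
    copy(buf, seed[:])
    binary.LittleEndian.PutUint64(buf[len(seed):], i/16)
    digest := hashFn(buf)
    offset := (i % 16) * 2
    return uint64(digest[offset]) | uint64(digest[offset+1])<<8
}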
@@ -69,12 +69,7 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
    }

    // Modified in Altair and Bellatrix.
    proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
    if err != nil {
        return err
    }
    state, err = e.ProcessSlashings(state, proportionalSlashingMultiplier)
    if err != nil {
    if err := e.ProcessSlashings(state); err != nil {
        return err
    }
    state, err = e.ProcessEth1DataReset(state)
@@ -7,12 +7,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -105,293 +107,162 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
statePhase0, keysPhase0 := util.DeterministicGenesisState(t, 100)
|
||||
stateAltair, keysAltair := util.DeterministicGenesisStateAltair(t, 100)
|
||||
stateBellatrix, keysBellatrix := util.DeterministicGenesisStateBellatrix(t, 100)
|
||||
stateCapella, keysCapella := util.DeterministicGenesisStateCapella(t, 100)
|
||||
stateDeneb, keysDeneb := util.DeterministicGenesisStateDeneb(t, 100)
|
||||
stateElectra, keysElectra := util.DeterministicGenesisStateElectra(t, 100)
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1Phase0 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2Phase0 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31750000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1Electra := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2Electra := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
slashingPhase0 := ðpb.AttesterSlashing{
|
||||
Attestation_1: att1Phase0,
|
||||
Attestation_2: att2Phase0,
|
||||
}
|
||||
slashingElectra := ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: att1Electra,
|
||||
Attestation_2: att2Electra,
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
st state.BeaconState
|
||||
keys []bls.SecretKey
|
||||
att1 ethpb.IndexedAtt
|
||||
att2 ethpb.IndexedAtt
|
||||
slashing ethpb.AttSlashing
|
||||
slashedBalance uint64
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
name: "phase0",
|
||||
st: statePhase0,
|
||||
keys: keysPhase0,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31750000000,
|
||||
},
|
||||
{
|
||||
name: "altair",
|
||||
st: stateAltair,
|
||||
keys: keysAltair,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31500000000,
|
||||
},
|
||||
{
|
||||
name: "bellatrix",
|
||||
st: stateBellatrix,
|
||||
keys: keysBellatrix,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "capella",
|
||||
st: stateCapella,
|
||||
keys: keysCapella,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "deneb",
|
||||
st: stateDeneb,
|
||||
keys: keysDeneb,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
st: stateElectra,
|
||||
keys: keysElectra,
|
||||
att1: att1Electra,
|
||||
att2: att2Electra,
|
||||
slashing: slashingElectra,
|
||||
slashedBalance: 31992187500,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
for _, vv := range tc.st.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
domain, err := signing.Domain(tc.st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, tc.st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(tc.att1.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := tc.keys[0].Sign(signingRoot[:])
|
||||
sig1 := tc.keys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
if tc.att1.Version() >= version.Electra {
|
||||
tc.att1.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
|
||||
} else {
|
||||
tc.att1.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
|
||||
}
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
signingRoot, err = signing.ComputeSigningRoot(tc.att2.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = tc.keys[0].Sign(signingRoot[:])
|
||||
sig1 = tc.keys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
|
||||
if tc.att2.Version() >= version.Electra {
|
||||
tc.att2.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
|
||||
} else {
|
||||
tc.att2.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, tc.st.SetSlot(currentSlot))
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != tc.st.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
tc.st.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31500000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31000000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateCapella(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31000000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
require.Equal(t, tc.slashedBalance, newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
package blocks
|
||||
|
||||
var ProcessBLSToExecutionChange = processBLSToExecutionChange
|
||||
|
||||
var ErrInvalidBLSPrefix = errInvalidBLSPrefix
|
||||
var VerifyBlobCommitmentCount = verifyBlobCommitmentCount
|
||||
|
||||
@@ -198,7 +198,7 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
|
||||
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
|
||||
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadElectra{
|
||||
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
|
||||
ParentHash: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
StateRoot: make([]byte, 32),
|
||||
|
||||
@@ -8,10 +8,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -210,7 +211,7 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := verifyBlobCommitmentCount(body); err != nil {
|
||||
if err := verifyBlobCommitmentCount(st.Slot(), body); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ValidatePayloadWhenMergeCompletes(st, payload); err != nil {
|
||||
@@ -225,7 +226,7 @@ func ProcessPayload(st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBod
|
||||
return nil
|
||||
}
|
||||
|
||||
func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
if body.Version() < version.Deneb {
|
||||
return nil
|
||||
}
|
||||
@@ -233,7 +234,8 @@ func verifyBlobCommitmentCount(body interfaces.ReadOnlyBeaconBlockBody) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(kzgs) > field_params.MaxBlobsPerBlock {
|
||||
maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
|
||||
if len(kzgs) > maxBlobsPerBlock {
|
||||
return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
|
||||
}
|
||||
return nil
|
||||
|
||||
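The change above threads the slot into the commitment-count check so the blob limit can be read from configuration per fork instead of from a fixed field parameter. A hedged sketch of the same idea, with made-up limits and helper names rather than Prysm's configuration API:

```go
package main

import "fmt"

// maxBlobsAt is a stand-in for params.BeaconConfig().MaxBlobsPerBlock(slot):
// once the limit is a function of the slot, it can change at a fork boundary.
// The concrete numbers and the electraStartSlot parameter are assumptions for
// this sketch.
func maxBlobsAt(slot, electraStartSlot uint64) int {
	if slot >= electraStartSlot {
		return 9
	}
	return 6
}

// verifyCommitmentCount mirrors the reworked check in the hunk above: compare
// the block's KZG commitment count against the slot-dependent limit instead of
// a compile-time field parameter.
func verifyCommitmentCount(slot uint64, commitments [][]byte, electraStartSlot uint64) error {
	if limit := maxBlobsAt(slot, electraStartSlot); len(commitments) > limit {
		return fmt.Errorf("too many kzg commitments in block: %d", len(commitments))
	}
	return nil
}

func main() {
	kzgs := make([][]byte, 7)
	fmt.Println(verifyCommitmentCount(100, kzgs, 1_000))   // too many kzg commitments in block: 7
	fmt.Println(verifyCommitmentCount(2_000, kzgs, 1_000)) // <nil>
}
```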
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -923,10 +924,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
|
||||
b := ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{}}
|
||||
rb, err := consensusblocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Body()))
|
||||
require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
|
||||
b = ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, fieldparams.MaxBlobsPerBlock+1)}}
|
||||
b = ðpb.BeaconBlockDeneb{Body: ðpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
|
||||
rb, err = consensusblocks.NewBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", fieldparams.MaxBlobsPerBlock+1), blocks.VerifyBlobCommitmentCount(rb.Body()))
|
||||
require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
|
||||
}
|
||||
|
||||
@@ -100,8 +100,11 @@ func ValidateBLSToExecutionChange(st state.ReadOnlyBeaconState, signed *ethpb.Si
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if val == nil {
|
||||
return nil, errors.Wrap(errInvalidWithdrawalCredentials, "validator is nil") // This should not be possible.
|
||||
}
|
||||
cred := val.WithdrawalCredentials
|
||||
if cred[0] != params.BeaconConfig().BLSWithdrawalPrefixByte {
|
||||
if len(cred) < 2 || cred[0] != params.BeaconConfig().BLSWithdrawalPrefixByte {
|
||||
return nil, errInvalidBLSPrefix
|
||||
}
|
||||
|
||||
|
||||
@@ -113,7 +113,42 @@ func TestProcessBLSToExecutionChange(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, digest[:], val.WithdrawalCredentials)
|
||||
})
|
||||
t.Run("nil validator does not panic", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
pubkey := priv.PublicKey().Marshal()
|
||||
|
||||
message := ðpb.BLSToExecutionChange{
|
||||
ToExecutionAddress: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13},
|
||||
ValidatorIndex: 0,
|
||||
FromBlsPubkey: pubkey,
|
||||
}
|
||||
|
||||
registry := []*ethpb.Validator{
|
||||
nil,
|
||||
}
|
||||
st, err := state_native.InitializeFromProtoPhase0(ðpb.BeaconState{
|
||||
Validators: registry,
|
||||
Fork: ðpb.Fork{
|
||||
CurrentVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
PreviousVersion: params.BeaconConfig().GenesisForkVersion,
|
||||
},
|
||||
Slot: params.BeaconConfig().SlotsPerEpoch * 5,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
signature, err := signing.ComputeDomainAndSign(st, time.CurrentEpoch(st), message, params.BeaconConfig().DomainBLSToExecutionChange, priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
signed := ðpb.SignedBLSToExecutionChange{
|
||||
Message: message,
|
||||
Signature: signature,
|
||||
}
|
||||
_, err = blocks.ValidateBLSToExecutionChange(st, signed)
|
||||
// The state should return an empty validator, even when the validator object in the registry is
|
||||
// nil. This error should return when the withdrawal credentials are invalid or too short.
|
||||
require.ErrorIs(t, err, blocks.ErrInvalidBLSPrefix)
|
||||
})
|
||||
t.Run("non-existent validator", func(t *testing.T) {
|
||||
priv, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
@@ -1117,7 +1152,7 @@ func TestProcessWithdrawals(t *testing.T) {
|
||||
}
|
||||
st, err = state_native.InitializeFromProtoUnsafeElectra(spb)
|
||||
require.NoError(t, err)
|
||||
p, err = consensusblocks.WrappedExecutionPayloadElectra(&enginev1.ExecutionPayloadElectra{Withdrawals: test.Args.Withdrawals})
|
||||
p, err = consensusblocks.WrappedExecutionPayloadDeneb(&enginev1.ExecutionPayloadDeneb{Withdrawals: test.Args.Withdrawals})
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
t.Fatalf("Add a beacon state setup for version %s", version.String(fork))
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"consolidations.go",
|
||||
"deposits.go",
|
||||
"effective_balance_updates.go",
|
||||
"error.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
@@ -55,13 +56,16 @@ go_test(
|
||||
"deposit_fuzz_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"error_test.go",
|
||||
"export_test.go",
|
||||
"registry_updates_test.go",
|
||||
"transition_no_verify_sig_test.go",
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -86,6 +90,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -20,7 +21,7 @@ func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Val
|
||||
vals := make([]*eth.Validator, num)
|
||||
for i := range vals {
|
||||
wd := make([]byte, 32)
|
||||
wd[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
|
||||
wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
|
||||
wd[31] = byte(i)
|
||||
|
||||
vals[i] = ð.Validator{
|
||||
@@ -129,6 +130,57 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
{
|
||||
name: "balance to consume is zero, consolidation balance at limit",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: 0,
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
expectedEpoch: 17, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 0,
|
||||
},
|
||||
{
|
||||
name: "consolidation balance equals consolidation balance to consume",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
expectedEpoch: 16,
|
||||
expectedConsolidationBalanceToConsume: 0,
|
||||
},
|
||||
{
|
||||
name: "consolidation balance exceeds limit by one",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: 0,
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000)+1,
|
||||
expectedEpoch: 18, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000)-1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
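The added churn test cases above (budget at zero, at the limit, and one gwei over) all exercise the consolidation-churn accounting of compute_consolidation_epoch_and_update_churn. Below is a hedged Go paraphrase of that spec pseudocode, under the assumption that the caller passes in the values normally read from the beacon state; it is not Prysm's function signature, but it reproduces the expectations in the test table (one gwei over the limit pushes the consolidation two epochs out and leaves churnLimit-1 to consume).

```go
package main

import "fmt"

// computeConsolidationEpoch is a hedged paraphrase of the consensus-spec
// pseudocode exercised by the churn test cases above. earliestEpoch and
// balanceToConsume are the values currently stored in the state,
// earliestAllowed is compute_activation_exit_epoch(current_epoch), and
// churnLimit is the per-epoch consolidation churn.
func computeConsolidationEpoch(earliestEpoch, earliestAllowed, balanceToConsume, churnLimit, consolidationBalance uint64) (uint64, uint64) {
	newEpoch := max(earliestEpoch, earliestAllowed)
	if earliestEpoch < newEpoch {
		// A brand-new earliest epoch starts with a full churn budget.
		balanceToConsume = churnLimit
	}
	if consolidationBalance > balanceToConsume {
		// The request does not fit: push it out by enough extra epochs of
		// churn, rounding up.
		additionalEpochs := (consolidationBalance-balanceToConsume-1)/churnLimit + 1
		newEpoch += additionalEpochs
		balanceToConsume += additionalEpochs * churnLimit
	}
	return newEpoch, balanceToConsume - consolidationBalance
}

func main() {
	const churnLimit = 256_000_000_000 // hypothetical per-epoch churn in gwei
	// "consolidation balance exceeds limit by one": the request spills two
	// epochs ahead and leaves churnLimit-1 to consume.
	epoch, remaining := computeConsolidationEpoch(16, 15, 0, churnLimit, churnLimit+1)
	fmt.Println(epoch, remaining) // 18 255999999999
}
```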
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -37,8 +38,7 @@ import (
|
||||
// break
|
||||
//
|
||||
// # Calculate the consolidated balance
|
||||
// max_effective_balance = get_max_effective_balance(source_validator)
|
||||
// source_effective_balance = min(state.balances[pending_consolidation.source_index], max_effective_balance)
|
||||
// source_effective_balance = min(state.balances[pending_consolidation.source_index], source_validator.effective_balance)
|
||||
//
|
||||
// # Move active balance to target. Excess balance is withdrawable.
|
||||
// decrease_balance(state, pending_consolidation.source_index, source_effective_balance)
|
||||
@@ -78,7 +78,7 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b := min(validatorBalance, helpers.ValidatorMaxEffectiveBalance(sourceValidator))
|
||||
b := min(validatorBalance, sourceValidator.EffectiveBalance())
|
||||
|
||||
if err := helpers.DecreaseBalance(st, pc.SourceIndex, b); err != nil {
|
||||
return err
|
||||
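The one-line change above caps the moved balance at the source validator's current effective balance rather than its theoretical maximum, matching the updated spec comment. A tiny sketch of that balance move, with plain slices standing in for the beacon state (not Prysm's API):

```go
package main

import "fmt"

// moveConsolidatedBalance is a minimal sketch of the balance move adjusted in
// the hunk above: the amount shifted to the target is capped by the source's
// current effective balance, and anything above the cap stays on the source
// as withdrawable excess.
func moveConsolidatedBalance(balances, effectiveBalances []uint64, source, target int) {
	moved := min(balances[source], effectiveBalances[source])
	balances[source] -= moved
	balances[target] += moved
}

func main() {
	balances := []uint64{33_000_000_000, 32_000_000_000} // source carries 1 ETH of excess
	effective := []uint64{32_000_000_000, 32_000_000_000}
	moveConsolidatedBalance(balances, effective, 0, 1)
	fmt.Println(balances) // [1000000000 64000000000]
}
```

With a source balance of 33 ETH and an effective balance of 32 ETH, only 32 ETH moves; the remaining 1 ETH stays behind on the source as withdrawable excess.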
@@ -141,8 +141,8 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
//
|
||||
// # Verify that target has execution withdrawal credentials
|
||||
// if not has_execution_withdrawal_credential(target_validator):
|
||||
// # Verify that target has compounding withdrawal credentials
|
||||
// if not has_compounding_withdrawal_credential(target_validator):
|
||||
// return
|
||||
//
|
||||
// # Verify the source and the target are active
|
||||
@@ -175,10 +175,6 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
// source_index=source_index,
|
||||
// target_index=target_index
|
||||
// ))
|
||||
//
|
||||
// # Churn any target excess active balance of target and raise its max
|
||||
// if has_eth1_withdrawal_credential(target_validator):
|
||||
// switch_to_compounding_validator(state, target_index)
|
||||
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
|
||||
if len(reqs) == 0 || st == nil {
|
||||
return nil
|
||||
@@ -189,6 +185,9 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
|
||||
|
||||
for _, cr := range reqs {
|
||||
if cr == nil {
|
||||
return errors.New("nil consolidation request")
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
|
||||
}
|
||||
@@ -238,13 +237,18 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
roSrcV, err := state_native.NewValidator(srcV)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
// Verify source withdrawal credentials
|
||||
if !helpers.HasExecutionWithdrawalCredentials(srcV) {
|
||||
if !roSrcV.HasExecutionWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
@@ -253,7 +257,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
}
|
||||
|
||||
// Target validator must have their withdrawal credentials set appropriately.
|
||||
if !helpers.HasExecutionWithdrawalCredentials(tgtV) {
|
||||
if !tgtV.HasCompoundingWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -261,7 +265,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
|
||||
continue
|
||||
}
|
||||
// Neither validator are exiting.
|
||||
// Neither validator is exiting.
|
||||
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
|
||||
continue
|
||||
}
|
||||
@@ -298,13 +302,6 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if err := st.AppendPendingConsolidation(ð.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
|
||||
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
if helpers.HasETH1WithdrawalCredential(tgtV) {
|
||||
if err := SwitchToCompoundingValidator(st, tgtIdx); err != nil {
|
||||
log.WithError(err).Error("failed to switch to compounding validator")
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -376,7 +373,7 @@ func IsValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.Conso
|
||||
return false
|
||||
}
|
||||
|
||||
if !helpers.HasETH1WithdrawalCredential(srcV) {
|
||||
if !srcV.HasETH1WithdrawalCredentials() {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -46,6 +46,7 @@ func TestProcessPendingConsolidations(t *testing.T) {
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xFF},
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
|
||||
},
|
||||
{
|
||||
WithdrawalCredentials: []byte{0x01, 0xAB},
|
||||
@@ -208,7 +209,22 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
state state.BeaconState
|
||||
reqs []*enginev1.ConsolidationRequest
|
||||
validate func(*testing.T, state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil request",
|
||||
state: func() state.BeaconState {
|
||||
st := ð.BeaconStateElectra{}
|
||||
s, err := state_native.InitializeFromProtoElectra(st)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
reqs: []*enginev1.ConsolidationRequest{nil},
|
||||
validate: func(t *testing.T, st state.BeaconState) {
|
||||
require.DeepEqual(t, st, st)
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "one valid request",
|
||||
state: func() state.BeaconState {
|
||||
@@ -218,7 +234,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
}
|
||||
// Validator scenario setup. See comments in reqs section.
|
||||
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
|
||||
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(0)
|
||||
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
|
||||
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
st.Validators[13].ExitEpoch = 10
|
||||
@@ -246,7 +262,7 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
SourcePubkey: []byte("val_5"),
|
||||
TargetPubkey: []byte("val_6"),
|
||||
},
|
||||
// Target does not have their withdrawal credentials set appropriately.
|
||||
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
|
||||
{
|
||||
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
|
||||
SourcePubkey: []byte("val_7"),
|
||||
@@ -404,7 +420,13 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
|
||||
require.NoError(t, err)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tt.validate != nil {
|
||||
tt.validate(t, tt.state)
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
@@ -273,7 +272,7 @@ func ProcessPendingDeposits(ctx context.Context, st state.BeaconState, activeBal
|
||||
isChurnLimitReached := false
|
||||
|
||||
var pendingDepositsToBatchVerify []*ethpb.PendingDeposit
|
||||
var pendingDepositsToPostpone []*eth.PendingDeposit
|
||||
var pendingDepositsToPostpone []*ethpb.PendingDeposit
|
||||
|
||||
depBalToConsume, err := st.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
@@ -386,14 +385,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
return errors.Wrap(err, "batch signature verification failed")
|
||||
}
|
||||
|
||||
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
|
||||
|
||||
// Process each deposit individually
|
||||
for _, pendingDeposit := range pendingDeposits {
|
||||
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
|
||||
if !found {
|
||||
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
|
||||
}
|
||||
validSignature := allSignaturesVerified
|
||||
|
||||
// If batch verification failed, check the individual deposit signature
|
||||
@@ -411,7 +404,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
|
||||
// Add validator to the registry if the signature is valid
|
||||
if validSignature {
|
||||
if found {
|
||||
_, has := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
|
||||
if has {
|
||||
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
|
||||
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
|
||||
return errors.Wrap(err, "could not increase balance")
|
||||
@@ -593,10 +587,10 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get deposit requests start index")
|
||||
}
|
||||
if request == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
|
||||
if request == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set deposit requests start index")
|
||||
}
|
||||
|
||||
@@ -333,6 +333,7 @@ func TestProcessDepositRequests(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetDepositRequestsStartIndex(1))
|
||||
|
||||
t.Run("empty requests continues", func(t *testing.T) {
|
||||
newSt, err := electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{})
|
||||
|
||||
@@ -3,7 +3,6 @@ package electra
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -40,7 +39,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
|
||||
if val == nil {
|
||||
if val.IsNil() {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
@@ -49,7 +48,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
balance := bals[idx]
|
||||
|
||||
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ func TestProcessEffectiveBalnceUpdates(t *testing.T) {
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
|
||||
WithdrawalCredentials: nil,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
|
||||
16
beacon-chain/core/electra/error.go
Normal file
@@ -0,0 +1,16 @@
package electra

import "github.com/pkg/errors"

type execReqErr struct {
	error
}

// IsExecutionRequestError returns true if the error has `execReqErr`.
func IsExecutionRequestError(e error) bool {
	if e == nil {
		return false
	}
	var d execReqErr
	return errors.As(e, &d)
}
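The new error.go above tags execution-request failures with an unexported wrapper type so callers can recognize them with errors.As even after further wrapping. A standard-library-only sketch of the same detect-by-type pattern (the wrapping message is made up for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// execReqErr mirrors the unexported wrapper type added above: embedding the
// error lets errors.As pick it out of a wrapped chain later.
type execReqErr struct{ error }

func isExecutionRequestError(e error) bool {
	if e == nil {
		return false
	}
	var d execReqErr
	return errors.As(e, &d)
}

func main() {
	base := execReqErr{errors.New("could not process deposit requests")}
	wrapped := fmt.Errorf("block processing failed: %w", base)

	fmt.Println(isExecutionRequestError(wrapped))             // true
	fmt.Println(isExecutionRequestError(errors.New("other"))) // false
}
```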
45
beacon-chain/core/electra/error_test.go
Normal file
@@ -0,0 +1,45 @@
package electra

import (
	"testing"

	"github.com/pkg/errors"
)

func TestIsExecutionRequestError(t *testing.T) {
	tests := []struct {
		name string
		err  error
		want bool
	}{
		{
			name: "nil error",
			err:  nil,
			want: false,
		},
		{
			name: "random error",
			err:  errors.New("some error"),
			want: false,
		},
		{
			name: "execution request error",
			err:  execReqErr{errors.New("execution request failed")},
			want: true,
		},
		{
			name: "wrapped execution request error",
			err:  errors.Wrap(execReqErr{errors.New("execution request failed")}, "wrapped"),
			want: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := IsExecutionRequestError(tt.err)
			if got != tt.want {
				t.Errorf("IsExecutionRequestError(%v) = %v, want %v", tt.err, got, tt.want)
			}
		})
	}
}
@@ -18,23 +18,24 @@ import (
|
||||
//
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def process_registry_updates(state: BeaconState) -> None:
|
||||
// # Process activation eligibility and ejections
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if is_eligible_for_activation_queue(validator):
|
||||
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
|
||||
// def process_registry_updates(state: BeaconState) -> None:
|
||||
// # Process activation eligibility and ejections
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if is_eligible_for_activation_queue(validator): # [Modified in Electra:EIP7251]
|
||||
// validator.activation_eligibility_epoch = get_current_epoch(state) + 1
|
||||
//
|
||||
// if (
|
||||
// is_active_validator(validator, get_current_epoch(state))
|
||||
// and validator.effective_balance <= EJECTION_BALANCE
|
||||
// ):
|
||||
// initiate_validator_exit(state, ValidatorIndex(index))
|
||||
// if (
|
||||
// is_active_validator(validator, get_current_epoch(state))
|
||||
// and validator.effective_balance <= EJECTION_BALANCE
|
||||
// ):
|
||||
// initiate_validator_exit(state, ValidatorIndex(index)) # [Modified in Electra:EIP7251]
|
||||
//
|
||||
// # Activate all eligible validators
|
||||
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
// for validator in state.validators:
|
||||
// if is_eligible_for_activation(state, validator):
|
||||
// validator.activation_epoch = activation_epoch
|
||||
// # Activate all eligible validators
|
||||
// # [Modified in Electra:EIP7251]
|
||||
// activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
|
||||
// for validator in state.validators:
|
||||
// if is_eligible_for_activation(state, validator):
|
||||
// validator.activation_epoch = activation_epoch
|
||||
func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
ejectionBal := params.BeaconConfig().EjectionBalance
|
||||
@@ -90,8 +91,8 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Activate all eligible validators.
|
||||
for _, idx := range eligibleForActivation {
|
||||
// Activate all eligible validators.
|
||||
v, err := st.ValidatorAtIndex(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -40,14 +40,17 @@ var (
|
||||
// process_justification_and_finalization(state)
|
||||
// process_inactivity_updates(state)
|
||||
// process_rewards_and_penalties(state)
|
||||
// process_registry_updates(state)
|
||||
// process_slashings(state)
|
||||
// process_registry_updates(state) # [Modified in Electra:EIP7251]
|
||||
// process_slashings(state) # [Modified in Electra:EIP7251]
|
||||
// process_eth1_data_reset(state)
|
||||
// process_pending_deposits(state) # New in EIP7251
|
||||
// process_pending_consolidations(state) # New in EIP7251
|
||||
// process_effective_balance_updates(state)
|
||||
// process_pending_deposits(state) # [New in Electra:EIP7251]
|
||||
// process_pending_consolidations(state) # [New in Electra:EIP7251]
|
||||
// process_effective_balance_updates(state) # [Modified in Electra:EIP7251]
|
||||
// process_slashings_reset(state)
|
||||
// process_randao_mixes_reset(state)
|
||||
// process_historical_summaries_update(state)
|
||||
// process_participation_flag_updates(state)
|
||||
// process_sync_committee_updates(state)
|
||||
func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "electra.ProcessEpoch")
|
||||
defer span.End()
|
||||
@@ -75,24 +78,16 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not process rewards and penalties")
|
||||
}
|
||||
|
||||
if err := ProcessRegistryUpdates(ctx, state); err != nil {
|
||||
return errors.Wrap(err, "could not process registry updates")
|
||||
}
|
||||
|
||||
proportionalSlashingMultiplier, err := state.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessSlashings(state, proportionalSlashingMultiplier)
|
||||
if err != nil {
|
||||
if err := ProcessSlashings(state); err != nil {
|
||||
return err
|
||||
}
|
||||
state, err = ProcessEth1DataReset(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = ProcessPendingDeposits(ctx, state, primitives.Gwei(bp.ActiveCurrentEpoch)); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -102,7 +97,6 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
if err = ProcessEffectiveBalanceUpdates(state); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessSlashingsReset(state)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -115,17 +109,14 @@ func ProcessEpoch(ctx context.Context, state state.BeaconState) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err = ProcessParticipationFlagUpdates(state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = ProcessSyncCommitteeUpdates(ctx, state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
@@ -82,16 +81,31 @@ func ProcessOperations(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution requests")
|
||||
}
|
||||
for _, d := range requests.Deposits {
|
||||
if d == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit requests")
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
|
||||
}
|
||||
for _, w := range requests.Withdrawals {
|
||||
if w == nil {
|
||||
return nil, errors.New("nil withdrawal request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawal requests")
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
|
||||
}
|
||||
for _, c := range requests.Consolidations {
|
||||
if c == nil {
|
||||
return nil, errors.New("nil consolidation request")
|
||||
}
|
||||
}
|
||||
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
61
beacon-chain/core/electra/transition_no_verify_sig_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessOperationsWithNilRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "Nil deposit request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
|
||||
},
|
||||
errMsg: "nil deposit request",
|
||||
},
|
||||
{
|
||||
name: "Nil withdrawal request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
|
||||
},
|
||||
errMsg: "nil withdrawal request",
|
||||
},
|
||||
{
|
||||
name: "Nil consolidation request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
|
||||
},
|
||||
errMsg: "nil consolidation request",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
|
||||
_, err = electra.ProcessOperations(context.Background(), st, b.Block())
|
||||
require.ErrorContains(t, tc.errMsg, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
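The new test file above and the matching guards added to ProcessOperations make a nil element in any execution-request slice fail fast with a descriptive error instead of panicking deeper in processing. A generic sketch of that guard pattern (the type and names are illustrative only, not Prysm's code):

```go
package main

import (
	"errors"
	"fmt"
)

// rejectNil scans a request slice for nil entries and fails fast with a
// descriptive error, in the spirit of the guards shown above.
func rejectNil[T any](kind string, reqs []*T) error {
	for _, r := range reqs {
		if r == nil {
			return errors.New("nil " + kind + " request")
		}
	}
	return nil
}

type depositRequest struct{ index uint64 }

func main() {
	fmt.Println(rejectNil("deposit", []*depositRequest{nil}))        // nil deposit request
	fmt.Println(rejectNil("deposit", []*depositRequest{{index: 1}})) // <nil>
}
```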
@@ -194,7 +194,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
@@ -240,7 +240,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderElectra{
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
|
||||
@@ -102,7 +102,7 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderElectra)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
@@ -111,7 +111,7 @@ func TestUpgradeToElectra(t *testing.T) {
|
||||
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderElectra{
|
||||
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
|
||||
@@ -116,7 +116,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
hasCorrectCredential := validator.HasExecutionWithdrawalCredentials()
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
@@ -165,7 +165,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
hasExcessBalance := vBal > params.BeaconConfig().MinActivationBalance+pendingBalanceToWithdraw
|
||||
|
||||
// Only allow partial withdrawals with compounding withdrawal credentials
|
||||
if helpers.HasCompoundingWithdrawalCredential(validator) && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
if validator.HasCompoundingWithdrawalCredentials() && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
// Spec definition:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
|
||||
@@ -38,6 +38,17 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
wantFn func(t *testing.T, got state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil request",
|
||||
args: args{
|
||||
st: func() state.BeaconState { return st }(),
|
||||
wrs: []*enginev1.WithdrawalRequest{nil},
|
||||
},
|
||||
wantErr: true,
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
require.DeepEqual(t, got, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "happy path exit and withdrawal only",
|
||||
args: args{
|
||||
|
||||
@@ -141,29 +141,76 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// ProcessSlashings processes the slashed validators during epoch processing,
|
||||
// ProcessSlashings processes the slashed validators during epoch processing. This is a state mutating method.
|
||||
//
|
||||
// Electra spec definition:
|
||||
//
|
||||
// def process_slashings(state: BeaconState) -> None:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
|
||||
// if state.version == electra:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
|
||||
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// if state.version == electra:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(
|
||||
// sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX,
|
||||
// total_balance
|
||||
// )
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from total balance to avoid uint64 overflow
|
||||
// penalty_per_effective_balance_increment = adjusted_total_slashing_balance // (total_balance // increment)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// effective_balance_increments = validator.effective_balance // increment
|
||||
// # [Modified in Electra:EIP7251]
|
||||
// penalty = penalty_per_effective_balance_increment * effective_balance_increments
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.BeaconState, error) {
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
//
|
||||
// Bellatrix spec definition:
|
||||
//
|
||||
// def process_slashings(state: BeaconState) -> None:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(
|
||||
// sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX, # [Modified in Bellatrix]
|
||||
// total_balance
|
||||
// )
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
//
|
||||
// Altair spec definition:
|
||||
//
|
||||
// def process_slashings(state: BeaconState) -> None:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
//
|
||||
// Phase0 spec definition:
|
||||
//
|
||||
// def process_slashings(state: BeaconState) -> None:
|
||||
// epoch = get_current_epoch(state)
|
||||
// total_balance = get_total_active_balance(state)
|
||||
// adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
|
||||
// for index, validator in enumerate(state.validators):
|
||||
// if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
|
||||
// increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from penalty numerator to avoid uint64 overflow
|
||||
// penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
|
||||
// penalty = penalty_numerator // total_balance * increment
|
||||
// decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
func ProcessSlashings(st state.BeaconState) error {
|
||||
slashingMultiplier, err := st.ProportionalSlashingMultiplier()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get proportional slashing multiplier")
|
||||
}
|
||||
currentEpoch := time.CurrentEpoch(st)
|
||||
totalBalance, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get total active balance")
|
||||
return errors.Wrap(err, "could not get total active balance")
|
||||
}
|
||||
|
||||
// Compute slashed balances in the current epoch
|
||||
@@ -175,7 +222,7 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
|
||||
for _, slashing := range slashings {
|
||||
totalSlashing, err = math.Add64(totalSlashing, slashing)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,14 +256,14 @@ func ProcessSlashings(st state.BeaconState, slashingMultiplier uint64) (state.Be
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if changed {
|
||||
if err := st.SetBalances(bals); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return st, nil
|
||||
return nil
|
||||
}
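To make the Electra penalty arithmetic quoted in the spec comment above concrete, here is a minimal standalone sketch; the function name and parameters are hypothetical and not part of Prysm.

// electraSlashingPenalty mirrors the spec math: the adjusted slashing balance is
// turned into a per-increment penalty, then scaled by the validator's effective
// balance expressed in whole increments. The order of integer divisions matters.
func electraSlashingPenalty(effectiveBalance, adjustedTotalSlashingBalance, totalBalance, increment uint64) uint64 {
	penaltyPerIncrement := adjustedTotalSlashingBalance / (totalBalance / increment)
	effectiveBalanceIncrements := effectiveBalance / increment
	return penaltyPerIncrement * effectiveBalanceIncrements
}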
|
||||
|
||||
// ProcessEth1DataReset processes updates to ETH1 data votes during epoch processing.
|
||||
|
||||
@@ -32,10 +32,9 @@ func TestProcessSlashings_NotSlashed(t *testing.T) {
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, epoch.ProcessSlashings(s))
|
||||
wanted := params.BeaconConfig().MaxEffectiveBalance
|
||||
assert.Equal(t, wanted, newState.Balances()[0], "Unexpected slashed balance")
|
||||
assert.Equal(t, wanted, s.Balances()[0], "Unexpected slashed balance")
|
||||
}
|
||||
|
||||
func TestProcessSlashings_SlashedLess(t *testing.T) {
|
||||
@@ -111,9 +110,8 @@ func TestProcessSlashings_SlashedLess(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoPhase0(tt.state)
|
||||
require.NoError(t, err)
|
||||
helpers.ClearCache()
|
||||
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
|
||||
require.NoError(t, epoch.ProcessSlashings(s))
|
||||
assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, s.Balances()[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -365,8 +363,7 @@ func TestProcessSlashings_BadValue(t *testing.T) {
|
||||
}
|
||||
s, err := state_native.InitializeFromProtoPhase0(base)
|
||||
require.NoError(t, err)
|
||||
_, err = epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplier)
|
||||
require.ErrorContains(t, "addition overflows", err)
|
||||
require.ErrorContains(t, "addition overflows", epoch.ProcessSlashings(s))
|
||||
}
|
||||
|
||||
func TestProcessHistoricalDataUpdate(t *testing.T) {
|
||||
@@ -514,9 +511,8 @@ func TestProcessSlashings_SlashedElectra(t *testing.T) {
|
||||
s, err := state_native.InitializeFromProtoElectra(tt.state)
|
||||
require.NoError(t, err)
|
||||
helpers.ClearCache()
|
||||
newState, err := epoch.ProcessSlashings(s, params.BeaconConfig().ProportionalSlashingMultiplierBellatrix)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, newState.Balances()[0], "ProcessSlashings({%v}) = newState; newState.Balances[0] = %d", original, newState.Balances()[0])
|
||||
require.NoError(t, epoch.ProcessSlashings(s))
|
||||
assert.Equal(t, tt.want, s.Balances()[0], "ProcessSlashings({%v}); s.Balances[0] = %d", original, s.Balances()[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
32
beacon-chain/core/fulu/BUILD.bazel
Normal file
@@ -0,0 +1,32 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["upgrade.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["upgrade_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
],
|
||||
)
|
||||
175
beacon-chain/core/fulu/upgrade.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package fulu
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
// UpgradeToFulu upgrades a given beacon state to a Fulu beacon state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nextSyncCommittee, err := beaconState.NextSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inactivityScores, err := beaconState.InactivityScores()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
txRoot, err := payloadHeader.TransactionsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wdRoot, err := payloadHeader.WithdrawalsRoot()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wi, err := beaconState.NextWithdrawalIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vi, err := beaconState.NextWithdrawalValidatorIndex()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
summaries, err := beaconState.HistoricalSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
historicalRoots, err := beaconState.HistoricalRoots()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
excessBlobGas, err := payloadHeader.ExcessBlobGas()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
blobGasUsed, err := payloadHeader.BlobGasUsed()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exitBalanceToConsume, err := beaconState.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
earliestExitEpoch, err := beaconState.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationBalanceToConsume, err := beaconState.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
earliestConsolidationEpoch, err := beaconState.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingDeposits, err := beaconState.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingPartialWithdrawals, err := beaconState.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingConsolidations, err := beaconState.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateFulu{
|
||||
GenesisTime: beaconState.GenesisTime(),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
Slot: beaconState.Slot(),
|
||||
Fork: ðpb.Fork{
|
||||
PreviousVersion: beaconState.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().FuluForkVersion,
|
||||
Epoch: time.CurrentEpoch(beaconState),
|
||||
},
|
||||
LatestBlockHeader: beaconState.LatestBlockHeader(),
|
||||
BlockRoots: beaconState.BlockRoots(),
|
||||
StateRoots: beaconState.StateRoots(),
|
||||
HistoricalRoots: historicalRoots,
|
||||
Eth1Data: beaconState.Eth1Data(),
|
||||
Eth1DataVotes: beaconState.Eth1DataVotes(),
|
||||
Eth1DepositIndex: beaconState.Eth1DepositIndex(),
|
||||
Validators: beaconState.Validators(),
|
||||
Balances: beaconState.Balances(),
|
||||
RandaoMixes: beaconState.RandaoMixes(),
|
||||
Slashings: beaconState.Slashings(),
|
||||
PreviousEpochParticipation: prevEpochParticipation,
|
||||
CurrentEpochParticipation: currentEpochParticipation,
|
||||
JustificationBits: beaconState.JustificationBits(),
|
||||
PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
|
||||
CurrentJustifiedCheckpoint: beaconState.CurrentJustifiedCheckpoint(),
|
||||
FinalizedCheckpoint: beaconState.FinalizedCheckpoint(),
|
||||
InactivityScores: inactivityScores,
|
||||
CurrentSyncCommittee: currentSyncCommittee,
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: payloadHeader.ParentHash(),
|
||||
FeeRecipient: payloadHeader.FeeRecipient(),
|
||||
StateRoot: payloadHeader.StateRoot(),
|
||||
ReceiptsRoot: payloadHeader.ReceiptsRoot(),
|
||||
LogsBloom: payloadHeader.LogsBloom(),
|
||||
PrevRandao: payloadHeader.PrevRandao(),
|
||||
BlockNumber: payloadHeader.BlockNumber(),
|
||||
GasLimit: payloadHeader.GasLimit(),
|
||||
GasUsed: payloadHeader.GasUsed(),
|
||||
Timestamp: payloadHeader.Timestamp(),
|
||||
ExtraData: payloadHeader.ExtraData(),
|
||||
BaseFeePerGas: payloadHeader.BaseFeePerGas(),
|
||||
BlockHash: payloadHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
ExcessBlobGas: excessBlobGas,
|
||||
BlobGasUsed: blobGasUsed,
|
||||
},
|
||||
NextWithdrawalIndex: wi,
|
||||
NextWithdrawalValidatorIndex: vi,
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
DepositBalanceToConsume: depositBalanceToConsume,
|
||||
ExitBalanceToConsume: exitBalanceToConsume,
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: earliestConsolidationEpoch,
|
||||
PendingDeposits: pendingDeposits,
|
||||
PendingPartialWithdrawals: pendingPartialWithdrawals,
|
||||
PendingConsolidations: pendingConsolidations,
|
||||
}
|
||||
|
||||
// Need to cast the beaconState to use in helper functions
|
||||
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to initialize post fulu beaconState")
|
||||
}
|
||||
|
||||
return post, nil
|
||||
}
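A hedged sketch of how a fork-transition routine might call this upgrade at the Fulu boundary; the surrounding wiring, the error handling, and the FuluForkEpoch config field are assumptions, not part of this diff.

// Assumed call site inside an epoch/fork transition; st and err are in scope.
if time.CurrentEpoch(st) == params.BeaconConfig().FuluForkEpoch {
	st, err = fulu.UpgradeToFulu(st)
	if err != nil {
		return nil, errors.Wrap(err, "could not upgrade state to fulu")
	}
}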
|
||||
185
beacon-chain/core/fulu/upgrade_test.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package fulu_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestUpgradeToFulu(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
|
||||
require.NoError(t, st.SetHistoricalRoots([][]byte{{1}}))
|
||||
vals := st.Validators()
|
||||
vals[0].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
|
||||
vals[1].WithdrawalCredentials = []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte}
|
||||
require.NoError(t, st.SetValidators(vals))
|
||||
bals := st.Balances()
|
||||
bals[1] = params.BeaconConfig().MinActivationBalance + 1000
|
||||
require.NoError(t, st.SetBalances(bals))
|
||||
|
||||
preForkState := st.Copy()
|
||||
mSt, err := fulu.UpgradeToFulu(st)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
|
||||
f := mSt.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
PreviousVersion: st.Fork().CurrentVersion,
|
||||
CurrentVersion: params.BeaconConfig().FuluForkVersion,
|
||||
Epoch: time.CurrentEpoch(st),
|
||||
}, f)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
|
||||
hr1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
hr2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hr1, hr2)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
|
||||
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
|
||||
numValidators := mSt.NumValidators()
|
||||
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
csc, err := mSt.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err := preForkState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, csc)
|
||||
|
||||
nsc, err := mSt.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err = preForkState.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, nsc)
|
||||
|
||||
header, err := mSt.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
protoHeader, ok := header.Proto().(*enginev1.ExecutionPayloadHeaderDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
prevHeader, err := preForkState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
txRoot, err := prevHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: prevHeader.ParentHash(),
|
||||
FeeRecipient: prevHeader.FeeRecipient(),
|
||||
StateRoot: prevHeader.StateRoot(),
|
||||
ReceiptsRoot: prevHeader.ReceiptsRoot(),
|
||||
LogsBloom: prevHeader.LogsBloom(),
|
||||
PrevRandao: prevHeader.PrevRandao(),
|
||||
BlockNumber: prevHeader.BlockNumber(),
|
||||
GasLimit: prevHeader.GasLimit(),
|
||||
GasUsed: prevHeader.GasUsed(),
|
||||
Timestamp: prevHeader.Timestamp(),
|
||||
ExtraData: prevHeader.ExtraData(),
|
||||
BaseFeePerGas: prevHeader.BaseFeePerGas(),
|
||||
BlockHash: prevHeader.BlockHash(),
|
||||
TransactionsRoot: txRoot,
|
||||
WithdrawalsRoot: wdRoot,
|
||||
}
|
||||
require.DeepEqual(t, wanted, protoHeader)
|
||||
|
||||
nwi, err := mSt.NextWithdrawalIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), nwi)
|
||||
|
||||
lwvi, err := mSt.NextWithdrawalValidatorIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.ValidatorIndex(0), lwvi)
|
||||
|
||||
summaries, err := mSt.HistoricalSummaries()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
|
||||
preDepositRequestsStartIndex, err := preForkState.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
postDepositRequestsStartIndex, err := mSt.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preDepositRequestsStartIndex, postDepositRequestsStartIndex)
|
||||
|
||||
preDepositBalanceToConsume, err := preForkState.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
postDepositBalanceToConsume, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preDepositBalanceToConsume, postDepositBalanceToConsume)
|
||||
|
||||
preExitBalanceToConsume, err := preForkState.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
postExitBalanceToConsume, err := mSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preExitBalanceToConsume, postExitBalanceToConsume)
|
||||
|
||||
preEarliestExitEpoch, err := preForkState.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
postEarliestExitEpoch, err := mSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preEarliestExitEpoch, postEarliestExitEpoch)
|
||||
|
||||
preConsolidationBalanceToConsume, err := preForkState.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
postConsolidationBalanceToConsume, err := mSt.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preConsolidationBalanceToConsume, postConsolidationBalanceToConsume)
|
||||
|
||||
preEarliestConsolidationEpoch, err := preForkState.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
postEarliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preEarliestConsolidationEpoch, postEarliestConsolidationEpoch)
|
||||
|
||||
prePendingDeposits, err := preForkState.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
postPendingDeposits, err := mSt.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, prePendingDeposits, postPendingDeposits)
|
||||
|
||||
prePendingPartialWithdrawals, err := preForkState.PendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
postPendingPartialWithdrawals, err := mSt.PendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, prePendingPartialWithdrawals, postPendingPartialWithdrawals)
|
||||
|
||||
prePendingConsolidations, err := preForkState.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
postPendingConsolidations, err := mSt.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, prePendingConsolidations, postPendingConsolidations)
|
||||
}
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"beacon_committee.go",
|
||||
"block.go",
|
||||
"genesis.go",
|
||||
"legacy.go",
|
||||
"metrics.go",
|
||||
"randao.go",
|
||||
"rewards_penalties.go",
|
||||
@@ -25,7 +26,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
@@ -53,6 +53,7 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"beacon_committee_test.go",
|
||||
"block_test.go",
|
||||
"legacy_test.go",
|
||||
"private_access_fuzz_noop_test.go", # keep
|
||||
"private_access_test.go",
|
||||
"randao_test.go",
|
||||
@@ -87,5 +88,6 @@ go_test(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -32,6 +32,9 @@ func ValidateNilAttestation(attestation ethpb.Att) error {
|
||||
if attestation.GetData().Target == nil {
|
||||
return errors.New("attestation's target can't be nil")
|
||||
}
|
||||
if attestation.IsSingle() {
|
||||
return nil
|
||||
}
|
||||
if attestation.GetAggregationBits() == nil {
|
||||
return errors.New("attestation's bitfield can't be nil")
|
||||
}
|
||||
@@ -67,12 +70,6 @@ func IsAggregator(committeeCount uint64, slotSig []byte) (bool, error) {
|
||||
return binary.LittleEndian.Uint64(b[:8])%modulo == 0, nil
|
||||
}
|
||||
|
||||
// IsAggregated returns true if the attestation is an aggregated attestation,
|
||||
// false otherwise.
|
||||
func IsAggregated(attestation ethpb.Att) bool {
|
||||
return attestation.GetAggregationBits().Count() > 1
|
||||
}
|
||||
|
||||
// ComputeSubnetForAttestation returns the subnet for which the provided attestation will be broadcasted to.
|
||||
// This differs from the spec definition by instead passing in the active validators indices in the attestation's
|
||||
// given epoch.
|
||||
|
||||
@@ -308,6 +308,16 @@ func TestValidateNilAttestation(t *testing.T) {
|
||||
},
|
||||
errString: "",
|
||||
},
|
||||
{
|
||||
name: "single attestation",
|
||||
attestation: ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Target: ðpb.Checkpoint{},
|
||||
Source: ðpb.Checkpoint{},
|
||||
},
|
||||
},
|
||||
errString: "",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
@@ -97,6 +97,13 @@ func TestVerifyBitfieldLength_OK(t *testing.T) {
|
||||
assert.NoError(t, helpers.VerifyBitfieldLength(bf, committeeSize), "Bitfield is not validated when it was supposed to be")
|
||||
}
|
||||
|
||||
func TestVerifyBitfieldLength_Incorrect(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
bf := bitfield.NewBitlist(1)
|
||||
require.ErrorContains(t, "wanted participants bitfield length 2, got: 1", helpers.VerifyBitfieldLength(bf, 2))
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_CannotRetrieveFutureEpoch(t *testing.T) {
|
||||
helpers.ClearCache()
|
||||
|
||||
|
||||
20
beacon-chain/core/helpers/legacy.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
|
||||
// DepositRequestsStarted determines if the deposit requests have started.
|
||||
func DepositRequestsStarted(beaconState state.BeaconState) bool {
|
||||
if beaconState.Version() < version.Electra {
|
||||
return false
|
||||
}
|
||||
|
||||
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return beaconState.Eth1DepositIndex() == requestsStartIndex
|
||||
}
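A hedged usage sketch: once deposit requests have started, a deposit-processing path could skip the legacy Eth1-data deposit flow. The branch below is illustrative; the surrounding function and its return values are assumed.

if helpers.DepositRequestsStarted(st) {
	// Legacy Eth1Data-derived deposits are exhausted; execution-layer deposit
	// requests are now the source of new deposits, so skip the legacy path.
	return st, nil
}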
|
||||
33
beacon-chain/core/helpers/legacy_test.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDepositRequestHaveStarted contains several test cases for helpers.DepositRequestsStarted.
|
||||
func TestDepositRequestHaveStarted(t *testing.T) {
|
||||
t.Run("Version below Electra returns false", func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
result := helpers.DepositRequestsStarted(st)
|
||||
require.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("Version is Electra or higher, no error, but Eth1DepositIndex != requestsStartIndex returns false", func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
require.NoError(t, st.SetEth1DepositIndex(1))
|
||||
result := helpers.DepositRequestsStarted(st)
|
||||
require.False(t, result)
|
||||
})
|
||||
|
||||
t.Run("Version is Electra or higher, no error, and Eth1DepositIndex == requestsStartIndex returns true", func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
require.NoError(t, st.SetEth1DepositIndex(33))
|
||||
require.NoError(t, st.SetDepositRequestsStartIndex(33))
|
||||
result := helpers.DepositRequestsStarted(st)
|
||||
require.True(t, result)
|
||||
})
|
||||
}
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
func BalanceChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
|
||||
churn := max(
|
||||
params.BeaconConfig().MinPerEpochChurnLimitElectra,
|
||||
(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
|
||||
uint64(activeBalance)/params.BeaconConfig().ChurnLimitQuotient,
|
||||
)
|
||||
return primitives.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
|
||||
}
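A small worked example of the final rounding step; the Gwei values below are made up for illustration, only the modulo arithmetic matters.

// Any remainder smaller than a full increment is dropped, so the churn limit is
// always a whole number of effective-balance increments.
churn := uint64(200_500_000_000)
increment := uint64(1_000_000_000)
rounded := churn - churn%increment // 200_000_000_000
_ = rounded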
|
||||
|
||||
@@ -3,6 +3,7 @@ package helpers
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -11,8 +12,8 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -347,27 +348,33 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
|
||||
// Spec pseudocode definition:
|
||||
//
|
||||
// def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
|
||||
// """
|
||||
// Return from ``indices`` a random index sampled by effective balance.
|
||||
// """
|
||||
// assert len(indices) > 0
|
||||
// MAX_RANDOM_BYTE = 2**8 - 1
|
||||
// i = uint64(0)
|
||||
// total = uint64(len(indices))
|
||||
// while True:
|
||||
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
|
||||
// random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
// effective_balance = state.validators[candidate_index].effective_balance
|
||||
// if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte: #[Modified in Electra:EIP7251]
|
||||
// return candidate_index
|
||||
// i += 1
|
||||
// """
|
||||
// Return from ``indices`` a random index sampled by effective balance.
|
||||
// """
|
||||
// assert len(indices) > 0
|
||||
// MAX_RANDOM_VALUE = 2**16 - 1 # [Modified in Electra]
|
||||
// i = uint64(0)
|
||||
// total = uint64(len(indices))
|
||||
// while True:
|
||||
// candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
|
||||
// # [Modified in Electra]
|
||||
// random_bytes = hash(seed + uint_to_bytes(i // 16))
|
||||
// offset = i % 16 * 2
|
||||
// random_value = bytes_to_uint64(random_bytes[offset:offset + 2])
|
||||
// effective_balance = state.validators[candidate_index].effective_balance
|
||||
// # [Modified in Electra:EIP7251]
|
||||
// if effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value:
|
||||
// return candidate_index
|
||||
// i += 1
|
||||
func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []primitives.ValidatorIndex, seed [32]byte) (primitives.ValidatorIndex, error) {
|
||||
length := uint64(len(activeIndices))
|
||||
if length == 0 {
|
||||
return 0, errors.New("empty active indices list")
|
||||
}
|
||||
maxRandomByte := uint64(1<<8 - 1)
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
beaconConfig := params.BeaconConfig()
|
||||
seedBuffer := make([]byte, len(seed)+8)
|
||||
copy(seedBuffer, seed[:])
|
||||
|
||||
for i := uint64(0); ; i++ {
|
||||
candidateIndex, err := ComputeShuffledIndex(primitives.ValidatorIndex(i%length), length, seed, true /* shuffle */)
|
||||
@@ -378,21 +385,28 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
|
||||
if uint64(candidateIndex) >= uint64(bState.NumValidators()) {
|
||||
return 0, errors.New("active index out of range")
|
||||
}
|
||||
b := append(seed[:], bytesutil.Bytes8(i/32)...)
|
||||
randomByte := hashFunc(b)[i%32]
|
||||
|
||||
v, err := bState.ValidatorAtIndexReadOnly(candidateIndex)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
effectiveBal := v.EffectiveBalance()
|
||||
|
||||
maxEB := params.BeaconConfig().MaxEffectiveBalance
|
||||
if bState.Version() >= version.Electra {
|
||||
maxEB = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/16)
|
||||
randomBytes := hashFunc(seedBuffer)
|
||||
offset := (i % 16) * 2
|
||||
randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
|
||||
|
||||
if effectiveBal*maxRandomByte >= maxEB*uint64(randomByte) {
|
||||
return candidateIndex, nil
|
||||
if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
|
||||
return candidateIndex, nil
|
||||
}
|
||||
} else {
|
||||
binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/32)
|
||||
randomByte := hashFunc(seedBuffer)[i%32]
|
||||
|
||||
if effectiveBal*fieldparams.MaxRandomByte >= beaconConfig.MaxEffectiveBalance*uint64(randomByte) {
|
||||
return candidateIndex, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
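The Electra branch above draws a 16-bit random value from two hash bytes; a hypothetical helper that isolates just that step (not a Prysm function) might read:

// electraRandomValue returns the little-endian 16-bit value for round i, matching
// the spec's bytes_to_uint64(random_bytes[offset:offset + 2]) over a 32-byte hash.
func electraRandomValue(randomBytes [32]byte, i uint64) uint64 {
	offset := (i % 16) * 2
	return uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8
}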
|
||||
@@ -500,63 +514,6 @@ func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconSta
|
||||
return lastActivatedvalidatorIndex, nil
|
||||
}
|
||||
|
||||
// hasETH1WithdrawalCredential returns whether the validator has an ETH1
|
||||
// Withdrawal prefix. It assumes that the caller has a lock on the state
|
||||
func HasETH1WithdrawalCredential(val interfaces.WithWithdrawalCredentials) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
return isETH1WithdrawalCredential(val.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
func isETH1WithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasCompoundingWithdrawalCredential checks if the validator has a compounding withdrawal credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_compounding_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential.
|
||||
// """
|
||||
// return is_compounding_withdrawal_credential(validator.withdrawal_credentials)
|
||||
func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return IsCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
// IsCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
|
||||
// return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
|
||||
func IsCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasExecutionWithdrawalCredentials checks if the validator has an execution withdrawal credential or compounding credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_execution_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
|
||||
// """
|
||||
// return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
|
||||
func HasExecutionWithdrawalCredentials(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return HasCompoundingWithdrawalCredential(v) || HasETH1WithdrawalCredential(v)
|
||||
}
|
||||
|
||||
// IsSameWithdrawalCredentials returns true if both validators have the same withdrawal credentials.
|
||||
//
|
||||
// return a.withdrawal_credentials[12:] == b.withdrawal_credentials[12:]
|
||||
@@ -591,10 +548,10 @@ func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, e
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if fork >= version.Electra {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
|
||||
return val.HasExecutionWithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
|
||||
return val.HasETH1WithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
@@ -622,7 +579,7 @@ func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint6
|
||||
// """
|
||||
// Check if ``validator`` is partially withdrawable.
|
||||
// """
|
||||
// max_effective_balance = get_validator_max_effective_balance(validator)
|
||||
// max_effective_balance = get_max_effective_balance(validator)
|
||||
// has_max_effective_balance = validator.effective_balance == max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// has_excess_balance = balance > max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// return (
|
||||
@@ -635,7 +592,7 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
|
||||
hasMaxBalance := val.EffectiveBalance() == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
return val.HasExecutionWithdrawalCredentials() &&
|
||||
hasMaxBalance &&
|
||||
hasExcessBalance
|
||||
}
|
||||
@@ -655,14 +612,14 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
|
||||
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
return val.HasETH1WithdrawalCredentials() && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
|
||||
// ValidatorMaxEffectiveBalance returns the maximum effective balance for a validator.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def get_validator_max_effective_balance(validator: Validator) -> Gwei:
|
||||
// def get_max_effective_balance(validator: Validator) -> Gwei:
|
||||
// """
|
||||
// Get max effective balance for ``validator``.
|
||||
// """
|
||||
@@ -671,7 +628,7 @@ func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balanc
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
return params.BeaconConfig().MinActivationBalance
|
||||
|
||||
@@ -841,7 +841,6 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
if length == 0 {
|
||||
return 0, errors.New("empty active indices list")
|
||||
}
|
||||
maxRandomByte := uint64(1<<8 - 1)
|
||||
hashFunc := hash.CustomSHA256Hasher()
|
||||
|
||||
for i := uint64(0); ; i++ {
|
||||
@@ -860,7 +859,7 @@ func computeProposerIndexWithValidators(validators []*ethpb.Validator, activeInd
|
||||
if v != nil {
|
||||
effectiveBal = v.EffectiveBalance
|
||||
}
|
||||
if effectiveBal*maxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
|
||||
if effectiveBal*fieldparams.MaxRandomByte >= params.BeaconConfig().MaxEffectiveBalance*uint64(randomByte) {
|
||||
return candidateIndex, nil
|
||||
}
|
||||
}
|
||||
@@ -911,13 +910,15 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
func TestHasETH1WithdrawalCredentials(t *testing.T) {
|
||||
creds := []byte{0xFA, 0xCC}
|
||||
v := ðpb.Validator{WithdrawalCredentials: creds}
|
||||
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
|
||||
roV, err := state_native.NewValidator(v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, roV.HasETH1WithdrawalCredentials())
|
||||
creds = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}
|
||||
v = ðpb.Validator{WithdrawalCredentials: creds}
|
||||
require.Equal(t, true, helpers.HasETH1WithdrawalCredential(v))
|
||||
roV, err = state_native.NewValidator(v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, roV.HasETH1WithdrawalCredentials())
|
||||
// No Withdrawal cred
|
||||
v = ðpb.Validator{}
|
||||
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
|
||||
}
|
||||
|
||||
func TestHasCompoundingWithdrawalCredential(t *testing.T) {
|
||||
@@ -932,11 +933,12 @@ func TestHasCompoundingWithdrawalCredential(t *testing.T) {
|
||||
{"Does not have compounding withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasCompoundingWithdrawalCredential(tt.validator))
|
||||
roV, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, roV.HasCompoundingWithdrawalCredentials())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -956,11 +958,12 @@ func TestHasExecutionWithdrawalCredentials(t *testing.T) {
|
||||
{"Does not have compounding withdrawal credential or eth1 withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasExecutionWithdrawalCredentials(tt.validator))
|
||||
roV, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, roV.HasExecutionWithdrawalCredentials())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -33,6 +34,7 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"reflect"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -23,6 +24,8 @@ import (
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const ErrNotEnoughSyncCommitteeBits = "sync committee bits count is less than required"
|
||||
|
||||
func NewLightClientFinalityUpdateFromBeaconState(
|
||||
ctx context.Context,
|
||||
currentSlot primitives.Slot,
|
||||
@@ -83,7 +86,12 @@ func NewLightClientUpdateFromBeaconState(
|
||||
return nil, errors.Wrap(err, "could not get sync aggregate")
|
||||
}
|
||||
if syncAggregate.SyncCommitteeBits.Count() < params.BeaconConfig().MinSyncCommitteeParticipants {
|
||||
return nil, fmt.Errorf("invalid sync committee bits count %d", syncAggregate.SyncCommitteeBits.Count())
|
||||
return nil, fmt.Errorf(
|
||||
"%s (got %d, need %d)",
|
||||
ErrNotEnoughSyncCommitteeBits,
|
||||
syncAggregate.SyncCommitteeBits.Count(),
|
||||
params.BeaconConfig().MinSyncCommitteeParticipants,
|
||||
)
|
||||
}
|
||||
|
||||
// assert state.slot == state.latest_block_header.slot
|
||||
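Because ErrNotEnoughSyncCommitteeBits is a string constant rather than a sentinel error value, a caller that wants to treat this case as non-fatal has to match on the message; a hedged sketch (err comes from the call above, and the skip policy is illustrative):

if err != nil && strings.Contains(err.Error(), lightClient.ErrNotEnoughSyncCommitteeBits) {
	// Not enough sync committee participation for this update; skip it rather
	// than surfacing a hard failure.
	return nil
}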
@@ -281,56 +289,247 @@ func CreateDefaultLightClientUpdate(currentSlot primitives.Slot, attestedState s
|
||||
if currentEpoch < params.BeaconConfig().CapellaForkEpoch {
|
||||
m = &pb.LightClientUpdateAltair{
|
||||
AttestedHeader: &pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{},
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalityBranch: finalityBranch,
|
||||
FinalizedHeader: &pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
SyncAggregate: &pb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, 64),
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
} else if currentEpoch < params.BeaconConfig().DenebForkEpoch {
|
||||
m = &pb.LightClientUpdateCapella{
|
||||
AttestedHeader: &pb.LightClientHeaderCapella{
|
||||
Beacon: &pb.BeaconBlockHeader{},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderCapella{},
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalityBranch: finalityBranch,
|
||||
FinalizedHeader: &pb.LightClientHeaderCapella{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderCapella{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
SyncAggregate: &pb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, 64),
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
} else if currentEpoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
m = &pb.LightClientUpdateDeneb{
|
||||
AttestedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalityBranch: finalityBranch,
|
||||
FinalizedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
SyncAggregate: &pb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, 64),
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
if attestedState.Version() >= version.Electra {
|
||||
m = &pb.LightClientUpdateElectra{
|
||||
AttestedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalityBranch: finalityBranch,
|
||||
FinalizedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
SyncAggregate: &pb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, 64),
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
m = &pb.LightClientUpdateDeneb{
|
||||
AttestedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{},
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
NextSyncCommittee: nextSyncCommittee,
|
||||
NextSyncCommitteeBranch: nextSyncCommitteeBranch,
|
||||
FinalityBranch: finalityBranch,
|
||||
FinalizedHeader: &pb.LightClientHeaderDeneb{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
},
|
||||
Execution: &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
ParentHash: make([]byte, fieldparams.RootLength),
|
||||
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
|
||||
StateRoot: make([]byte, fieldparams.RootLength),
|
||||
ReceiptsRoot: make([]byte, fieldparams.RootLength),
|
||||
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
|
||||
PrevRandao: make([]byte, fieldparams.RootLength),
|
||||
ExtraData: make([]byte, 0),
|
||||
BaseFeePerGas: make([]byte, fieldparams.RootLength),
|
||||
BlockHash: make([]byte, fieldparams.RootLength),
|
||||
TransactionsRoot: make([]byte, fieldparams.RootLength),
|
||||
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
|
||||
GasLimit: 0,
|
||||
GasUsed: 0,
|
||||
},
|
||||
ExecutionBranch: executionBranch,
|
||||
},
|
||||
SyncAggregate: &pb.SyncAggregate{
|
||||
SyncCommitteeBits: make([]byte, 64),
|
||||
SyncCommitteeSignature: make([]byte, 96),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -404,18 +603,15 @@ func BlockToLightClientHeader(
var payloadProof [][]byte

if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
payloadHeader = &enginev1.ExecutionPayloadHeaderCapella{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
var ok bool

p, err := execution.EmptyExecutionPayloadHeader(version.Capella)
if err != nil {
return nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderCapella)
if !ok {
return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderCapella{})
}
payloadProof = emptyPayloadProof()
} else {
@@ -472,18 +668,15 @@ func BlockToLightClientHeader(
var payloadProof [][]byte

if blockEpoch < params.BeaconConfig().CapellaForkEpoch {
payloadHeader = &enginev1.ExecutionPayloadHeaderDeneb{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
TransactionsRoot: make([]byte, fieldparams.RootLength),
WithdrawalsRoot: make([]byte, fieldparams.RootLength),
var ok bool

p, err := execution.EmptyExecutionPayloadHeader(version.Deneb)
if err != nil {
return nil, errors.Wrap(err, "could not get payload header")
}
payloadHeader, ok = p.(*enginev1.ExecutionPayloadHeaderDeneb)
if !ok {
return nil, fmt.Errorf("payload header type %T is not %T", p, &enginev1.ExecutionPayloadHeaderDeneb{})
}
payloadProof = emptyPayloadProof()
} else {

@@ -2,15 +2,18 @@ package light_client_test

import (
"reflect"
"strings"
"testing"

"github.com/prysmaticlabs/prysm/v5/config/params"
light_client "github.com/prysmaticlabs/prysm/v5/consensus-types/light-client"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"

"github.com/pkg/errors"
lightClient "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/light-client"
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/config/params"
consensustypes "github.com/prysmaticlabs/prysm/v5/consensus-types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
v11 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
pb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -694,7 +697,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
excessBlobGas, err := payload.ExcessBlobGas()
require.NoError(t, err)

executionHeader := &v11.ExecutionPayloadHeaderElectra{
executionHeader := &v11.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
@@ -759,7 +762,7 @@ func TestLightClient_BlockToLightClientHeader(t *testing.T) {
excessBlobGas, err := payload.ExcessBlobGas()
require.NoError(t, err)

executionHeader := &v11.ExecutionPayloadHeaderElectra{
executionHeader := &v11.ExecutionPayloadHeaderDeneb{
ParentHash: payload.ParentHash(),
FeeRecipient: payload.FeeRecipient(),
StateRoot: payload.StateRoot(),
@@ -972,3 +975,667 @@ func convertArrayToSlice(arr [4][32]uint8) [][]uint8 {
}
return slice
}

// When the update has relevant sync committee
func createNonEmptySyncCommitteeBranch() [][]byte {
res := make([][]byte, fieldparams.SyncCommitteeBranchDepth)
res[0] = []byte(strings.Repeat("x", 32))
for i := 1; i < len(res); i++ {
res[i] = make([]byte, fieldparams.RootLength)
}
return res
}

// When the update has finality
func createNonEmptyFinalityBranch() [][]byte {
res := make([][]byte, fieldparams.FinalityBranchDepth)
res[0] = []byte(strings.Repeat("x", 32))
for i := 1; i < fieldparams.FinalityBranchDepth; i++ {
res[i] = make([]byte, 32)
}
return res
}

func TestIsBetterUpdate(t *testing.T) {
config := params.BeaconConfig()
st, err := util.NewBeaconStateAltair()
require.NoError(t, err)

t.Run("new has supermajority but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b11111100, 0b1}, // [0,0,1,1,1,1,1,1]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("old has supermajority but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b11111100, 0b1}, // [0,0,1,1,1,1,1,1]
})
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new doesn't have supermajority and newNumActiveParticipants is greater than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("new doesn't have supermajority and newNumActiveParticipants is lesser than oldNumActiveParticipants", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new has relevant sync committee but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
oldUpdate.SetSignatureSlot(9999)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000001,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
newUpdate.SetSignatureSlot(1000000)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("old has relevant sync committee but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000001,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
oldUpdate.SetSignatureSlot(1000000)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
newUpdate.SetSignatureSlot(9999)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new has finality but old doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
oldUpdate.SetSignatureSlot(9999)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
newUpdate.SetSignatureSlot(9999)
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("old has finality but new doesn't", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
oldUpdate.SetSignatureSlot(9999)
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
newUpdate.SetSignatureSlot(9999)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new has finality and sync committee finality both but old doesn't have sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)
oldUpdate.SetSignatureSlot(9999)
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 9999,
},
})
require.NoError(t, err)
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
require.NoError(t, err)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
newUpdate.SetSignatureSlot(999999)
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 999999,
},
})
require.NoError(t, err)
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
require.NoError(t, err)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("new has finality but doesn't have sync committee finality and old has sync committee finality", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
require.NoError(t, err)
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)
oldUpdate.SetSignatureSlot(999999)
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 999999,
},
})
require.NoError(t, err)
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
require.NoError(t, err)

newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 1000000,
},
})
require.NoError(t, err)
err = newUpdate.SetAttestedHeader(newAttestedHeader)
require.NoError(t, err)
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
require.NoError(t, err)
newUpdate.SetSignatureSlot(9999)
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
require.NoError(t, err)
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
Beacon: &pb.BeaconBlockHeader{
Slot: 9999,
},
})
require.NoError(t, err)
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
require.NoError(t, err)

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new has more active participants than old", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, true, result)
})

t.Run("new has less active participants than old", func(t *testing.T) {
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
require.NoError(t, err)
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
require.NoError(t, err)

oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b01111100, 0b1}, // [0,0,1,1,1,1,1,0]
})
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
})

result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
require.NoError(t, err)
require.Equal(t, false, result)
})

t.Run("new's attested header's slot is lesser than old's attested header's slot", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
oldUpdate.SetSignatureSlot(9999)
|
||||
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 999999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetAttestedHeader(newAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
newUpdate.SetSignatureSlot(9999)
|
||||
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, result)
|
||||
})
|
||||
|
||||
t.Run("new's attested header's slot is greater than old's attested header's slot", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 999999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
oldUpdate.SetSignatureSlot(9999)
|
||||
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetAttestedHeader(newAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
newUpdate.SetSignatureSlot(9999)
|
||||
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, result)
|
||||
})
|
||||
|
||||
t.Run("none of the above conditions are met and new signature's slot is less than old signature's slot", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
oldUpdate.SetSignatureSlot(9999)
|
||||
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetAttestedHeader(newAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
newUpdate.SetSignatureSlot(9998)
|
||||
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, result)
|
||||
})
|
||||
|
||||
t.Run("none of the above conditions are met and new signature's slot is greater than old signature's slot", func(t *testing.T) {
|
||||
oldUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(1), st)
|
||||
require.NoError(t, err)
|
||||
newUpdate, err := lightClient.CreateDefaultLightClientUpdate(primitives.Slot(config.AltairForkEpoch*primitives.Epoch(config.SlotsPerEpoch)).Add(2), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
oldUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
oldAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetAttestedHeader(oldAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
oldUpdate.SetSignatureSlot(9998)
|
||||
oldFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = oldUpdate.SetFinalizedHeader(oldFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
newUpdate.SetSyncAggregate(&pb.SyncAggregate{
|
||||
SyncCommitteeBits: []byte{0b00111100, 0b1}, // [0,0,1,1,1,1,0,0]
|
||||
})
|
||||
newAttestedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 1000000,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetAttestedHeader(newAttestedHeader)
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetNextSyncCommitteeBranch(createNonEmptySyncCommitteeBranch())
|
||||
require.NoError(t, err)
|
||||
newUpdate.SetSignatureSlot(9999)
|
||||
err = newUpdate.SetFinalityBranch(createNonEmptyFinalityBranch())
|
||||
require.NoError(t, err)
|
||||
newFinalizedHeader, err := light_client.NewWrappedHeader(&pb.LightClientHeaderAltair{
|
||||
Beacon: &pb.BeaconBlockHeader{
|
||||
Slot: 9999,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
err = newUpdate.SetFinalizedHeader(newFinalizedHeader)
|
||||
require.NoError(t, err)
|
||||
|
||||
result, err := lightClient.IsBetterUpdate(newUpdate, oldUpdate)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, result)
|
||||
})
|
||||
}
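
The cases above exercise a fixed precedence when ranking two updates. As a reading aid only, here is a condensed, standalone Go sketch of that ordering; the struct fields are made-up stand-ins for the wrapped update getters used in the tests, and the real logic lives in lightClient.IsBetterUpdate and may differ in detail:

// updateModel is a hypothetical stand-in for a light client update, reduced to
// the properties the comparison looks at.
type updateModel struct {
	participants         int    // set bits in the sync aggregate
	committeeSize        int    // total sync committee bits
	hasRelevantCommittee bool   // non-empty next-sync-committee branch in the right period
	hasFinality          bool   // non-empty finality branch
	hasCommitteeFinality bool   // finalized header in the same period as the attested header
	attestedSlot         uint64
	signatureSlot        uint64
}

// isBetter mirrors the order the test cases above walk through: supermajority,
// then participant count below supermajority, then relevant sync committee,
// finality, sync committee finality, participant count again, and finally the
// older attested slot and older signature slot win.
func isBetter(newU, oldU updateModel) bool {
	newSuper := newU.participants*3 >= newU.committeeSize*2
	oldSuper := oldU.participants*3 >= oldU.committeeSize*2
	if newSuper != oldSuper {
		return newSuper
	}
	if !newSuper && newU.participants != oldU.participants {
		return newU.participants > oldU.participants
	}
	if newU.hasRelevantCommittee != oldU.hasRelevantCommittee {
		return newU.hasRelevantCommittee
	}
	if newU.hasFinality != oldU.hasFinality {
		return newU.hasFinality
	}
	if newU.hasCommitteeFinality != oldU.hasCommitteeFinality {
		return newU.hasCommitteeFinality
	}
	if newU.participants != oldU.participants {
		return newU.participants > oldU.participants
	}
	if newU.attestedSlot != oldU.attestedSlot {
		return newU.attestedSlot < oldU.attestedSlot
	}
	return newU.signatureSlot < oldU.signatureSlot
}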
@@ -99,6 +99,15 @@ func CanUpgradeToElectra(slot primitives.Slot) bool {
return epochStart && electraEpoch
}

// CanUpgradeToFulu returns true if the input `slot` can upgrade to Fulu.
// Spec code:
// If state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == FULU_FORK_EPOCH
func CanUpgradeToFulu(slot primitives.Slot) bool {
epochStart := slots.IsEpochStart(slot)
fuluEpoch := slots.ToEpoch(slot) == params.BeaconConfig().FuluForkEpoch
return epochStart && fuluEpoch
}

// CanProcessEpoch checks the eligibility to process epoch.
// The epoch can be processed at the end of the last slot of every epoch.
//
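
As an illustration of the CanUpgradeToFulu check added above, here is a hypothetical companion test that is not part of this diff. It assumes the time_test package and imports shown in the next hunk, the mainnet value of 32 slots per epoch, and the FuluForkEpoch = 5 override that the existing tests further down also use:

func TestCanUpgradeToFulu_WorkedExample(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	bc := params.BeaconConfig()
	bc.FuluForkEpoch = 5
	params.OverrideBeaconConfig(bc)
	// With 32 slots per epoch, epoch 5 starts at slot 160.
	if !time.CanUpgradeToFulu(160) {
		t.Error("slot 160 starts epoch 5, so the Fulu upgrade should be allowed")
	}
	if time.CanUpgradeToFulu(161) {
		t.Error("slot 161 is not an epoch start")
	}
	if time.CanUpgradeToFulu(32) {
		t.Error("slot 32 starts epoch 1, which is not the Fulu fork epoch")
	}
}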
@@ -1,6 +1,7 @@
package time_test

import (
"fmt"
"testing"

"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
@@ -83,76 +84,6 @@ func TestNextEpoch_OK(t *testing.T) {
}
}

func TestCanUpgradeToAltair(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.AltairForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not altair epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "altair epoch",
slot: primitives.Slot(params.BeaconConfig().AltairForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
}
})
}
}

func TestCanUpgradeBellatrix(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.BellatrixForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not bellatrix epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "bellatrix epoch",
slot: primitives.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
}
})
}
}

func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
tests := []struct {
slot primitives.Slot
@@ -264,107 +195,79 @@ func TestAltairCompatible(t *testing.T) {
}
}

func TestCanUpgradeToCapella(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.CapellaForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not capella epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "capella epoch",
slot: primitives.Slot(params.BeaconConfig().CapellaForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToCapella(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToCapella() = %v, want %v", got, tt.want)
}
})
}
}
func TestCanUpgradeTo(t *testing.T) {
beaconConfig := params.BeaconConfig()

func TestCanUpgradeToDeneb(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.DenebForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
outerTestCases := []struct {
name string
forkEpoch *primitives.Epoch
upgradeFunc func(primitives.Slot) bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
name: "Altair",
forkEpoch: &beaconConfig.AltairForkEpoch,
upgradeFunc: time.CanUpgradeToAltair,
},
{
name: "not deneb epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
name: "Bellatrix",
forkEpoch: &beaconConfig.BellatrixForkEpoch,
upgradeFunc: time.CanUpgradeToBellatrix,
},
{
name: "deneb epoch",
slot: primitives.Slot(params.BeaconConfig().DenebForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
name: "Capella",
forkEpoch: &beaconConfig.CapellaForkEpoch,
upgradeFunc: time.CanUpgradeToCapella,
},
{
name: "Deneb",
forkEpoch: &beaconConfig.DenebForkEpoch,
upgradeFunc: time.CanUpgradeToDeneb,
},
{
name: "Electra",
forkEpoch: &beaconConfig.ElectraForkEpoch,
upgradeFunc: time.CanUpgradeToElectra,
},
{
name: "Fulu",
forkEpoch: &beaconConfig.FuluForkEpoch,
upgradeFunc: time.CanUpgradeToFulu,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToDeneb(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToDeneb() = %v, want %v", got, tt.want)
}
})
}
}

func TestCanUpgradeToElectra(t *testing.T) {
params.SetupTestConfigCleanup(t)
bc := params.BeaconConfig()
bc.ElectraForkEpoch = 5
params.OverrideBeaconConfig(bc)
tests := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: "not electra epoch",
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: "electra epoch",
slot: primitives.Slot(params.BeaconConfig().ElectraForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := time.CanUpgradeToElectra(tt.slot); got != tt.want {
t.Errorf("CanUpgradeToElectra() = %v, want %v", got, tt.want)
}
})
for _, otc := range outerTestCases {
params.SetupTestConfigCleanup(t)
*otc.forkEpoch = 5
params.OverrideBeaconConfig(beaconConfig)

innerTestCases := []struct {
name string
slot primitives.Slot
want bool
}{
{
name: "not epoch start",
slot: 1,
want: false,
},
{
name: fmt.Sprintf("not %s epoch", otc.name),
slot: params.BeaconConfig().SlotsPerEpoch,
want: false,
},
{
name: fmt.Sprintf("%s epoch", otc.name),
slot: primitives.Slot(*otc.forkEpoch) * params.BeaconConfig().SlotsPerEpoch,
want: true,
},
}

for _, itc := range innerTestCases {
t.Run(fmt.Sprintf("%s-%s", otc.name, itc.name), func(t *testing.T) {
if got := otc.upgradeFunc(itc.slot); got != itc.want {
t.Errorf("CanUpgradeTo%s() = %v, want %v", otc.name, got, itc.want)
}
})
}
}
}

@@ -23,6 +23,7 @@ go_library(
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",

@@ -17,6 +17,7 @@ import (
e "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/execution"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v5/config/features"
@@ -298,7 +299,7 @@ func ProcessSlotsCore(ctx context.Context, span trace.Span, state state.BeaconSt
func ProcessEpoch(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
var err error
if time.CanProcessEpoch(state) {
if state.Version() == version.Electra {
if state.Version() >= version.Electra {
if err = electra.ProcessEpoch(ctx, state); err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("could not process %s epoch", version.String(state.Version())))
}
@@ -322,9 +323,11 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
defer span.End()

var err error

slot := state.Slot()
upgraded := false

if time.CanUpgradeToAltair(state.Slot()) {
if time.CanUpgradeToAltair(slot) {
state, err = altair.UpgradeToAltair(ctx, state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -333,7 +336,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}

if time.CanUpgradeToBellatrix(state.Slot()) {
if time.CanUpgradeToBellatrix(slot) {
state, err = execution.UpgradeToBellatrix(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -342,7 +345,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}

if time.CanUpgradeToCapella(state.Slot()) {
if time.CanUpgradeToCapella(slot) {
state, err = capella.UpgradeToCapella(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -351,7 +354,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}

if time.CanUpgradeToDeneb(state.Slot()) {
if time.CanUpgradeToDeneb(slot) {
state, err = deneb.UpgradeToDeneb(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -360,7 +363,7 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}

if time.CanUpgradeToElectra(state.Slot()) {
if time.CanUpgradeToElectra(slot) {
state, err = electra.UpgradeToElectra(state)
if err != nil {
tracing.AnnotateError(span, err)
@@ -369,8 +372,17 @@ func UpgradeState(ctx context.Context, state state.BeaconState) (state.BeaconSta
upgraded = true
}

if time.CanUpgradeToFulu(slot) {
state, err = fulu.UpgradeToFulu(state)
if err != nil {
tracing.AnnotateError(span, err)
return nil, err
}
upgraded = true
}

if upgraded {
log.Debugf("upgraded state to %s", version.String(state.Version()))
log.WithField("version", version.String(state.Version())).Info("Upgraded state to")
}

return state, nil
@@ -391,11 +403,15 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interf
)
}

if uint64(len(body.AttesterSlashings())) > params.BeaconConfig().MaxAttesterSlashings {
maxSlashings := params.BeaconConfig().MaxAttesterSlashings
if body.Version() >= version.Electra {
maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
}
if uint64(len(body.AttesterSlashings())) > maxSlashings {
return nil, fmt.Errorf(
"number of attester slashings (%d) in block body exceeds allowed threshold of %d",
len(body.AttesterSlashings()),
params.BeaconConfig().MaxAttesterSlashings,
maxSlashings,
)
}
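
For illustration only, the version-dependent limit chosen inline above could be read as the following helper. This is hypothetical: the diff itself keeps the selection inline in VerifyOperationLengths, and the sketch assumes the params and version imports already used in this file:

// maxAttesterSlashingsForVersion mirrors the inline selection in the hunk above:
// Electra and later block bodies use the larger Electra limit.
func maxAttesterSlashingsForVersion(bodyVersion int) uint64 {
	if bodyVersion >= version.Electra {
		return params.BeaconConfig().MaxAttesterSlashingsElectra
	}
	return params.BeaconConfig().MaxAttesterSlashings
}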
@@ -437,6 +437,25 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
assert.ErrorContains(t, want, err)
}

func TestProcessBlock_OverMaxAttesterSlashingsElectra(t *testing.T) {
maxSlashings := params.BeaconConfig().MaxAttesterSlashingsElectra
b := &ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
Body: &ethpb.BeaconBlockBodyElectra{
AttesterSlashings: make([]*ethpb.AttesterSlashingElectra, maxSlashings+1),
},
},
}
want := fmt.Sprintf("number of attester slashings (%d) in block body exceeds allowed threshold of %d",
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashingsElectra)
s, err := state_native.InitializeFromProtoUnsafeElectra(&ethpb.BeaconStateElectra{})
require.NoError(t, err)
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
assert.ErrorContains(t, want, err)
}

func TestProcessBlock_OverMaxAttestations(t *testing.T) {
b := &ethpb.SignedBeaconBlock{
Block: &ethpb.BeaconBlock{
@@ -665,6 +684,20 @@ func TestProcessSlots_ThroughElectraEpoch(t *testing.T) {
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlots_ThroughFuluEpoch(t *testing.T) {
transition.SkipSlotCache.Disable()
params.SetupTestConfigCleanup(t)
conf := params.BeaconConfig()
conf.FuluForkEpoch = 5
params.OverrideBeaconConfig(conf)

st, _ := util.DeterministicGenesisStateElectra(t, params.BeaconConfig().MaxValidatorsPerCommittee)
st, err := transition.ProcessSlots(context.Background(), st, params.BeaconConfig().SlotsPerEpoch*10)
require.NoError(t, err)
require.Equal(t, version.Fulu, st.Version())
require.Equal(t, params.BeaconConfig().SlotsPerEpoch*10, st.Slot())
}

func TestProcessSlotsUsingNextSlotCache(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1)
r := []byte{'a'}
Some files were not shown because too many files have changed in this diff.