Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: 41 commits, changelog-...check_epoc
| Author | SHA1 | Date |
|---|---|---|
| | ef626a4648 | |
| | 910609a75f | |
| | f9c202190a | |
| | 4a63a194b1 | |
| | d887536eb7 | |
| | 1069da1cd2 | |
| | 4a487ba3bc | |
| | bf81cd4449 | |
| | 00337fe005 | |
| | bb3fba4d8e | |
| | 89967fe209 | |
| | 56712b5e49 | |
| | 0be9391e62 | |
| | 4a9c60f75f | |
| | 9cf6b93356 | |
| | b4220e35c4 | |
| | 536cded4cc | |
| | 86fc64c917 | |
| | 5d6a406829 | |
| | 2c78e501b3 | |
| | c8cb0f37b2 | |
| | 78722239da | |
| | 3ffef024c7 | |
| | a1eef44492 | |
| | 2845ab9365 | |
| | 4f43c15ebb | |
| | e473d7cc4d | |
| | 794a05af26 | |
| | 15df13c7e6 | |
| | b76f7fed2f | |
| | e263687ea5 | |
| | 0b16c79c35 | |
| | dc002c2806 | |
| | e7e48dcaf9 | |
| | 8f43f6cc84 | |
| | e07341e1d5 | |
| | ef293e52f8 | |
| | 72cc63a6a3 | |
| | 34ff4c3ea9 | |
| | e8c968326a | |
| | 2000ef457b | |
@@ -1 +1 @@
7.1.0
7.4.1
.github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changed lines)
@@ -33,5 +33,5 @@ Fixes #
**Acknowledgements**

- [ ] I have read [CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [ ] I have made an appropriate entry to [CHANGELOG.md](https://github.com/prysmaticlabs/prysm/blob/develop/CHANGELOG.md).
- [ ] I have included a uniquely named [changelog fragment file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [ ] I have added a description to this PR with sufficient context for reviewers to understand this PR.
.github/actions/gomodtidy/Dockerfile (vendored, 2 changed lines)
@@ -1,4 +1,4 @@
FROM golang:1.22-alpine
FROM golang:1.23-alpine

COPY entrypoint.sh /entrypoint.sh
.github/workflows/changelog.yml (vendored, 47 changed lines)
@@ -1,33 +1,34 @@
name: CI
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go

name: changelog

on:
pull_request:
branches:
- develop
branches: [ "develop" ]

jobs:
changed_files:
runs-on: ubuntu-latest
name: Check CHANGELOG.md
run-changelog-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: changelog modified
id: changelog-modified
- name: Checkout source code
uses: actions/checkout@v3

- name: Download unclog binary
uses: dsaltares/fetch-gh-release-asset@aa2ab1243d6e0d5b405b973c89fa4d06a2d0fff7 # 1.1.2
with:
repo: OffchainLabs/unclog
version: "tags/v0.1.3"
file: "unclog"

- name: Get new changelog files
id: new-changelog-files
uses: tj-actions/changed-files@v45
with:
files: CHANGELOG.md
files: |
changelog/**.md

- name: List all changed files
- name: Run lint command
env:
ALL_CHANGED_FILES: ${{ steps.changelog-modified.outputs.all_changed_files }}
run: |
if [[ ${ALL_CHANGED_FILES[*]} =~ (^|[[:space:]])"CHANGELOG.md"($|[[:space:]]) ]];
then
echo "CHANGELOG.md was modified.";
exit 0;
else
echo "CHANGELOG.md was not modified.";
echo "Please see CHANGELOG.md and follow the instructions to add your changes to that file."
echo "In some rare scenarios, a changelog entry is not required and this CI check can be ignored."
exit 1;
fi
ALL_ADDED_MARKDOWN: ${{ steps.new-changelog-files.outputs.added_files }}
run: chmod +x unclog && ./unclog check -fragment-env=ALL_ADDED_MARKDOWN
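For context on what the new job checks: the workflow hands `unclog check` the list of added `changelog/**.md` files through the `ALL_ADDED_MARKDOWN` environment variable. The Go program below is not unclog itself, only a minimal sketch of that style of check, assuming fragments carry Keep a Changelog style section headers (`### Added`, `### Fixed`, the `### Ignored` escape hatch, and so on); the file names in the comments are made up.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// Recognized fragment sections; "Ignored" is the escape hatch mentioned in CONTRIBUTING.md.
var allowedSections = []string{
	"### Added", "### Changed", "### Deprecated", "### Removed",
	"### Fixed", "### Security", "### Ignored",
}

// checkFragment returns an error unless the fragment body contains one of the allowed section headers.
func checkFragment(name, body string) error {
	for _, s := range allowedSections {
		if strings.Contains(body, s) {
			return nil
		}
	}
	return fmt.Errorf("%s: no recognized changelog section header", name)
}

func main() {
	// The workflow exports the added files as a whitespace-separated list, e.g.
	// ALL_ADDED_MARKDOWN="changelog/alice_fix-nil-deref.md changelog/bob_go1.23.md" (hypothetical names).
	files := strings.Fields(os.Getenv("ALL_ADDED_MARKDOWN"))
	if len(files) == 0 {
		fmt.Println("no changelog fragment added under changelog/")
		os.Exit(1)
	}
	for _, f := range files {
		b, err := os.ReadFile(f)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		if err := checkFragment(f, string(b)); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
	}
	fmt.Println("changelog fragments look OK")
}
```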
.github/workflows/clang-format.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
name: Protobuf Format

on:
push:
branches: [ '*' ]
pull_request:
branches: [ '*' ]
merge_group:
types: [checks_requested]

jobs:
clang-format-checking:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# Is this step failing for you?
# Run: clang-format -i proto/**/*.proto
# See: https://clang.llvm.org/docs/ClangFormat.html
- uses: RafikFarhad/clang-format-github-action@v3
with:
sources: "proto/**/*.proto"
.github/workflows/fuzz.yml (vendored, 4 changed lines)
@@ -16,7 +16,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.22.10'
go-version: '1.23.5'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:
@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.22.10'
go-version: '1.23.5'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
.github/workflows/go.yml (vendored, 12 changed lines)
@@ -28,10 +28,10 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Go 1.22
- name: Set up Go 1.23
uses: actions/setup-go@v4
with:
go-version: '1.22.10'
go-version: '1.23.5'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin
@@ -45,16 +45,16 @@ jobs:
- name: Checkout
uses: actions/checkout@v4

- name: Set up Go 1.22
- name: Set up Go 1.23
uses: actions/setup-go@v4
with:
go-version: '1.22.10'
go-version: '1.23.5'
id: go

- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
with:
version: v1.56.1
version: v1.63.4
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number

build:
@@ -64,7 +64,7 @@ jobs:
- name: Set up Go 1.x
uses: actions/setup-go@v4
with:
go-version: '1.22.10'
go-version: '1.23.5'
id: go

- name: Check out code into the Go module directory
@@ -1,28 +1,20 @@
run:
skip-files:
timeout: 10m
go: '1.23.5'

issues:
exclude-files:
- validator/web/site_data.go
- .*_test.go
skip-dirs:
exclude-dirs:
- proto
- tools/analyzers
timeout: 10m
go: '1.22.10'

linters:
enable-all: true
disable:
# Deprecated linters:
- deadcode
- exhaustivestruct
- golint
- govet
- ifshort
- interfacer
- maligned
- nosnakecase
- scopelint
- structcheck
- varcheck

# Disabled for now:
- asasalint
@@ -34,6 +26,8 @@ linters:
- dogsled
- dupl
- durationcheck
- errname
- err113
- exhaustive
- exhaustruct
- forbidigo
@@ -47,17 +41,17 @@ linters:
- gocyclo
- godot
- godox
- goerr113
- gofumpt
- gomnd
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
@@ -72,6 +66,7 @@ linters:
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
- staticcheck
CHANGELOG.md (144 changed lines)
@@ -4,70 +4,6 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
|
||||
|
||||
## [Unreleased](https://github.com/prysmaticlabs/prysm/compare/v5.2.0...HEAD)
|
||||
|
||||
### Added
|
||||
|
||||
- Added proper gas limit check for header from the builder.
|
||||
- Added an error field to log `Finished building block`.
|
||||
- Implemented a new `EmptyExecutionPayloadHeader` function.
|
||||
- `Finished building block`: Display error only if not nil.
|
||||
- Added support to update target and max blob count to different values per hard fork config.
|
||||
- Log before blob filesystem cache warm-up.
|
||||
- New design for the attestation pool. [PR](https://github.com/prysmaticlabs/prysm/pull/14324)
|
||||
- Add field param placeholder for Electra blob target and max to pass spec tests.
|
||||
- Add EIP-7691: Blob throughput increase.
|
||||
- SSZ files generation: Remove the `// Hash: ...` header.
|
||||
- DB optimization for saving light client bootstraps (save unique sync committees only).
|
||||
- Trace IDONTWANT Messages in Pubsub.
|
||||
- Add Fulu fork boilerplate.
|
||||
- Separate type for unaggregated network attestations. [PR](https://github.com/prysmaticlabs/prysm/pull/14659)
|
||||
- Update spec tests to v1.5.0-beta.0.
|
||||
|
||||
### Changed
|
||||
|
||||
- Process light client finality updates only for new finalized epochs instead of doing it for every block.
|
||||
- Refactor subnets subscriptions.
|
||||
- Refactor RPC handlers subscriptions.
|
||||
- Go deps upgrade, from `ioutil` to `io`
|
||||
- Move successfully registered validator(s) on builder log to debug.
|
||||
- Update some test files to use `crypto/rand` instead of `math/rand`
|
||||
- Enforce Compound prefix (0x02) for target when processing pending consolidation request.
|
||||
- Limit consolidating by validator's effective balance.
|
||||
- Use 16-bit random value for proposer and sync committee selection filter.
|
||||
- Re-organize the content of the `*.proto` files (No functional change).
|
||||
- Updated spec definitions for `process_slashings` in godocs. Simplified `ProcessSlashings` API.
|
||||
- Updated spec definition electra `process_registry_updates`.
|
||||
- Updated Electra spec definition for `process_epoch`.
|
||||
- Update our `go-libp2p-pubsub` dependency.
|
||||
- Re-organize the content of files to ease the creation of a new fork boilerplate.
|
||||
- Fixed Metadata errors for peers connected via QUIC.
|
||||
- Process light client finality updates only for new finalized epochs instead of doing it for every block.
|
||||
- Update blobs by rpc topics from V2 to V1.
|
||||
- Updated geth to 1.14 ~
|
||||
- E2e tests start from bellatrix
|
||||
|
||||
### Deprecated
|
||||
|
||||
|
||||
### Removed
|
||||
|
||||
- Cleanup ProcessSlashings method to remove unnecessary argument.
|
||||
- Remove `/proto/eth/v2` directory. [PR](https://github.com/prysmaticlabs/prysm/pull/14765)
|
||||
- Remove `/memsize/` pprof endpoint as it will no longer be supported in go 1.23, geth also removed in https://github.com/ethereum/go-ethereum/commit/e4675771eda550e7eeb63a8884816982c1980644
|
||||
|
||||
### Fixed
|
||||
|
||||
- Added check to prevent nil pointer dereference or out of bounds array access when validating the BLSToExecutionChange on an impossibly nil validator.
|
||||
- EIP-7691: Ensure new blobs subnets are subscribed on epoch in advance.
|
||||
- Fix kzg commitment inclusion proof depth minimal value.
|
||||
|
||||
### Security
|
||||
|
||||
- go version upgrade to 1.22.10 for CVE-2024-34156
|
||||
- Update golang.org/x/crypto to v0.31.0 to address CVE-2024-45337
|
||||
- Update golang.org/x/net to v0.33.0 to address CVE-2024-45338
|
||||
|
||||
## [v5.2.0](https://github.com/prysmaticlabs/prysm/compare/v5.1.2...v5.2.0)
|
||||
|
||||
Updating to this release is highly recommended, especially for users running v5.1.1 or v5.1.2.
|
||||
@@ -124,7 +60,7 @@ Notable features:
|
||||
- Updated the default `scrape-interval` in `Client-stats` to 2 minutes to accommodate Beaconcha.in API rate limits.
|
||||
- Switch to compounding when consolidating with source==target.
|
||||
- Revert block db save when saving state fails.
|
||||
- Return false from HasBlock if the block is being synced.
|
||||
- Return false from HasBlock if the block is being synced.
|
||||
- Cleanup forkchoice on failed insertions.
|
||||
- Use read only validator for core processing to avoid unnecessary copying.
|
||||
- Use ROBlock across block processing pipeline.
|
||||
@@ -137,7 +73,7 @@ Notable features:
|
||||
- Simplified `EjectedValidatorIndices`.
|
||||
- `engine_newPayloadV4`, `engine_getPayloadV4` changed due to new execution request serialization decisions, [PR](https://github.com/prysmaticlabs/prysm/pull/14580)
|
||||
- Fixed various small things in state-native code.
|
||||
- Use ROBlock earlier in block syncing pipeline.
|
||||
- Use ROBlock earlier in block syncing pipeline.
|
||||
- Changed the signature of `ProcessPayload`.
|
||||
- Only Build the Protobuf state once during serialization.
|
||||
- Capella blocks are execution.
|
||||
@@ -203,9 +139,9 @@ Notable features:
|
||||
|
||||
### Security
|
||||
|
||||
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
|
||||
## [v5.1.2](https://github.com/prysmaticlabs/prysm/compare/v5.1.1...v5.1.2) - 2024-10-16
|
||||
|
||||
This is a hotfix release with one change.
|
||||
This is a hotfix release with one change.
|
||||
|
||||
Prysm v5.1.1 contains an updated implementation of the beacon api streaming events endpoint. This
|
||||
new implementation contains a bug that can cause a panic in certain conditions. The issue is
|
||||
@@ -217,20 +153,20 @@ prysm REST mode validator (a feature which requires the validator to be configur
|
||||
api instead of prysm's stock grpc endpoints) or accessory software that connects to the events api,
|
||||
like https://github.com/ethpandaops/ethereum-metrics-exporter
|
||||
|
||||
### Fixed
|
||||
### Fixed
|
||||
|
||||
- Recover from panics when writing the event stream [#14545](https://github.com/prysmaticlabs/prysm/pull/14545)
|
||||
|
||||
## [v5.1.1](https://github.com/prysmaticlabs/prysm/compare/v5.1.0...v5.1.1) - 2024-10-15
|
||||
|
||||
This release has a number of features and improvements. Most notably, the feature flag
|
||||
`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
|
||||
This release has a number of features and improvements. Most notably, the feature flag
|
||||
`--enable-experimental-state` has been flipped to "opt out" via `--disable-experimental-state`.
|
||||
The experimental state management design has shown significant improvements in memory usage at
|
||||
runtime. Updates to libp2p's gossipsub have some bandwidth stability improvements with support for
|
||||
IDONTWANT control messages.
|
||||
IDONTWANT control messages.
|
||||
|
||||
The gRPC gateway has been deprecated from Prysm in this release. If you need JSON data, consider the
|
||||
standardized beacon-APIs.
|
||||
standardized beacon-APIs.
|
||||
|
||||
Updating to this release is recommended at your convenience.
|
||||
|
||||
@@ -272,7 +208,7 @@ Updating to this release is recommended at your convenience.
|
||||
- `grpc-gateway-corsdomain` is renamed to http-cors-domain. The old name can still be used as an alias.
|
||||
- `api-timeout` is changed from int flag to duration flag, default value updated.
|
||||
- Light client support: abstracted out the light client headers with different versions.
|
||||
- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
|
||||
- `ApplyToEveryValidator` has been changed to prevent misuse bugs, it takes a closure that takes a `ReadOnlyValidator` and returns a raw pointer to a `Validator`.
|
||||
- Removed gorilla mux library and replaced it with net/http updates in go 1.22.
|
||||
- Clean up `ProposeBlock` for validator client to reduce cognitive scoring and enable further changes.
|
||||
- Updated k8s-io/client-go to v0.30.4 and k8s-io/apimachinery to v0.30.4
|
||||
@@ -283,7 +219,7 @@ Updating to this release is recommended at your convenience.
|
||||
- Updated Sepolia bootnodes.
|
||||
- Make committee aware packing the default by deprecating `--enable-committee-aware-packing`.
|
||||
- Moved `ConvertKzgCommitmentToVersionedHash` to the `primitives` package.
|
||||
- Updated correlation penalty for EIP-7251.
|
||||
- Updated correlation penalty for EIP-7251.
|
||||
|
||||
### Deprecated
|
||||
- `--disable-grpc-gateway` flag is deprecated due to grpc gateway removal.
|
||||
@@ -757,34 +693,34 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.
|
||||
|
||||
- Linter: Wastedassign linter enabled to improve code quality.
|
||||
- API Enhancements:
|
||||
- Added payload return in Wei for /eth/v3/validator/blocks.
|
||||
- Added Holesky Deneb Epoch for better epoch management.
|
||||
- Added payload return in Wei for /eth/v3/validator/blocks.
|
||||
- Added Holesky Deneb Epoch for better epoch management.
|
||||
- Testing Enhancements:
|
||||
- Clear cache in tests of core helpers to ensure test reliability.
|
||||
- Added Debug State Transition Method for improved debugging.
|
||||
- Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
|
||||
- Clear cache in tests of core helpers to ensure test reliability.
|
||||
- Added Debug State Transition Method for improved debugging.
|
||||
- Backfilling test: Enabled backfill in E2E tests for more comprehensive coverage.
|
||||
- API Updates: Re-enabled jwt on keymanager API for enhanced security.
|
||||
- Logging Improvements: Enhanced block by root log for better traceability.
|
||||
- Validator Client Improvements:
|
||||
- Added Spans to Core Validator Methods for enhanced monitoring.
|
||||
- Improved readability in validator client code for better maintenance (various commits).
|
||||
- Added Spans to Core Validator Methods for enhanced monitoring.
|
||||
- Improved readability in validator client code for better maintenance (various commits).
|
||||
|
||||
### Changed
|
||||
|
||||
- Optimizations and Refinements:
|
||||
- Lowered resource usage in certain processes for efficiency.
|
||||
- Moved blob rpc validation closer to peer read for optimized processing.
|
||||
- Cleaned up validate beacon block code for clarity and efficiency.
|
||||
- Updated Sepolia Deneb fork epoch for alignment with network changes.
|
||||
- Changed blob latency metrics to milliseconds for more precise measurement.
|
||||
- Altered getLegacyDatabaseLocation message for better clarity.
|
||||
- Improved wait for activation method for enhanced performance.
|
||||
- Capitalized Aggregated Unaggregated Attestations Log for consistency.
|
||||
- Modified HistoricalRoots usage for accuracy.
|
||||
- Adjusted checking of attribute emptiness for efficiency.
|
||||
- Lowered resource usage in certain processes for efficiency.
|
||||
- Moved blob rpc validation closer to peer read for optimized processing.
|
||||
- Cleaned up validate beacon block code for clarity and efficiency.
|
||||
- Updated Sepolia Deneb fork epoch for alignment with network changes.
|
||||
- Changed blob latency metrics to milliseconds for more precise measurement.
|
||||
- Altered getLegacyDatabaseLocation message for better clarity.
|
||||
- Improved wait for activation method for enhanced performance.
|
||||
- Capitalized Aggregated Unaggregated Attestations Log for consistency.
|
||||
- Modified HistoricalRoots usage for accuracy.
|
||||
- Adjusted checking of attribute emptiness for efficiency.
|
||||
- Database Management:
|
||||
- Moved --db-backup-output-dir as a deprecated flag for database management simplification.
|
||||
- Added the Ability to Defragment the Beacon State for improved database performance.
|
||||
- Moved --db-backup-output-dir as a deprecated flag for database management simplification.
|
||||
- Added the Ability to Defragment the Beacon State for improved database performance.
|
||||
- Dependency Update: Bumped quic-go version from 0.39.3 to 0.39.4 for up-to-date dependencies.
|
||||
|
||||
### Removed
|
||||
@@ -795,12 +731,12 @@ AVX support (eg Celeron) after the Deneb fork. This is not an issue for mainnet.
|
||||
### Fixed
|
||||
|
||||
- Bug Fixes:
|
||||
- Fixed off by one error for improved accuracy.
|
||||
- Resolved small typo in error messages for clarity.
|
||||
- Addressed minor issue in blsToExecChange validator for better validation.
|
||||
- Corrected blobsidecar json tag for commitment inclusion proof.
|
||||
- Fixed ssz post-requests content type check.
|
||||
- Resolved issue with port logging in bootnode.
|
||||
- Fixed off by one error for improved accuracy.
|
||||
- Resolved small typo in error messages for clarity.
|
||||
- Addressed minor issue in blsToExecChange validator for better validation.
|
||||
- Corrected blobsidecar json tag for commitment inclusion proof.
|
||||
- Fixed ssz post-requests content type check.
|
||||
- Resolved issue with port logging in bootnode.
|
||||
- Test Fixes: Re-enabled Slasher E2E Test for more comprehensive testing.
|
||||
|
||||
### Security
|
||||
@@ -1227,9 +1163,9 @@ No security issues in this release.
|
||||
now features runtime detection, automatically enabling optimized code paths if your CPU supports it.
|
||||
- **Multiarch Containers Preview Available**: multiarch (:wave: arm64 support :wave:) containers will be offered for
|
||||
preview at the following locations:
|
||||
- Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
|
||||
- Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
|
||||
- Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.
|
||||
- Beacon Chain: [gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0](gcr.io/prylabs-dev/prysm/beacon-chain:v4.1.0)
|
||||
- Validator: [gcr.io/prylabs-dev/prysm/validator:v4.1.0](gcr.io/prylabs-dev/prysm/validator:v4.1.0)
|
||||
- Please note that in the next cycle, we will exclusively use these containers at the canonical URLs.
|
||||
|
||||
### Added
|
||||
|
||||
@@ -3051,4 +2987,4 @@ There are no security updates in this release.
|
||||
|
||||
# Older than v2.0.0
|
||||
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
For changelog history for releases older than v2.0.0, please refer to https://github.com/prysmaticlabs/prysm/releases
|
||||
@@ -125,7 +125,7 @@ Navigate to your fork of the repo on GitHub. On the upper left where the current

**16. Add an entry to CHANGELOG.md.**

If your change is user facing, you must include a CHANGELOG.md entry. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.
All PRs must include a changelog fragment file in the `changelog` directory. If your change is not user-facing or should not be mentioned in the changelog for some other reason, you may use the `Ignored` changelog section in your fragment's header to satisfy this requirement without altering the final release changelog. See the [Maintaining CHANGELOG.md](#maintaining-changelogmd) section for more information.

**17. Create a pull request.**

@@ -177,16 +177,10 @@ $ git push myrepo feature-in-progress-branch -f

## Maintaining CHANGELOG.md

This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/).

All PRs with user facing changes should have an entry in the CHANGELOG.md file and the change should be categorized in the appropriate category within the "Unreleased" section. The categories are:

- `Added` for new features.
- `Changed` for changes in existing functionality.
- `Deprecated` for soon-to-be removed features.
- `Removed` for now removed features.
- `Fixed` for any bug fixes.
- `Security` in case of vulnerabilities. Please see the [Security Policy](SECURITY.md) for responsible disclosure before adding a change with this category.
This project follows the changelog guidelines from [keepachangelog.com](https://keepachangelog.com/en/1.1.0/). In order to minimize conflicts and workflow headaches, we chose to implement a changelog management
strategy that uses changelog "fragment" files, managed by our changelog management tool called `unclog`. Each PR must include a new changelog fragment file in the `changelog` directory, as specified by unclog's
[README.md](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#what-is-a-changelog-fragment). As the `unclog` README suggests in the [Best Practices](https://github.com/OffchainLabs/unclog?tab=readme-ov-file#best-practices) section,
the standard naming convention for your PR's fragment file, to avoid conflicting with another fragment file, is `changelog/<github user name>_<PR branch name>.md`.

### Releasing
MODULE.bazel.lock (generated, 1689 changed lines): file diff suppressed because it is too large.
WORKSPACE (46 changed lines)
@@ -16,6 +16,34 @@ load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")
|
||||
|
||||
rules_pkg_dependencies()
|
||||
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "toolchains_protoc",
|
||||
sha256 = "abb1540f8a9e045422730670ebb2f25b41fa56ca5a7cf795175a110a0a68f4ad",
|
||||
strip_prefix = "toolchains_protoc-0.3.6",
|
||||
url = "https://github.com/aspect-build/toolchains_protoc/releases/download/v0.3.6/toolchains_protoc-v0.3.6.tar.gz",
|
||||
)
|
||||
|
||||
load("@toolchains_protoc//protoc:repositories.bzl", "rules_protoc_dependencies")
|
||||
|
||||
rules_protoc_dependencies()
|
||||
|
||||
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies")
|
||||
|
||||
rules_proto_dependencies()
|
||||
|
||||
load("@bazel_features//:deps.bzl", "bazel_features_deps")
|
||||
|
||||
bazel_features_deps()
|
||||
|
||||
load("@toolchains_protoc//protoc:toolchain.bzl", "protoc_toolchains")
|
||||
|
||||
protoc_toolchains(
|
||||
name = "protoc_toolchains",
|
||||
version = "v25.3",
|
||||
)
|
||||
|
||||
HERMETIC_CC_TOOLCHAIN_VERSION = "v3.0.1"
|
||||
|
||||
http_archive(
|
||||
@@ -137,10 +165,10 @@ http_archive(
|
||||
# Expose internals of go_test for custom build transitions.
|
||||
"//third_party:io_bazel_rules_go_test.patch",
|
||||
],
|
||||
sha256 = "80a98277ad1311dacd837f9b16db62887702e9f1d1c4c9f796d0121a46c8e184",
|
||||
sha256 = "b2038e2de2cace18f032249cb4bb0048abf583a36369fa98f687af1b3f880b26",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.46.0/rules_go-v0.46.0.zip",
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.48.1/rules_go-v0.48.1.zip",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -182,7 +210,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
|
||||
go_rules_dependencies()
|
||||
|
||||
go_register_toolchains(
|
||||
go_version = "1.22.10",
|
||||
go_version = "1.23.5",
|
||||
nogo = "@//:nogo",
|
||||
)
|
||||
|
||||
@@ -227,7 +255,7 @@ filegroup(
|
||||
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
|
||||
)
|
||||
|
||||
consensus_spec_version = "v1.5.0-beta.0"
|
||||
consensus_spec_version = "v1.5.0-beta.1"
|
||||
|
||||
bls_test_version = "v0.1.1"
|
||||
|
||||
@@ -243,7 +271,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-HdMlTN3wv+hUMCkIRPk+EHcLixY1cSZlvkx3obEp4AM=",
|
||||
integrity = "sha256-R6r60geCfEjMaB1Ag3svaMFXFIgaJvkTJhfKsf76rFE=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -259,7 +287,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-eX/ihmHQ+OvfoGJxSMgy22yAU3SZ3xjsX0FU0EaZrSs=",
|
||||
integrity = "sha256-2Pem2gMHxW/6bBhZ2BaqkQruQSd/dTS3WMaMQO8rZ/o=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -275,7 +303,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-k3Onf42vOzIqyddecR6G82sDy3mmDA+R8RN66QjB0GI=",
|
||||
integrity = "sha256-5yP05JTV1MhcUZ2kSh+T+kXjG+uW3A5877veC5c1mD4=",
|
||||
url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version,
|
||||
)
|
||||
|
||||
@@ -290,7 +318,7 @@ filegroup(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
""",
|
||||
integrity = "sha256-N/d4AwdOSlb70Dr+2l20dfXxNSzJDj/qKA9Rkn8Gb5w=",
|
||||
integrity = "sha256-O6Rg6h19T0RsJs0sBDZ9O1k4LnCJ/gu2ilHijFBVfME=",
|
||||
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
|
||||
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
|
||||
)
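The `integrity` values changed in the hunks above are SRI-style digests: `sha256-` followed by the base64-encoded SHA-256 of the downloaded archive. Below is a small sketch of recomputing such a value for a locally downloaded tarball, e.g. when verifying a consensus-spec-tests bump; the local file name is hypothetical.

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"os"
)

// sriSHA256 returns the Bazel-style integrity string ("sha256-<base64 digest>") for a file.
func sriSHA256(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return "sha256-" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

func main() {
	// Hypothetical local copy of one of the archives referenced in the WORKSPACE hunks above.
	s, err := sriSHA256("general.tar.gz")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println(s) // compare against the integrity value in WORKSPACE
}
```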
|
||||
|
||||
@@ -13,6 +13,7 @@ go_library(
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/client:go_default_library",
|
||||
"//api/server:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
@@ -27,6 +28,7 @@ go_library(
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package builder
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
ssz "github.com/prysmaticlabs/fastssz"
|
||||
consensus_types "github.com/prysmaticlabs/prysm/v5/consensus-types"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
)
|
||||
@@ -22,7 +22,6 @@ type SignedBid interface {
|
||||
// Bid is an interface describing the method set of a builder bid.
|
||||
type Bid interface {
|
||||
Header() (interfaces.ExecutionData, error)
|
||||
BlobKzgCommitments() ([][]byte, error)
|
||||
Value() primitives.Wei
|
||||
Pubkey() []byte
|
||||
Version() int
|
||||
@@ -31,6 +30,18 @@ type Bid interface {
|
||||
HashTreeRootWith(hh *ssz.Hasher) error
|
||||
}
|
||||
|
||||
// BidDeneb is an interface that exposes newly added kzg commitments on top of builder bid
|
||||
type BidDeneb interface {
|
||||
Bid
|
||||
BlobKzgCommitments() [][]byte
|
||||
}
|
||||
|
||||
// BidElectra is an interface that exposes the newly added execution requests on top of the builder bid
|
||||
type BidElectra interface {
|
||||
BidDeneb
|
||||
ExecutionRequests() *v1.ExecutionRequests
|
||||
}
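Because `BlobKzgCommitments` is dropped from the base `Bid` interface in this hunk, fork-specific data is now reached through the narrower `BidDeneb` and `BidElectra` interfaces. A minimal calling-side sketch of that pattern follows; the `builder` import path is assumed, and the snippet is an illustration rather than code from this PR.

```go
package main

import (
	"fmt"

	builder "github.com/prysmaticlabs/prysm/v5/api/client/builder" // assumed import path for the builder package shown above
	v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

// commitmentsAndRequests shows the calling pattern: type-assert to the narrower
// fork-specific interfaces instead of inspecting Version() by hand.
func commitmentsAndRequests(bid builder.Bid) ([][]byte, *v1.ExecutionRequests) {
	var commitments [][]byte
	var requests *v1.ExecutionRequests
	if d, ok := bid.(builder.BidDeneb); ok { // Deneb and later bids expose commitments
		commitments = d.BlobKzgCommitments()
	}
	if e, ok := bid.(builder.BidElectra); ok { // Electra and later bids also expose execution requests
		requests = e.ExecutionRequests()
	}
	return commitments, requests
}

func main() {
	// With a nil bid both assertions simply fail, so the sketch compiles and runs standalone.
	c, r := commitmentsAndRequests(nil)
	fmt.Println(len(c), r == nil)
}
```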
|
||||
|
||||
type signedBuilderBid struct {
|
||||
p *ethpb.SignedBuilderBid
|
||||
}
|
||||
@@ -115,11 +126,6 @@ func (b builderBid) Header() (interfaces.ExecutionData, error) {
|
||||
return blocks.WrappedExecutionPayloadHeader(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
func (b builderBid) BlobKzgCommitments() ([][]byte, error) {
|
||||
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
|
||||
}
|
||||
|
||||
// Version --
|
||||
func (b builderBid) Version() int {
|
||||
return version.Bellatrix
|
||||
@@ -169,11 +175,6 @@ func (b builderBidCapella) Header() (interfaces.ExecutionData, error) {
|
||||
return blocks.WrappedExecutionPayloadHeaderCapella(b.p.Header)
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
func (b builderBidCapella) BlobKzgCommitments() ([][]byte, error) {
|
||||
return [][]byte{}, errors.New("blob kzg commitments not available before Deneb")
|
||||
}
|
||||
|
||||
// Version --
|
||||
func (b builderBidCapella) Version() int {
|
||||
return version.Capella
|
||||
@@ -254,8 +255,8 @@ func (b builderBidDeneb) Header() (interfaces.ExecutionData, error) {
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
func (b builderBidDeneb) BlobKzgCommitments() ([][]byte, error) {
|
||||
return b.p.BlobKzgCommitments, nil
|
||||
func (b builderBidDeneb) BlobKzgCommitments() [][]byte {
|
||||
return b.p.BlobKzgCommitments
|
||||
}
|
||||
|
||||
type signedBuilderBidDeneb struct {
|
||||
@@ -290,3 +291,95 @@ func (b signedBuilderBidDeneb) Version() int {
|
||||
func (b signedBuilderBidDeneb) IsNil() bool {
|
||||
return b.p == nil
|
||||
}
|
||||
|
||||
type builderBidElectra struct {
|
||||
p *ethpb.BuilderBidElectra
|
||||
}
|
||||
|
||||
// WrappedBuilderBidElectra is a constructor which wraps a protobuf bid into an interface.
|
||||
func WrappedBuilderBidElectra(p *ethpb.BuilderBidElectra) (Bid, error) {
|
||||
w := builderBidElectra{p: p}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Version --
|
||||
func (b builderBidElectra) Version() int {
|
||||
return version.Electra
|
||||
}
|
||||
|
||||
// Value --
|
||||
func (b builderBidElectra) Value() primitives.Wei {
|
||||
return primitives.LittleEndianBytesToWei(b.p.Value)
|
||||
}
|
||||
|
||||
// Pubkey --
|
||||
func (b builderBidElectra) Pubkey() []byte {
|
||||
return b.p.Pubkey
|
||||
}
|
||||
|
||||
// IsNil --
|
||||
func (b builderBidElectra) IsNil() bool {
|
||||
return b.p == nil
|
||||
}
|
||||
|
||||
// HashTreeRoot --
|
||||
func (b builderBidElectra) HashTreeRoot() ([32]byte, error) {
|
||||
return b.p.HashTreeRoot()
|
||||
}
|
||||
|
||||
// HashTreeRootWith --
|
||||
func (b builderBidElectra) HashTreeRootWith(hh *ssz.Hasher) error {
|
||||
return b.p.HashTreeRootWith(hh)
|
||||
}
|
||||
|
||||
// Header --
|
||||
func (b builderBidElectra) Header() (interfaces.ExecutionData, error) {
|
||||
// We have to convert big endian to little endian because the value is coming from the execution layer.
|
||||
return blocks.WrappedExecutionPayloadHeaderDeneb(b.p.Header)
|
||||
}
|
||||
|
||||
// ExecutionRequests --
|
||||
func (b builderBidElectra) ExecutionRequests() *v1.ExecutionRequests {
|
||||
return b.p.ExecutionRequests // does not copy
|
||||
}
|
||||
|
||||
// BlobKzgCommitments --
|
||||
func (b builderBidElectra) BlobKzgCommitments() [][]byte {
|
||||
return b.p.BlobKzgCommitments
|
||||
}
|
||||
|
||||
type signedBuilderBidElectra struct {
|
||||
p *ethpb.SignedBuilderBidElectra
|
||||
}
|
||||
|
||||
// WrappedSignedBuilderBidElectra is a constructor which wraps a protobuf signed bid into an interface.
|
||||
func WrappedSignedBuilderBidElectra(p *ethpb.SignedBuilderBidElectra) (SignedBid, error) {
|
||||
w := signedBuilderBidElectra{p: p}
|
||||
if w.IsNil() {
|
||||
return nil, consensus_types.ErrNilObjectWrapped
|
||||
}
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Message --
|
||||
func (b signedBuilderBidElectra) Message() (Bid, error) {
|
||||
return WrappedBuilderBidElectra(b.p.Message)
|
||||
}
|
||||
|
||||
// Signature --
|
||||
func (b signedBuilderBidElectra) Signature() []byte {
|
||||
return b.p.Signature
|
||||
}
|
||||
|
||||
// Version --
|
||||
func (b signedBuilderBidElectra) Version() int {
|
||||
return version.Electra
|
||||
}
|
||||
|
||||
// IsNil --
|
||||
func (b signedBuilderBidElectra) IsNil() bool {
|
||||
return b.p == nil
|
||||
}
|
||||
|
||||
@@ -219,8 +219,23 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
if err := json.Unmarshal(hb, v); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
switch strings.ToLower(v.Version) {
|
||||
case strings.ToLower(version.String(version.Deneb)):
|
||||
|
||||
ver, err := version.FromString(strings.ToLower(v.Version))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported header version %s", strings.ToLower(v.Version)))
|
||||
}
|
||||
if ver >= version.Electra {
|
||||
hr := &ExecHeaderResponseElectra{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
}
|
||||
p, err := hr.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidElectra(p)
|
||||
}
|
||||
if ver >= version.Deneb {
|
||||
hr := &ExecHeaderResponseDeneb{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
@@ -230,7 +245,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidDeneb(p)
|
||||
case strings.ToLower(version.String(version.Capella)):
|
||||
}
|
||||
if ver >= version.Capella {
|
||||
hr := &ExecHeaderResponseCapella{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
@@ -240,7 +256,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
return nil, errors.Wrapf(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBidCapella(p)
|
||||
case strings.ToLower(version.String(version.Bellatrix)):
|
||||
}
|
||||
if ver >= version.Bellatrix {
|
||||
hr := &ExecHeaderResponse{}
|
||||
if err := json.Unmarshal(hb, hr); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshaling the builder GetHeader response, using slot=%d, parentHash=%#x, pubkey=%#x", slot, parentHash, pubkey)
|
||||
@@ -250,9 +267,8 @@ func (c *Client) GetHeader(ctx context.Context, slot primitives.Slot, parentHash
|
||||
return nil, errors.Wrap(err, "could not extract proto message from header")
|
||||
}
|
||||
return WrappedSignedBuilderBid(p)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
|
||||
}
|
||||
return nil, fmt.Errorf("unsupported header version %s", strings.ToLower(v.Version))
|
||||
}
|
||||
|
||||
// RegisterValidator encodes the SignedValidatorRegistrationV1 message to json (including hex-encoding the byte
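The hunks above replace the switch over exact version strings with a single `version.FromString` parse followed by `>=` threshold checks, so the newest matching response type is chosen. The self-contained sketch below illustrates the same dispatch idea with stand-in names rather than Prysm's actual types.

```go
package main

import (
	"fmt"
	"strings"
)

// Fork ordinals in ascending order, mirroring how a version package can map names to ints.
const (
	Bellatrix = iota
	Capella
	Deneb
	Electra
)

var forkNames = map[string]int{"bellatrix": Bellatrix, "capella": Capella, "deneb": Deneb, "electra": Electra}

// decodeHeader picks the newest decoder whose minimum fork is satisfied by the parsed version,
// instead of enumerating every fork name in a switch.
func decodeHeader(versionField string) (string, error) {
	ver, ok := forkNames[strings.ToLower(versionField)]
	if !ok {
		return "", fmt.Errorf("unsupported header version %s", versionField)
	}
	switch {
	case ver >= Electra:
		return "ExecHeaderResponseElectra", nil
	case ver >= Deneb:
		return "ExecHeaderResponseDeneb", nil
	case ver >= Capella:
		return "ExecHeaderResponseCapella", nil
	default: // ver >= Bellatrix
		return "ExecHeaderResponse", nil
	}
}

func main() {
	for _, v := range []string{"electra", "deneb", "capella", "bellatrix", "unknown"} {
		t, err := decodeHeader(v)
		fmt.Println(v, "->", t, err)
	}
}
```

With ordered comparisons, a future fork that keeps an older header shape does not need a new case of its own.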
|
||||
|
||||
@@ -266,9 +266,9 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
|
||||
kcgCommitments, err := bid.BlobKzgCommitments()
|
||||
require.NoError(t, err)
|
||||
dbid, ok := bid.(builderBidDeneb)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := dbid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
@@ -292,6 +292,50 @@ func TestClient_GetHeader(t *testing.T) {
|
||||
_, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.ErrorContains(t, "could not extract proto message from header: too many blob commitments: 7", err)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
require.Equal(t, expectedPath, r.URL.Path)
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(bytes.NewBufferString(testExampleHeaderResponseElectra)),
|
||||
Request: r.Clone(ctx),
|
||||
}, nil
|
||||
}),
|
||||
}
|
||||
c := &Client{
|
||||
hc: hc,
|
||||
baseURL: &url.URL{Host: "localhost:3500", Scheme: "http"},
|
||||
}
|
||||
h, err := c.GetHeader(ctx, slot, bytesutil.ToBytes32(parentHash), bytesutil.ToBytes48(pubkey))
|
||||
require.NoError(t, err)
|
||||
expectedWithdrawalsRoot := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
|
||||
bid, err := h.Message()
|
||||
require.NoError(t, err)
|
||||
bidHeader, err := bid.Header()
|
||||
require.NoError(t, err)
|
||||
withdrawalsRoot, err := bidHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, bytes.Equal(expectedWithdrawalsRoot, withdrawalsRoot))
|
||||
|
||||
bidStr := "652312848583266388373324160190187140051835877600158453279131187530910662656"
|
||||
value, err := stringToUint256(bidStr)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, value.Int.Cmp(primitives.WeiToBigInt(bid.Value())))
|
||||
require.Equal(t, bidStr, primitives.WeiToBigInt(bid.Value()).String())
|
||||
ebid, ok := bid.(builderBidElectra)
|
||||
require.Equal(t, true, ok)
|
||||
kcgCommitments := ebid.BlobKzgCommitments()
|
||||
require.Equal(t, len(kcgCommitments) > 0, true)
|
||||
for i := range kcgCommitments {
|
||||
require.Equal(t, len(kcgCommitments[i]) == 48, true)
|
||||
}
|
||||
requests := ebid.ExecutionRequests()
|
||||
require.Equal(t, 1, len(requests.Deposits))
|
||||
require.Equal(t, 1, len(requests.Withdrawals))
|
||||
require.Equal(t, 1, len(requests.Consolidations))
|
||||
|
||||
})
|
||||
t.Run("unsupported version", func(t *testing.T) {
|
||||
hc := &http.Client{
|
||||
Transport: roundtrip(func(r *http.Request) (*http.Response, error) {
|
||||
|
||||
@@ -5,13 +5,15 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
consensusblocks "github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
types "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/math"
|
||||
@@ -414,54 +416,10 @@ func FromProtoDeneb(payload *v1.ExecutionPayloadDeneb) (ExecutionPayloadDeneb, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
var errInvalidTypeConversion = errors.New("unable to translate between api and foreign type")
|
||||
|
||||
// ExecutionPayloadResponseFromData converts an ExecutionData interface value to a payload response.
|
||||
// This involves serializing the execution payload value so that the abstract payload envelope can be used.
|
||||
func ExecutionPayloadResponseFromData(ed interfaces.ExecutionData, bundle *v1.BlobsBundle) (*ExecutionPayloadResponse, error) {
|
||||
pb := ed.Proto()
|
||||
var data interface{}
|
||||
var err error
|
||||
var ver string
|
||||
switch pbStruct := pb.(type) {
|
||||
case *v1.ExecutionPayload:
|
||||
ver = version.String(version.Bellatrix)
|
||||
data, err = FromProto(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Bellatrix ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadCapella:
|
||||
ver = version.String(version.Capella)
|
||||
data, err = FromProtoCapella(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Capella ExecutionPayload to an API response")
|
||||
}
|
||||
case *v1.ExecutionPayloadDeneb:
|
||||
ver = version.String(version.Deneb)
|
||||
payloadStruct, err := FromProtoDeneb(pbStruct)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert a Deneb ExecutionPayload to an API response")
|
||||
}
|
||||
data = &ExecutionPayloadDenebAndBlobsBundle{
|
||||
ExecutionPayload: &payloadStruct,
|
||||
BlobsBundle: FromBundleProto(bundle),
|
||||
}
|
||||
default:
|
||||
return nil, errInvalidTypeConversion
|
||||
}
|
||||
encoded, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to marshal execution payload version=%s", ver)
|
||||
}
|
||||
return &ExecutionPayloadResponse{
|
||||
Version: ver,
|
||||
Data: encoded,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseCapella is the response of builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey} for Capella.
|
||||
type ExecHeaderResponseCapella struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *BuilderBidCapella `json:"message"`
|
||||
} `json:"data"`
|
||||
@@ -605,17 +563,25 @@ type BlobBundler interface {
|
||||
BundleProto() (*v1.BlobsBundle, error)
|
||||
}
|
||||
|
||||
// ParsedExecutionRequests can retrieve the underlying execution requests for the given execution payload response.
|
||||
type ParsedExecutionRequests interface {
|
||||
ExecutionRequestsProto() (*v1.ExecutionRequests, error)
|
||||
}
|
||||
|
||||
func (r *ExecutionPayloadResponse) ParsePayload() (ParsedPayload, error) {
|
||||
var toProto ParsedPayload
|
||||
switch r.Version {
|
||||
case version.String(version.Bellatrix):
|
||||
toProto = &ExecutionPayload{}
|
||||
case version.String(version.Capella):
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
case version.String(version.Deneb):
|
||||
v, err := version.FromString(strings.ToLower(r.Version))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("unsupported version %s", strings.ToLower(r.Version)))
|
||||
}
|
||||
if v >= version.Deneb {
|
||||
toProto = &ExecutionPayloadDenebAndBlobsBundle{}
|
||||
default:
|
||||
return nil, consensusblocks.ErrUnsupportedVersion
|
||||
} else if v >= version.Capella {
|
||||
toProto = &ExecutionPayloadCapella{}
|
||||
} else if v >= version.Bellatrix {
|
||||
toProto = &ExecutionPayload{}
|
||||
} else {
|
||||
return nil, fmt.Errorf("unsupported version %s", strings.ToLower(r.Version))
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(r.Data, toProto); err != nil {
|
||||
@@ -990,7 +956,8 @@ func (ch *BLSToExecutionChange) MarshalJSON() ([]byte, error) {
|
||||
|
||||
// ExecHeaderResponseDeneb is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
|
||||
type ExecHeaderResponseDeneb struct {
|
||||
Data struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *BuilderBidDeneb `json:"message"`
|
||||
} `json:"data"`
|
||||
@@ -1307,6 +1274,208 @@ func (p *ExecutionPayloadDeneb) ToProto() (*v1.ExecutionPayloadDeneb, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ExecHeaderResponseElectra is the header response for builder API /eth/v1/builder/header/{slot}/{parent_hash}/{pubkey}.
|
||||
type ExecHeaderResponseElectra struct {
|
||||
Version string `json:"version"`
|
||||
Data struct {
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
Message *BuilderBidElectra `json:"message"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// ToProto creates a SignedBuilderBidElectra Proto from ExecHeaderResponseElectra.
|
||||
func (ehr *ExecHeaderResponseElectra) ToProto() (*eth.SignedBuilderBidElectra, error) {
|
||||
bb, err := ehr.Data.Message.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &eth.SignedBuilderBidElectra{
|
||||
Message: bb,
|
||||
Signature: bytesutil.SafeCopyBytes(ehr.Data.Signature),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ToProto creates a BuilderBidElectra Proto from BuilderBidElectra.
|
||||
func (bb *BuilderBidElectra) ToProto() (*eth.BuilderBidElectra, error) {
|
||||
header, err := bb.Header.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(bb.BlobKzgCommitments) > params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra) {
|
||||
return nil, fmt.Errorf("blob commitment count %d exceeds the maximum %d", len(bb.BlobKzgCommitments), params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
|
||||
}
|
||||
kzgCommitments := make([][]byte, len(bb.BlobKzgCommitments))
|
||||
for i, commit := range bb.BlobKzgCommitments {
|
||||
if len(commit) != fieldparams.BLSPubkeyLength {
|
||||
return nil, fmt.Errorf("commitment length %d is not %d", len(commit), fieldparams.BLSPubkeyLength)
|
||||
}
|
||||
kzgCommitments[i] = bytesutil.SafeCopyBytes(commit)
|
||||
}
|
||||
// Post-Electra execution requests must not be nil; a builder with no requests is expected to send an empty requests object instead.
|
||||
if bb.ExecutionRequests == nil {
|
||||
return nil, errors.New("bid contains nil execution requests")
|
||||
}
|
||||
executionRequests, err := bb.ExecutionRequests.ToProto()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to convert ExecutionRequests")
|
||||
}
|
||||
return &eth.BuilderBidElectra{
|
||||
Header: header,
|
||||
BlobKzgCommitments: kzgCommitments,
|
||||
ExecutionRequests: executionRequests,
|
||||
// Note that SSZBytes() reverses byte order for the little-endian representation.
|
||||
// Uint256.Bytes() is big-endian, SSZBytes takes this value and reverses it.
|
||||
Value: bytesutil.SafeCopyBytes(bb.Value.SSZBytes()),
|
||||
Pubkey: bytesutil.SafeCopyBytes(bb.Pubkey),
|
||||
}, nil
|
||||
}
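The endianness note in the hunk above is easy to trip over, so here is a self-contained illustration of the conversion it describes, using plain `math/big` instead of the builder API's `Uint256` type: the JSON `value` is a decimal string, `big.Int` encodes it big-endian, and the SSZ field wants the 32-byte little-endian form.

```go
package main

import (
	"fmt"
	"math/big"
)

// littleEndian32 left-pads the big-endian encoding of v to 32 bytes and reverses it,
// which is the byte order an SSZ uint256 field expects.
func littleEndian32(v *big.Int) []byte {
	be := v.FillBytes(make([]byte, 32)) // big-endian, zero-padded to 32 bytes
	le := make([]byte, 32)
	for i := range be {
		le[i] = be[len(be)-1-i]
	}
	return le
}

func main() {
	// Decimal bid value used in the Electra test fixture later in this diff.
	v, ok := new(big.Int).SetString("652312848583266388373324160190187140051835877600158453279131187530910662656", 10)
	if !ok {
		panic("bad decimal literal")
	}
	fmt.Printf("big-endian:    %x\n", v.FillBytes(make([]byte, 32)))
	fmt.Printf("little-endian: %x\n", littleEndian32(v))
}
```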
|
||||
|
||||
// ExecutionRequestsV1 is a wrapper for different execution requests
|
||||
type ExecutionRequestsV1 struct {
|
||||
Deposits []*DepositRequestV1 `json:"deposits"`
|
||||
Withdrawals []*WithdrawalRequestV1 `json:"withdrawals"`
|
||||
Consolidations []*ConsolidationRequestV1 `json:"consolidations"`
|
||||
}
|
||||
|
||||
func (er *ExecutionRequestsV1) ToProto() (*v1.ExecutionRequests, error) {
|
||||
if uint64(len(er.Deposits)) > params.BeaconConfig().MaxDepositRequestsPerPayload {
|
||||
return nil, fmt.Errorf("deposit requests count %d exceeds the maximum %d", len(er.Deposits), params.BeaconConfig().MaxDepositRequestsPerPayload)
|
||||
}
|
||||
deposits := make([]*v1.DepositRequest, len(er.Deposits))
|
||||
for i, dep := range er.Deposits {
|
||||
d, err := dep.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deposits[i] = d
|
||||
}
|
||||
if uint64(len(er.Withdrawals)) > params.BeaconConfig().MaxWithdrawalRequestsPerPayload {
|
||||
return nil, fmt.Errorf("withdrawal requests count %d exceeds the maximum %d", len(er.Withdrawals), params.BeaconConfig().MaxWithdrawalRequestsPerPayload)
|
||||
}
|
||||
withdrawals := make([]*v1.WithdrawalRequest, len(er.Withdrawals))
|
||||
for i, wr := range er.Withdrawals {
|
||||
w, err := wr.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawals[i] = w
|
||||
}
|
||||
if uint64(len(er.Consolidations)) > params.BeaconConfig().MaxConsolidationsRequestsPerPayload {
|
||||
return nil, fmt.Errorf("consolidation requests count %d exceeds the maximum %d", len(er.Consolidations), params.BeaconConfig().MaxConsolidationsRequestsPerPayload)
|
||||
}
|
||||
consolidations := make([]*v1.ConsolidationRequest, len(er.Consolidations))
|
||||
for i, con := range er.Consolidations {
|
||||
c, err := con.ToProto()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidations[i] = c
|
||||
}
|
||||
return &v1.ExecutionRequests{
|
||||
Deposits: deposits,
|
||||
Withdrawals: withdrawals,
|
||||
Consolidations: consolidations,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BuilderBidElectra is a field of ExecHeaderResponseElectra.
|
||||
type BuilderBidElectra struct {
|
||||
Header *ExecutionPayloadHeaderDeneb `json:"header"`
|
||||
BlobKzgCommitments []hexutil.Bytes `json:"blob_kzg_commitments"`
|
||||
ExecutionRequests *ExecutionRequestsV1 `json:"execution_requests"`
|
||||
Value Uint256 `json:"value"`
|
||||
Pubkey hexutil.Bytes `json:"pubkey"`
|
||||
}
|
||||
|
||||
// WithdrawalRequestV1 is a field of ExecutionRequestsV1.
|
||||
type WithdrawalRequestV1 struct {
|
||||
SourceAddress hexutil.Bytes `json:"source_address"`
|
||||
ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"`
|
||||
Amount Uint256 `json:"amount"`
|
||||
}
|
||||
|
||||
func (wr *WithdrawalRequestV1) ToProto() (*v1.WithdrawalRequest, error) {
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(wr.SourceAddress.String(), common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "source_address")
|
||||
}
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(wr.ValidatorPubkey.String(), fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "validator_pubkey")
|
||||
}
|
||||
|
||||
return &v1.WithdrawalRequest{
|
||||
SourceAddress: srcAddress,
|
||||
ValidatorPubkey: pubkey,
|
||||
Amount: wr.Amount.Uint64(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DepositRequestV1 is a field of ExecutionRequestsV1.
|
||||
type DepositRequestV1 struct {
|
||||
PubKey hexutil.Bytes `json:"pubkey"`
|
||||
// withdrawalCredentials: DATA, 32 Bytes
|
||||
WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"`
|
||||
// amount: QUANTITY, 64 Bits
|
||||
Amount Uint256 `json:"amount"`
|
||||
// signature: DATA, 96 Bytes
|
||||
Signature hexutil.Bytes `json:"signature"`
|
||||
// index: QUANTITY, 64 Bits
|
||||
Index Uint256 `json:"index"`
|
||||
}
|
||||
|
||||
func (dr *DepositRequestV1) ToProto() (*v1.DepositRequest, error) {
|
||||
pubkey, err := bytesutil.DecodeHexWithLength(dr.PubKey.String(), fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "pubkey")
|
||||
}
|
||||
wc, err := bytesutil.DecodeHexWithLength(dr.WithdrawalCredentials.String(), fieldparams.RootLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "withdrawal_credentials")
|
||||
}
|
||||
sig, err := bytesutil.DecodeHexWithLength(dr.Signature.String(), fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "signature")
|
||||
}
|
||||
return &v1.DepositRequest{
|
||||
Pubkey: pubkey,
|
||||
WithdrawalCredentials: wc,
|
||||
Amount: dr.Amount.Uint64(),
|
||||
Signature: sig,
|
||||
Index: dr.Index.Uint64(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ConsolidationRequestV1 is a field of ExecutionRequestsV1.
|
||||
type ConsolidationRequestV1 struct {
|
||||
// sourceAddress: DATA, 20 Bytes
|
||||
SourceAddress hexutil.Bytes `json:"source_address"`
|
||||
// sourcePubkey: DATA, 48 Bytes
|
||||
SourcePubkey hexutil.Bytes `json:"source_pubkey"`
|
||||
// targetPubkey: DATA, 48 Bytes
|
||||
TargetPubkey hexutil.Bytes `json:"target_pubkey"`
|
||||
}
|
||||
|
||||
func (cr *ConsolidationRequestV1) ToProto() (*v1.ConsolidationRequest, error) {
|
||||
srcAddress, err := bytesutil.DecodeHexWithLength(cr.SourceAddress.String(), common.AddressLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "source_address")
|
||||
}
|
||||
sourcePubkey, err := bytesutil.DecodeHexWithLength(cr.SourcePubkey.String(), fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "source_pubkey")
|
||||
}
|
||||
targetPubkey, err := bytesutil.DecodeHexWithLength(cr.TargetPubkey.String(), fieldparams.BLSPubkeyLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "target_pubkey")
|
||||
}
|
||||
return &v1.ConsolidationRequest{
|
||||
SourceAddress: srcAddress,
|
||||
SourcePubkey: sourcePubkey,
|
||||
TargetPubkey: targetPubkey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ErrorMessage is a JSON representation of the builder API's returned error message.
|
||||
type ErrorMessage struct {
|
||||
Code int `json:"code"`
|
||||
|
||||
@@ -154,6 +154,64 @@ var testExampleHeaderResponseDeneb = `{
|
||||
}
|
||||
}`
|
||||
|
||||
var testExampleHeaderResponseElectra = `{
|
||||
"version": "electra",
|
||||
"data": {
|
||||
"message": {
|
||||
"header": {
|
||||
"parent_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"fee_recipient": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"state_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"receipts_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"logs_bloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"block_number": "1",
|
||||
"gas_limit": "1",
|
||||
"gas_used": "1",
|
||||
"timestamp": "1",
|
||||
"extra_data": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"base_fee_per_gas": "1",
|
||||
"blob_gas_used": "1",
|
||||
"excess_blob_gas": "1",
|
||||
"block_hash": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"transactions_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"withdrawals_root": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
},
|
||||
"blob_kzg_commitments": [
|
||||
"0xa94170080872584e54a1cf092d845703b13907f2e6b3b1c0ad573b910530499e3bcd48c6378846b80d2bfa58c81cf3d5"
|
||||
],
|
||||
"execution_requests": {
|
||||
"deposits": [
|
||||
{
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"withdrawal_credentials": "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2",
|
||||
"amount": "1",
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505",
|
||||
"index": "1"
|
||||
}
|
||||
],
|
||||
"withdrawals": [
|
||||
{
|
||||
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"validator_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"amount": "1"
|
||||
}
|
||||
],
|
||||
"consolidations": [
|
||||
{
|
||||
"source_address": "0xabcf8e0d4e9587369b2301d0790347320302cc09",
|
||||
"source_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a",
|
||||
"target_pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
}
|
||||
]
|
||||
},
|
||||
"value": "652312848583266388373324160190187140051835877600158453279131187530910662656",
|
||||
"pubkey": "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
|
||||
},
|
||||
"signature": "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505"
|
||||
}
|
||||
}`
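In this fixture the builder bid's "value" field is a decimal-encoded 256-bit integer (wei), unlike the 0x-prefixed hex fields around it. A small sanity check of that encoding, using only the standard library:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// The "value" string from the fixture above, parsed as base-10.
	v, ok := new(big.Int).SetString("652312848583266388373324160190187140051835877600158453279131187530910662656", 10)
	fmt.Println(ok, v.BitLen() <= 256) // true true: it fits in a uint256
}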
|
||||
|
||||
var testExampleHeaderResponseDenebNoBundle = `{
|
||||
"version": "deneb",
|
||||
"data": {
|
||||
@@ -1924,9 +1982,9 @@ func TestEmptyResponseBody(t *testing.T) {
|
||||
emptyResponse := &ExecutionPayloadResponse{}
|
||||
require.NoError(t, json.Unmarshal(empty, emptyResponse))
|
||||
_, err := emptyResponse.ParsePayload()
|
||||
require.ErrorIs(t, err, consensusblocks.ErrUnsupportedVersion)
|
||||
require.ErrorContains(t, "unsupported version", err)
|
||||
})
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb}
|
||||
versions := []int{version.Bellatrix, version.Capella, version.Deneb, version.Electra}
|
||||
for i := range versions {
|
||||
vstr := version.String(versions[i])
|
||||
t.Run("populated version without payload"+vstr, func(t *testing.T) {
|
||||
|
||||
@@ -74,7 +74,7 @@ func AppendHeaders(parent context.Context, headers []string) context.Context {
|
||||
logrus.Warnf("Incorrect gRPC header flag format. Skipping %v", keyValue[0])
|
||||
continue
|
||||
}
|
||||
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "="))
|
||||
parent = metadata.AppendToOutgoingContext(parent, keyValue[0], strings.Join(keyValue[1:], "=")) // nolint:fatcontext
|
||||
}
|
||||
}
|
||||
return parent
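The nolint:fatcontext annotation acknowledges that the context is rebuilt on each loop iteration. Below is a hedged, self-contained sketch of the same splitting rule (only the first '=' separates key from value); the header string is made up for illustration.

package main

import (
	"context"
	"fmt"
	"strings"

	"google.golang.org/grpc/metadata"
)

func appendHeaders(ctx context.Context, headers []string) context.Context {
	for _, h := range headers {
		kv := strings.Split(h, "=")
		if len(kv) < 2 {
			fmt.Printf("Incorrect gRPC header flag format. Skipping %v\n", kv[0])
			continue
		}
		// Re-join the tail so values that themselves contain '=' survive intact.
		ctx = metadata.AppendToOutgoingContext(ctx, kv[0], strings.Join(kv[1:], "="))
	}
	return ctx
}

func main() {
	ctx := appendHeaders(context.Background(), []string{"authorization=Basic dXNlcjpwYXNz=="})
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("authorization")) // [Basic dXNlcjpwYXNz==]
}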
|
||||
|
||||
@@ -28,6 +28,7 @@ go_library(
|
||||
"//api/server:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/validator:go_default_library",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/validator"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
@@ -243,7 +244,7 @@ func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "AggregatorIndex")
|
||||
}
|
||||
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, 96)
|
||||
selectionProof, err := bytesutil.DecodeHexWithLength(c.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -330,7 +331,7 @@ func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationA
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
}
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -366,7 +367,7 @@ func (a *AggregateAttestationAndProofElectra) ToConsensus() (*eth.AggregateAttes
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Aggregate")
|
||||
}
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, 96)
|
||||
proof, err := bytesutil.DecodeHexWithLength(a.SelectionProof, fieldparams.BLSSignatureLength)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "SelectionProof")
|
||||
}
|
||||
@@ -734,6 +735,10 @@ func (s *AttesterSlashingElectra) ToConsensus() (*eth.AttesterSlashingElectra, e
|
||||
}
|
||||
|
||||
func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
if err := slice.VerifyMaxLength(a.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
for i, ix := range a.AttestingIndices {
|
||||
@@ -759,6 +764,13 @@ func (a *IndexedAttestation) ToConsensus() (*eth.IndexedAttestation, error) {
|
||||
}
|
||||
|
||||
func (a *IndexedAttestationElectra) ToConsensus() (*eth.IndexedAttestationElectra, error) {
|
||||
if err := slice.VerifyMaxLength(
|
||||
a.AttestingIndices,
|
||||
params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indices := make([]uint64, len(a.AttestingIndices))
|
||||
var err error
|
||||
for i, ix := range a.AttestingIndices {
|
||||
@@ -1189,7 +1201,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if src == nil {
|
||||
return nil, errNilValue
|
||||
}
|
||||
err := slice.VerifyMaxLength(src, 2)
|
||||
err := slice.VerifyMaxLength(src, fieldparams.MaxAttesterSlashingsElectra)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1210,7 +1222,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, 2048)
|
||||
err = slice.VerifyMaxLength(s.Attestation1.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation1.AttestingIndices", i))
|
||||
}
|
||||
@@ -1230,7 +1242,7 @@ func AttesterSlashingsElectraToConsensus(src []*AttesterSlashingElectra) ([]*eth
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.Signature", i))
|
||||
}
|
||||
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, 2048)
|
||||
err = slice.VerifyMaxLength(s.Attestation2.AttestingIndices, params.BeaconConfig().MaxValidatorsPerCommittee*params.BeaconConfig().MaxCommitteesPerSlot)
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d].Attestation2.AttestingIndices", i))
|
||||
}
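These hunks replace hard-coded limits (2, 2048) with named config values such as fieldparams.MaxAttesterSlashingsElectra and MaxValidatorsPerCommittee * MaxCommitteesPerSlot. The generic helper below is only an assumed restatement of slice.VerifyMaxLength's contract, not Prysm's code.

package main

import "fmt"

// verifyMaxLength rejects slices longer than a protocol-defined limit,
// which is what the conversion code above checks before decoding further.
func verifyMaxLength[T any](s []T, max uint64) error {
	if uint64(len(s)) > max {
		return fmt.Errorf("length %d exceeds max %d", len(s), max)
	}
	return nil
}

func main() {
	indices := make([]uint64, 3)
	// On mainnet the Electra limit would be MaxValidatorsPerCommittee (2048)
	// times MaxCommitteesPerSlot (64); 8 is just a placeholder here.
	fmt.Println(verifyMaxLength(indices, 8)) // <nil>
}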
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/container/slice"
|
||||
@@ -2519,6 +2520,7 @@ func (b *BeaconBlockContentsElectra) ToConsensus() (*eth.BeaconBlockContentsElec
|
||||
}, nil
|
||||
}
|
||||
|
||||
// nolint:gocognit
|
||||
func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
|
||||
if b == nil {
|
||||
return nil, errNilValue
|
||||
@@ -2706,6 +2708,9 @@ func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
|
||||
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExecutionRequests")
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Deposits, params.BeaconConfig().MaxDepositRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
|
||||
for i, d := range b.Body.ExecutionRequests.Deposits {
|
||||
depositRequests[i], err = d.ToConsensus()
|
||||
@@ -2714,6 +2719,9 @@ func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Withdrawals, params.BeaconConfig().MaxWithdrawalRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
|
||||
for i, w := range b.Body.ExecutionRequests.Withdrawals {
|
||||
withdrawalRequests[i], err = w.ToConsensus()
|
||||
@@ -2722,6 +2730,9 @@ func (b *BeaconBlockElectra) ToConsensus() (*eth.BeaconBlockElectra, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Consolidations, params.BeaconConfig().MaxConsolidationsRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
|
||||
for i, c := range b.Body.ExecutionRequests.Consolidations {
|
||||
consolidationRequests[i], err = c.ToConsensus()
|
||||
@@ -3003,9 +3014,14 @@ func (b *BlindedBeaconBlockElectra) ToConsensus() (*eth.BlindedBeaconBlockElectr
|
||||
if err != nil {
|
||||
return nil, server.NewDecodeError(err, "Body.ExecutionPayload.ExcessBlobGas")
|
||||
}
|
||||
|
||||
if b.Body.ExecutionRequests == nil {
|
||||
return nil, server.NewDecodeError(errors.New("nil execution requests"), "Body.ExecutionRequests")
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Deposits, params.BeaconConfig().MaxDepositRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
depositRequests := make([]*enginev1.DepositRequest, len(b.Body.ExecutionRequests.Deposits))
|
||||
for i, d := range b.Body.ExecutionRequests.Deposits {
|
||||
depositRequests[i], err = d.ToConsensus()
|
||||
@@ -3014,6 +3030,9 @@ func (b *BlindedBeaconBlockElectra) ToConsensus() (*eth.BlindedBeaconBlockElectr
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Withdrawals, params.BeaconConfig().MaxWithdrawalRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
withdrawalRequests := make([]*enginev1.WithdrawalRequest, len(b.Body.ExecutionRequests.Withdrawals))
|
||||
for i, w := range b.Body.ExecutionRequests.Withdrawals {
|
||||
withdrawalRequests[i], err = w.ToConsensus()
|
||||
@@ -3022,6 +3041,9 @@ func (b *BlindedBeaconBlockElectra) ToConsensus() (*eth.BlindedBeaconBlockElectr
|
||||
}
|
||||
}
|
||||
|
||||
if err = slice.VerifyMaxLength(b.Body.ExecutionRequests.Consolidations, params.BeaconConfig().MaxConsolidationsRequestsPerPayload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationRequests := make([]*enginev1.ConsolidationRequest, len(b.Body.ExecutionRequests.Consolidations))
|
||||
for i, c := range b.Body.ExecutionRequests.Consolidations {
|
||||
consolidationRequests[i], err = c.ToConsensus()
|
||||
|
||||
@@ -43,6 +43,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/electra:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/feed:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -11,5 +12,5 @@ func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
m.Run()
|
||||
os.Exit(m.Run())
|
||||
}
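The TestMain bodies in this change switch from discarding m.Run's result to passing it to os.Exit. A generic sketch of the conventional shape (not this file):

package example

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	// Run setup here. os.Exit skips deferred calls, so capture the code
	// first and finish any teardown before exiting.
	code := m.Run()
	os.Exit(code)
}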
|
||||
|
||||
@@ -268,6 +268,9 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "could not get execution requests")
|
||||
}
|
||||
if requests == nil {
|
||||
return false, errors.New("nil execution requests")
|
||||
}
|
||||
}
|
||||
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
|
||||
|
||||
|
||||
@@ -1076,6 +1076,48 @@ func TestService_insertSlashingsToForkChoiceStore(t *testing.T) {
|
||||
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestService_insertSlashingsToForkChoiceStoreElectra(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 100)
|
||||
att1 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
slashings := []*ethpb.AttesterSlashingElectra{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
b := util.NewBeaconBlockElectra()
|
||||
b.Block.Body.AttesterSlashings = slashings
|
||||
wb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
service.InsertSlashingsToForkChoiceStore(ctx, wb.Block().Body().AttesterSlashings())
|
||||
}
|
||||
|
||||
func TestOnBlock_ProcessBlocksParallel(t *testing.T) {
|
||||
service, tr := minimalTestService(t)
|
||||
ctx := tr.ctx
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
@@ -468,6 +469,9 @@ func (s *Service) validateStateTransition(ctx context.Context, preState state.Be
|
||||
stateTransitionStartTime := time.Now()
|
||||
postState, err := transition.ExecuteStateTransition(ctx, preState, signed)
|
||||
if err != nil {
|
||||
if ctx.Err() != nil || electra.IsExecutionRequestError(err) {
|
||||
return nil, err
|
||||
}
|
||||
return nil, invalidBlock{error: err}
|
||||
}
|
||||
stateTransitionProcessingTime.Observe(float64(time.Since(stateTransitionStartTime).Milliseconds()))
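This branch distinguishes local failures (context cancellation, execution-request errors surfaced by electra.IsExecutionRequestError) from genuine consensus failures, which alone are wrapped as an invalid block. A self-contained sketch of that classification, with an illustrative wrapper type standing in for invalidBlock:

package main

import (
	"context"
	"errors"
	"fmt"
)

// invalidBlockErr stands in for the invalidBlock wrapper used above.
type invalidBlockErr struct{ error }

// classify returns the error unchanged when the failure is local (canceled
// context or execution-request problem) and wraps it as an invalid block otherwise.
func classify(ctx context.Context, err error, isExecReqErr func(error) bool) error {
	if err == nil {
		return nil
	}
	if ctx.Err() != nil || isExecReqErr(err) {
		return err
	}
	return invalidBlockErr{err}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	err := classify(ctx, errors.New("transition failed"), func(error) bool { return false })
	var ib invalidBlockErr
	// false: the context was canceled, so the block is not marked invalid.
	fmt.Println(errors.As(err, &ib))
}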
|
||||
|
||||
@@ -33,6 +33,7 @@ type MockBuilderService struct {
|
||||
Bid *ethpb.SignedBuilderBid
|
||||
BidCapella *ethpb.SignedBuilderBidCapella
|
||||
BidDeneb *ethpb.SignedBuilderBidDeneb
|
||||
BidElectra *ethpb.SignedBuilderBidElectra
|
||||
RegistrationCache *cache.RegistrationCache
|
||||
ErrGetHeader error
|
||||
ErrRegisterValidator error
|
||||
@@ -59,7 +60,7 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
return nil, nil, errors.Wrap(err, "could not wrap capella payload")
|
||||
}
|
||||
return w, nil, s.ErrSubmitBlindedBlock
|
||||
case version.Deneb:
|
||||
case version.Deneb, version.Electra:
|
||||
w, err := blocks.WrappedExecutionPayloadDeneb(s.PayloadDeneb)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not wrap deneb payload")
|
||||
@@ -72,6 +73,9 @@ func (s *MockBuilderService) SubmitBlindedBlock(_ context.Context, b interfaces.
|
||||
|
||||
// GetHeader for mocking.
|
||||
func (s *MockBuilderService) GetHeader(_ context.Context, slot primitives.Slot, _ [32]byte, _ [48]byte) (builder.SignedBid, error) {
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().ElectraForkEpoch || s.BidElectra != nil {
|
||||
return builder.WrappedSignedBuilderBidElectra(s.BidElectra)
|
||||
}
|
||||
if slots.ToEpoch(slot) >= params.BeaconConfig().DenebForkEpoch || s.BidDeneb != nil {
|
||||
return builder.WrappedSignedBuilderBidDeneb(s.BidDeneb)
|
||||
}
|
||||
|
||||
3
beacon-chain/cache/cache_test.go
vendored
@@ -1,9 +1,10 @@
package cache

import (
	"os"
	"testing"
)

func TestMain(m *testing.M) {
	m.Run()
	os.Exit(m.Run())
}
|
||||
|
||||
@@ -7,12 +7,14 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/signing"
|
||||
v "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/validators"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -105,293 +107,162 @@ func TestProcessAttesterSlashings_IndexedAttestationFailedToVerify(t *testing.T)
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
statePhase0, keysPhase0 := util.DeterministicGenesisState(t, 100)
|
||||
stateAltair, keysAltair := util.DeterministicGenesisStateAltair(t, 100)
|
||||
stateBellatrix, keysBellatrix := util.DeterministicGenesisStateBellatrix(t, 100)
|
||||
stateCapella, keysCapella := util.DeterministicGenesisStateCapella(t, 100)
|
||||
stateDeneb, keysDeneb := util.DeterministicGenesisStateDeneb(t, 100)
|
||||
stateElectra, keysElectra := util.DeterministicGenesisStateElectra(t, 100)
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1Phase0 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2Phase0 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31750000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusAltair(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateAltair(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att1Electra := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
att2Electra := util.HydrateIndexedAttestationElectra(ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
slashingPhase0 := ðpb.AttesterSlashing{
|
||||
Attestation_1: att1Phase0,
|
||||
Attestation_2: att2Phase0,
|
||||
}
|
||||
slashingElectra := ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: att1Electra,
|
||||
Attestation_2: att2Electra,
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
name string
|
||||
st state.BeaconState
|
||||
keys []bls.SecretKey
|
||||
att1 ethpb.IndexedAtt
|
||||
att2 ethpb.IndexedAtt
|
||||
slashing ethpb.AttSlashing
|
||||
slashedBalance uint64
|
||||
}
|
||||
|
||||
testCases := []testCase{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
name: "phase0",
|
||||
st: statePhase0,
|
||||
keys: keysPhase0,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31750000000,
|
||||
},
|
||||
{
|
||||
name: "altair",
|
||||
st: stateAltair,
|
||||
keys: keysAltair,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31500000000,
|
||||
},
|
||||
{
|
||||
name: "bellatrix",
|
||||
st: stateBellatrix,
|
||||
keys: keysBellatrix,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "capella",
|
||||
st: stateCapella,
|
||||
keys: keysCapella,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "deneb",
|
||||
st: stateDeneb,
|
||||
keys: keysDeneb,
|
||||
att1: att1Phase0,
|
||||
att2: att2Phase0,
|
||||
slashing: slashingPhase0,
|
||||
slashedBalance: 31000000000,
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
st: stateElectra,
|
||||
keys: keysElectra,
|
||||
att1: att1Electra,
|
||||
att2: att2Electra,
|
||||
slashing: slashingElectra,
|
||||
slashedBalance: 31992187500,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
for _, vv := range tc.st.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
domain, err := signing.Domain(tc.st.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, tc.st.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(tc.att1.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := tc.keys[0].Sign(signingRoot[:])
|
||||
sig1 := tc.keys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
if tc.att1.Version() >= version.Electra {
|
||||
tc.att1.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
|
||||
} else {
|
||||
tc.att1.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
|
||||
}
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
signingRoot, err = signing.ComputeSigningRoot(tc.att2.GetData(), domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = tc.keys[0].Sign(signingRoot[:])
|
||||
sig1 = tc.keys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
|
||||
if tc.att2.Version() >= version.Electra {
|
||||
tc.att2.(*ethpb.IndexedAttestationElectra).Signature = aggregateSig.Marshal()
|
||||
} else {
|
||||
tc.att2.(*ethpb.IndexedAttestation).Signature = aggregateSig.Marshal()
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, tc.st.SetSlot(currentSlot))
|
||||
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != tc.st.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
tc.st.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31500000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusBellatrix(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateBellatrix(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31000000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
}
|
||||
|
||||
func TestProcessAttesterSlashings_AppliesCorrectStatusCapella(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateCapella(t, 100)
|
||||
for _, vv := range beaconState.Validators() {
|
||||
vv.WithdrawableEpoch = primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)
|
||||
}
|
||||
|
||||
att1 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Epoch: 1},
|
||||
},
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
domain, err := signing.Domain(beaconState.Fork(), 0, params.BeaconConfig().DomainBeaconAttester, beaconState.GenesisValidatorsRoot())
|
||||
require.NoError(t, err)
|
||||
signingRoot, err := signing.ComputeSigningRoot(att1.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 := privKeys[0].Sign(signingRoot[:])
|
||||
sig1 := privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig := bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att1.Signature = aggregateSig.Marshal()
|
||||
|
||||
att2 := util.HydrateIndexedAttestation(ðpb.IndexedAttestation{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
})
|
||||
signingRoot, err = signing.ComputeSigningRoot(att2.Data, domain)
|
||||
assert.NoError(t, err, "Could not get signing root of beacon block header")
|
||||
sig0 = privKeys[0].Sign(signingRoot[:])
|
||||
sig1 = privKeys[1].Sign(signingRoot[:])
|
||||
aggregateSig = bls.AggregateSignatures([]bls.Signature{sig0, sig1})
|
||||
att2.Signature = aggregateSig.Marshal()
|
||||
|
||||
slashings := []*ethpb.AttesterSlashing{
|
||||
{
|
||||
Attestation_1: att1,
|
||||
Attestation_2: att2,
|
||||
},
|
||||
}
|
||||
|
||||
currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
require.NoError(t, beaconState.SetSlot(currentSlot))
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block = ðpb.BeaconBlock{
|
||||
Body: ðpb.BeaconBlockBody{
|
||||
AttesterSlashings: slashings,
|
||||
},
|
||||
}
|
||||
|
||||
ss := make([]ethpb.AttSlashing, len(b.Block.Body.AttesterSlashings))
|
||||
for i, s := range b.Block.Body.AttesterSlashings {
|
||||
ss[i] = s
|
||||
}
|
||||
newState, err := blocks.ProcessAttesterSlashings(context.Background(), beaconState, ss, v.SlashValidator)
|
||||
require.NoError(t, err)
|
||||
newRegistry := newState.Validators()
|
||||
|
||||
// Given the intersection of slashable indices is [1], only validator
|
||||
// at index 1 should be slashed and exited. We confirm this below.
|
||||
if newRegistry[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {
|
||||
t.Errorf(
|
||||
`
|
||||
Expected validator at index 1's exit epoch to match
|
||||
%d, received %d instead
|
||||
`,
|
||||
beaconState.Validators()[1].ExitEpoch,
|
||||
newRegistry[1].ExitEpoch,
|
||||
)
|
||||
}
|
||||
|
||||
require.Equal(t, uint64(31000000000), newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
require.Equal(t, tc.slashedBalance, newState.Balances()[1])
|
||||
require.Equal(t, uint64(32000000000), newState.Balances()[2])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ go_library(
|
||||
"consolidations.go",
|
||||
"deposits.go",
|
||||
"effective_balance_updates.go",
|
||||
"error.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
@@ -55,13 +56,16 @@ go_test(
|
||||
"deposit_fuzz_test.go",
|
||||
"deposits_test.go",
|
||||
"effective_balance_updates_test.go",
|
||||
"error_test.go",
|
||||
"export_test.go",
|
||||
"registry_updates_test.go",
|
||||
"transition_no_verify_sig_test.go",
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -86,6 +90,7 @@ go_test(
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
@@ -129,6 +130,57 @@ func TestComputeConsolidationEpochAndUpdateChurn(t *testing.T) {
|
||||
expectedEpoch: 16, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 200000000000, // 200 ETH
|
||||
},
|
||||
{
|
||||
name: "balance to consume is zero, consolidation balance at limit",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: 0,
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
expectedEpoch: 17, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: 0,
|
||||
},
|
||||
{
|
||||
name: "consolidation balance equals consolidation balance to consume",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000),
|
||||
expectedEpoch: 16,
|
||||
expectedConsolidationBalanceToConsume: 0,
|
||||
},
|
||||
{
|
||||
name: "consolidation balance exceeds limit by one",
|
||||
state: func(t *testing.T) state.BeaconState {
|
||||
activeBal := 32000000000000000 // 32M ETH
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ð.BeaconStateElectra{
|
||||
Slot: slots.UnsafeEpochStart(10),
|
||||
EarliestConsolidationEpoch: 16,
|
||||
ConsolidationBalanceToConsume: 0,
|
||||
Validators: createValidatorsWithTotalActiveBalance(primitives.Gwei(activeBal)),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(t),
|
||||
consolidationBalance: helpers.ConsolidationChurnLimit(32000000000000000) + 1,
|
||||
expectedEpoch: 18, // Flows into another epoch.
|
||||
expectedConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(32000000000000000) - 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
@@ -184,6 +185,9 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
|
||||
|
||||
for _, cr := range reqs {
|
||||
if cr == nil {
|
||||
return errors.New("nil consolidation request")
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
|
||||
}
|
||||
@@ -233,13 +237,18 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
roSrcV, err := state_native.NewValidator(srcV)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
// Verify source withdrawal credentials
|
||||
if !helpers.HasExecutionWithdrawalCredentials(srcV) {
|
||||
if !roSrcV.HasExecutionWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
@@ -248,7 +257,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
}
|
||||
|
||||
// Target validator must have their withdrawal credentials set appropriately.
|
||||
if !helpers.HasCompoundingWithdrawalCredential(tgtV) {
|
||||
if !tgtV.HasCompoundingWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -256,7 +265,7 @@ func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, req
|
||||
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
|
||||
continue
|
||||
}
|
||||
// Neither validator are exiting.
|
||||
// Neither validator is exiting.
|
||||
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
|
||||
continue
|
||||
}
|
||||
@@ -364,7 +373,7 @@ func IsValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.Conso
|
||||
return false
|
||||
}
|
||||
|
||||
if !helpers.HasETH1WithdrawalCredential(srcV) {
|
||||
if !srcV.HasETH1WithdrawalCredentials() {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
@@ -209,7 +209,22 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
state state.BeaconState
|
||||
reqs []*enginev1.ConsolidationRequest
|
||||
validate func(*testing.T, state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil request",
|
||||
state: func() state.BeaconState {
|
||||
st := ð.BeaconStateElectra{}
|
||||
s, err := state_native.InitializeFromProtoElectra(st)
|
||||
require.NoError(t, err)
|
||||
return s
|
||||
}(),
|
||||
reqs: []*enginev1.ConsolidationRequest{nil},
|
||||
validate: func(t *testing.T, st state.BeaconState) {
|
||||
require.DeepEqual(t, st, st)
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "one valid request",
|
||||
state: func() state.BeaconState {
|
||||
@@ -405,7 +420,13 @@ func TestProcessConsolidationRequests(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
|
||||
require.NoError(t, err)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !tt.wantErr {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
if tt.validate != nil {
|
||||
tt.validate(t, tt.state)
|
||||
}
|
||||
|
||||
@@ -385,14 +385,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
return errors.Wrap(err, "batch signature verification failed")
|
||||
}
|
||||
|
||||
pubKeyMap := make(map[[48]byte]struct{}, len(pendingDeposits))
|
||||
|
||||
// Process each deposit individually
|
||||
for _, pendingDeposit := range pendingDeposits {
|
||||
_, found := pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)]
|
||||
if !found {
|
||||
pubKeyMap[bytesutil.ToBytes48(pendingDeposit.PublicKey)] = struct{}{}
|
||||
}
|
||||
validSignature := allSignaturesVerified
|
||||
|
||||
// If batch verification failed, check the individual deposit signature
|
||||
@@ -410,7 +404,8 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
|
||||
|
||||
// Add validator to the registry if the signature is valid
|
||||
if validSignature {
|
||||
if found {
|
||||
_, has := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
|
||||
if has {
|
||||
index, _ := state.ValidatorIndexByPubkey(bytesutil.ToBytes48(pendingDeposit.PublicKey))
|
||||
if err := helpers.IncreaseBalance(state, index, pendingDeposit.Amount); err != nil {
|
||||
return errors.Wrap(err, "could not increase balance")
|
||||
@@ -592,10 +587,10 @@ func processDepositRequest(beaconState state.BeaconState, request *enginev1.Depo
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get deposit requests start index")
|
||||
}
|
||||
if request == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
|
||||
if request == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
|
||||
return nil, errors.Wrap(err, "could not set deposit requests start index")
|
||||
}
|
||||
|
||||
@@ -333,6 +333,7 @@ func TestProcessDepositRequests(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateElectra(t, 1)
|
||||
sk, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetDepositRequestsStartIndex(1))
|
||||
|
||||
t.Run("empty requests continues", func(t *testing.T) {
|
||||
newSt, err := electra.ProcessDepositRequests(context.Background(), st, []*enginev1.DepositRequest{})
|
||||
|
||||
@@ -3,7 +3,6 @@ package electra
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
@@ -40,7 +39,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
|
||||
// Update effective balances with hysteresis.
|
||||
validatorFunc := func(idx int, val state.ReadOnlyValidator) (newVal *ethpb.Validator, err error) {
|
||||
if val == nil {
|
||||
if val.IsNil() {
|
||||
return nil, fmt.Errorf("validator %d is nil in state", idx)
|
||||
}
|
||||
if idx >= len(bals) {
|
||||
@@ -49,7 +48,7 @@ func ProcessEffectiveBalanceUpdates(st state.BeaconState) error {
|
||||
balance := bals[idx]
|
||||
|
||||
effectiveBalanceLimit := params.BeaconConfig().MinActivationBalance
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
effectiveBalanceLimit = params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
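For reference, the hysteresis applied inside validatorFunc follows the consensus-spec rule sketched below (spec constants hard-coded). This is a hedged restatement, not Prysm's implementation; Electra only changes the limit passed in when the validator has compounding credentials.

package main

import "fmt"

// updatedEffectiveBalance applies the spec's hysteresis rule: only move the
// effective balance when the actual balance drifts past a threshold, then
// snap it down to a multiple of the increment, capped at the limit.
func updatedEffectiveBalance(balance, effective, increment, limit uint64) uint64 {
	hysteresis := increment / 4 // HYSTERESIS_QUOTIENT = 4
	downward := hysteresis * 1  // HYSTERESIS_DOWNWARD_MULTIPLIER = 1
	upward := hysteresis * 5    // HYSTERESIS_UPWARD_MULTIPLIER = 5
	if balance+downward < effective || effective+upward < balance {
		eb := balance - balance%increment
		if eb > limit {
			eb = limit
		}
		return eb
	}
	return effective
}

func main() {
	const gwei = uint64(1_000_000_000)
	// 31.8 ETH balance vs 32 ETH effective: inside the band, unchanged.
	fmt.Println(updatedEffectiveBalance(31_800_000_000, 32*gwei, gwei, 32*gwei)) // 32000000000
	// 31.5 ETH balance: past the downward threshold, snaps to 31 ETH.
	fmt.Println(updatedEffectiveBalance(31_500_000_000, 32*gwei, gwei, 32*gwei)) // 31000000000
}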
|
||||
|
||||
|
||||
@@ -77,7 +77,7 @@ func TestProcessEffectiveBalnceUpdates(t *testing.T) {
|
||||
Validators: []*eth.Validator{
|
||||
{
|
||||
EffectiveBalance: params.BeaconConfig().MinActivationBalance / 2,
|
||||
WithdrawalCredentials: nil,
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
},
|
||||
},
|
||||
Balances: []uint64{
|
||||
|
||||
16
beacon-chain/core/electra/error.go
Normal file
@@ -0,0 +1,16 @@
package electra

import "github.com/pkg/errors"

type execReqErr struct {
	error
}

// IsExecutionRequestError returns true if the error has `execReqErr`.
func IsExecutionRequestError(e error) bool {
	if e == nil {
		return false
	}
	var d execReqErr
	return errors.As(e, &d)
}
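A short usage sketch of this sentinel pattern: the marker type survives errors.Wrap, which is what IsExecutionRequestError relies on when callers such as validateStateTransition inspect the failure. Types and messages below are renamed illustrations, not the package's own.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// reqErr mirrors the shape of execReqErr: an embedded error acting as a marker.
type reqErr struct{ error }

func isReqErr(e error) bool {
	if e == nil {
		return false
	}
	var d reqErr
	return errors.As(e, &d)
}

func main() {
	base := reqErr{errors.New("could not process deposit requests")}
	wrapped := errors.Wrap(base, "state transition failed")
	fmt.Println(isReqErr(wrapped))                   // true: detection survives wrapping
	fmt.Println(isReqErr(errors.New("other error"))) // false
}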
|
||||
45
beacon-chain/core/electra/error_test.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package electra
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func TestIsExecutionRequestError(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "nil error",
|
||||
err: nil,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "random error",
|
||||
err: errors.New("some error"),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "execution request error",
|
||||
err: execReqErr{errors.New("execution request failed")},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "wrapped execution request error",
|
||||
err: errors.Wrap(execReqErr{errors.New("execution request failed")}, "wrapped"),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := IsExecutionRequestError(tt.err)
|
||||
if got != tt.want {
|
||||
t.Errorf("IsExecutionRequestError(%v) = %v, want %v", tt.err, got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,6 @@ package electra
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
@@ -82,16 +81,31 @@ func ProcessOperations(
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get execution requests")
|
||||
}
|
||||
for _, d := range requests.Deposits {
|
||||
if d == nil {
|
||||
return nil, errors.New("nil deposit request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process deposit requests")
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
|
||||
}
|
||||
for _, w := range requests.Withdrawals {
|
||||
if w == nil {
|
||||
return nil, errors.New("nil withdrawal request")
|
||||
}
|
||||
}
|
||||
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process withdrawal requests")
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
|
||||
}
|
||||
for _, c := range requests.Consolidations {
|
||||
if c == nil {
|
||||
return nil, errors.New("nil consolidation request")
|
||||
}
|
||||
}
|
||||
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
|
||||
return nil, fmt.Errorf("could not process consolidation requests: %w", err)
|
||||
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
61
beacon-chain/core/electra/transition_no_verify_sig_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/electra"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessOperationsWithNilRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "Nil deposit request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
|
||||
},
|
||||
errMsg: "nil deposit request",
|
||||
},
|
||||
{
|
||||
name: "Nil withdrawal request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
|
||||
},
|
||||
errMsg: "nil withdrawal request",
|
||||
},
|
||||
{
|
||||
name: "Nil consolidation request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
|
||||
},
|
||||
errMsg: "nil consolidation request",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
|
||||
_, err = electra.ProcessOperations(context.Background(), st, b.Block())
|
||||
require.ErrorContains(t, tc.errMsg, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -194,7 +194,7 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -116,7 +116,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
return nil, err
|
||||
}
|
||||
// Verify withdrawal credentials
|
||||
hasCorrectCredential := helpers.HasExecutionWithdrawalCredentials(validator)
|
||||
hasCorrectCredential := validator.HasExecutionWithdrawalCredentials()
|
||||
wc := validator.GetWithdrawalCredentials()
|
||||
isCorrectSourceAddress := bytes.Equal(wc[12:], wr.SourceAddress)
|
||||
if !hasCorrectCredential || !isCorrectSourceAddress {
|
||||
@@ -165,7 +165,7 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
|
||||
hasExcessBalance := vBal > params.BeaconConfig().MinActivationBalance+pendingBalanceToWithdraw
|
||||
|
||||
// Only allow partial withdrawals with compounding withdrawal credentials
|
||||
if helpers.HasCompoundingWithdrawalCredential(validator) && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
if validator.HasCompoundingWithdrawalCredentials() && hasSufficientEffectiveBalance && hasExcessBalance {
|
||||
// Spec definition:
|
||||
// to_withdraw = min(
|
||||
// state.balances[index] - MIN_ACTIVATION_BALANCE - pending_balance_to_withdraw,
|
||||
|
||||
@@ -38,6 +38,17 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
wantFn func(t *testing.T, got state.BeaconState)
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil request",
|
||||
args: args{
|
||||
st: func() state.BeaconState { return st }(),
|
||||
wrs: []*enginev1.WithdrawalRequest{nil},
|
||||
},
|
||||
wantErr: true,
|
||||
wantFn: func(t *testing.T, got state.BeaconState) {
|
||||
require.DeepEqual(t, got, nil)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "happy path exit and withdrawal only",
|
||||
args: args{
|
||||
|
||||
@@ -6,15 +6,12 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -24,7 +21,6 @@ go_test(
|
||||
srcs = ["upgrade_test.go"],
|
||||
deps = [
|
||||
":go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -32,6 +28,5 @@ go_test(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,18 +1,13 @@
|
||||
package fulu
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
state_native "github.com/prysmaticlabs/prysm/v5/beacon-chain/state/state-native"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
// UpgradeToFulu takes a generic beacon state and returns the upgraded Fulu-version state.
|
||||
@@ -74,32 +69,37 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
earliestExitEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(beaconState))
|
||||
preActivationIndices := make([]primitives.ValidatorIndex, 0)
|
||||
compoundWithdrawalIndices := make([]primitives.ValidatorIndex, 0)
|
||||
if err = beaconState.ReadFromEveryValidator(func(index int, val state.ReadOnlyValidator) error {
|
||||
if val.ExitEpoch() != params.BeaconConfig().FarFutureEpoch && val.ExitEpoch() > earliestExitEpoch {
|
||||
earliestExitEpoch = val.ExitEpoch()
|
||||
}
|
||||
if val.ActivationEpoch() == params.BeaconConfig().FarFutureEpoch {
|
||||
preActivationIndices = append(preActivationIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
if helpers.HasCompoundingWithdrawalCredential(val) {
|
||||
compoundWithdrawalIndices = append(compoundWithdrawalIndices, primitives.ValidatorIndex(index))
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
depositBalanceToConsume, err := beaconState.DepositBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
earliestExitEpoch++ // Increment to find the earliest possible exit epoch
|
||||
|
||||
// note: should be the same in prestate and post beaconState.
|
||||
// we are deviating from the specs a bit as it calls for using the post beaconState
|
||||
tab, err := helpers.TotalActiveBalance(beaconState)
|
||||
exitBalanceToConsume, err := beaconState.ExitBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get total active balance")
|
||||
return nil, err
|
||||
}
|
||||
earliestExitEpoch, err := beaconState.EarliestExitEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
consolidationBalanceToConsume, err := beaconState.ConsolidationBalanceToConsume()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
earliestConsolidationEpoch, err := beaconState.EarliestConsolidationEpoch()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingDeposits, err := beaconState.PendingDeposits()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingPartialWithdrawals, err := beaconState.PendingPartialWithdrawals()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pendingConsolidations, err := beaconState.PendingConsolidations()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateFulu{
|
||||
@@ -155,25 +155,16 @@ func UpgradeToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
HistoricalSummaries: summaries,
|
||||
|
||||
DepositRequestsStartIndex: params.BeaconConfig().UnsetDepositRequestsStartIndex,
|
||||
DepositBalanceToConsume: 0,
|
||||
ExitBalanceToConsume: helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
|
||||
DepositBalanceToConsume: depositBalanceToConsume,
|
||||
ExitBalanceToConsume: exitBalanceToConsume,
|
||||
EarliestExitEpoch: earliestExitEpoch,
|
||||
ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
|
||||
EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
|
||||
PendingDeposits: make([]*ethpb.PendingDeposit, 0),
|
||||
PendingPartialWithdrawals: make([]*ethpb.PendingPartialWithdrawal, 0),
|
||||
PendingConsolidations: make([]*ethpb.PendingConsolidation, 0),
|
||||
ConsolidationBalanceToConsume: consolidationBalanceToConsume,
|
||||
EarliestConsolidationEpoch: earliestConsolidationEpoch,
|
||||
PendingDeposits: pendingDeposits,
|
||||
PendingPartialWithdrawals: pendingPartialWithdrawals,
|
||||
PendingConsolidations: pendingConsolidations,
|
||||
}
|
||||
|
||||
// Sorting preActivationIndices based on a custom criteria
|
||||
sort.Slice(preActivationIndices, func(i, j int) bool {
|
||||
// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
|
||||
if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
|
||||
return preActivationIndices[i] < preActivationIndices[j]
|
||||
}
|
||||
return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
|
||||
})
|
||||
|
||||
// Need to cast the beaconState to use in helper functions
|
||||
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
|
||||
if err != nil {
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/fulu"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -12,7 +11,6 @@ import (
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
)
|
||||
|
||||
func TestUpgradeToFulu(t *testing.T) {
|
||||
@@ -33,57 +31,6 @@ func TestUpgradeToFulu(t *testing.T) {
|
||||
require.Equal(t, preForkState.GenesisTime(), mSt.GenesisTime())
|
||||
require.DeepSSZEqual(t, preForkState.GenesisValidatorsRoot(), mSt.GenesisValidatorsRoot())
|
||||
require.Equal(t, preForkState.Slot(), mSt.Slot())
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
require.DeepSSZEqual(t, preForkState.Validators()[2:], mSt.Validators()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Balances()[2:], mSt.Balances()[2:])
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
|
||||
require.Equal(t, len(preForkState.Validators()), len(mSt.Validators()))
|
||||
|
||||
preVal, err := preForkState.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal.EffectiveBalance)
|
||||
|
||||
preVal2, err := preForkState.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MaxEffectiveBalance, preVal2.EffectiveBalance)
|
||||
|
||||
// TODO: Fix this test
|
||||
// mVal, err := mSt.ValidatorAtIndex(0)
|
||||
_, err = mSt.ValidatorAtIndex(0)
|
||||
require.NoError(t, err)
|
||||
// require.Equal(t, uint64(0), mVal.EffectiveBalance)
|
||||
|
||||
mVal2, err := mSt.ValidatorAtIndex(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().MinActivationBalance, mVal2.EffectiveBalance)
|
||||
|
||||
numValidators := mSt.NumValidators()
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
hr1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
hr2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hr1, hr2)
|
||||
|
||||
f := mSt.Fork()
|
||||
require.DeepSSZEqual(t, ðpb.Fork{
|
||||
@@ -91,11 +38,50 @@ func TestUpgradeToFulu(t *testing.T) {
|
||||
CurrentVersion: params.BeaconConfig().FuluForkVersion,
|
||||
Epoch: time.CurrentEpoch(st),
|
||||
}, f)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.LatestBlockHeader(), mSt.LatestBlockHeader())
|
||||
require.DeepSSZEqual(t, preForkState.BlockRoots(), mSt.BlockRoots())
|
||||
require.DeepSSZEqual(t, preForkState.StateRoots(), mSt.StateRoots())
|
||||
|
||||
hr1, err := preForkState.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
hr2, err := mSt.HistoricalRoots()
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, hr1, hr2)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.Eth1Data(), mSt.Eth1Data())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DataVotes(), mSt.Eth1DataVotes())
|
||||
require.DeepSSZEqual(t, preForkState.Eth1DepositIndex(), mSt.Eth1DepositIndex())
|
||||
require.DeepSSZEqual(t, preForkState.Validators(), mSt.Validators())
|
||||
require.DeepSSZEqual(t, preForkState.Balances(), mSt.Balances())
|
||||
require.DeepSSZEqual(t, preForkState.RandaoMixes(), mSt.RandaoMixes())
|
||||
require.DeepSSZEqual(t, preForkState.Slashings(), mSt.Slashings())
|
||||
|
||||
numValidators := mSt.NumValidators()
|
||||
|
||||
p, err := mSt.PreviousEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
|
||||
p, err = mSt.CurrentEpochParticipation()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]byte, numValidators), p)
|
||||
|
||||
require.DeepSSZEqual(t, preForkState.JustificationBits(), mSt.JustificationBits())
|
||||
require.DeepSSZEqual(t, preForkState.PreviousJustifiedCheckpoint(), mSt.PreviousJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.CurrentJustifiedCheckpoint(), mSt.CurrentJustifiedCheckpoint())
|
||||
require.DeepSSZEqual(t, preForkState.FinalizedCheckpoint(), mSt.FinalizedCheckpoint())
|
||||
|
||||
s, err := mSt.InactivityScores()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, make([]uint64, numValidators), s)
|
||||
|
||||
csc, err := mSt.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err := preForkState.CurrentSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, psc, csc)
|
||||
|
||||
nsc, err := mSt.NextSyncCommittee()
|
||||
require.NoError(t, err)
|
||||
psc, err = preForkState.NextSyncCommittee()
|
||||
@@ -110,7 +96,6 @@ func TestUpgradeToFulu(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
txRoot, err := prevHeader.TransactionsRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
wdRoot, err := prevHeader.WithdrawalsRoot()
|
||||
require.NoError(t, err)
|
||||
wanted := &enginev1.ExecutionPayloadHeaderDeneb{
|
||||
@@ -144,45 +129,57 @@ func TestUpgradeToFulu(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(summaries))
|
||||
|
||||
startIndex, err := mSt.DepositRequestsStartIndex()
|
||||
preDepositRequestsStartIndex, err := preForkState.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, params.BeaconConfig().UnsetDepositRequestsStartIndex, startIndex)
|
||||
postDepositRequestsStartIndex, err := mSt.DepositRequestsStartIndex()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preDepositRequestsStartIndex, postDepositRequestsStartIndex)
|
||||
|
||||
balance, err := mSt.DepositBalanceToConsume()
|
||||
preDepositBalanceToConsume, err := preForkState.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, primitives.Gwei(0), balance)
|
||||
postDepositBalanceToConsume, err := mSt.DepositBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preDepositBalanceToConsume, postDepositBalanceToConsume)
|
||||
|
||||
tab, err := helpers.TotalActiveBalance(mSt)
|
||||
preExitBalanceToConsume, err := preForkState.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
postExitBalanceToConsume, err := mSt.ExitBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preExitBalanceToConsume, postExitBalanceToConsume)
|
||||
|
||||
ebtc, err := mSt.ExitBalanceToConsume()
|
||||
preEarliestExitEpoch, err := preForkState.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitChurnLimit(primitives.Gwei(tab)), ebtc)
|
||||
postEarliestExitEpoch, err := mSt.EarliestExitEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preEarliestExitEpoch, postEarliestExitEpoch)
|
||||
|
||||
eee, err := mSt.EarliestExitEpoch()
|
||||
preConsolidationBalanceToConsume, err := preForkState.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitEpoch(primitives.Epoch(1)), eee)
|
||||
postConsolidationBalanceToConsume, err := mSt.ConsolidationBalanceToConsume()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preConsolidationBalanceToConsume, postConsolidationBalanceToConsume)
|
||||
|
||||
cbtc, err := mSt.ConsolidationBalanceToConsume()
|
||||
preEarliestConsolidationEpoch, err := preForkState.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ConsolidationChurnLimit(primitives.Gwei(tab)), cbtc)
|
||||
postEarliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, preEarliestConsolidationEpoch, postEarliestConsolidationEpoch)
|
||||
|
||||
earliestConsolidationEpoch, err := mSt.EarliestConsolidationEpoch()
|
||||
prePendingDeposits, err := preForkState.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, helpers.ActivationExitEpoch(slots.ToEpoch(preForkState.Slot())), earliestConsolidationEpoch)
|
||||
postPendingDeposits, err := mSt.PendingDeposits()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, prePendingDeposits, postPendingDeposits)
|
||||
|
||||
// TODO: Fix this test
|
||||
// pendingDeposits, err := mSt.PendingDeposits()
|
||||
_, err = mSt.PendingDeposits()
|
||||
prePendingPartialWithdrawals, err := preForkState.PendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
// require.Equal(t, 2, len(pendingDeposits))
|
||||
// require.Equal(t, uint64(1000), pendingDeposits[1].Amount)
|
||||
postPendingPartialWithdrawals, err := mSt.PendingPartialWithdrawals()
|
||||
require.NoError(t, err)
|
||||
require.DeepSSZEqual(t, prePendingPartialWithdrawals, postPendingPartialWithdrawals)
|
||||
|
||||
numPendingPartialWithdrawals, err := mSt.NumPendingPartialWithdrawals()
|
||||
prePendingConsolidations, err := preForkState.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint64(0), numPendingPartialWithdrawals)
|
||||
|
||||
consolidations, err := mSt.PendingConsolidations()
|
||||
postPendingConsolidations, err := mSt.PendingConsolidations()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(consolidations))
|
||||
require.DeepSSZEqual(t, prePendingConsolidations, postPendingConsolidations)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"beacon_committee.go",
|
||||
"block.go",
|
||||
"genesis.go",
|
||||
"legacy.go",
|
||||
"metrics.go",
|
||||
"randao.go",
|
||||
"rewards_penalties.go",
|
||||
@@ -25,7 +26,6 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//container/slice:go_default_library",
|
||||
"//container/trie:go_default_library",
|
||||
@@ -53,6 +53,7 @@ go_test(
|
||||
"attestation_test.go",
|
||||
"beacon_committee_test.go",
|
||||
"block_test.go",
|
||||
"legacy_test.go",
|
||||
"private_access_fuzz_noop_test.go", # keep
|
||||
"private_access_test.go",
|
||||
"randao_test.go",
|
||||
@@ -87,5 +88,6 @@ go_test(
|
||||
"//time:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
beacon-chain/core/helpers/legacy.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package helpers

import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v5/runtime/version"
)

// DepositRequestsStarted determines if the deposit requests have started.
func DepositRequestsStarted(beaconState state.BeaconState) bool {
	if beaconState.Version() < version.Electra {
		return false
	}

	requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
	if err != nil {
		return false
	}

	return beaconState.Eth1DepositIndex() == requestsStartIndex
}
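DepositRequestsStarted answers whether the EIP-6110 deposit-request flow has fully taken over from the legacy Eth1 deposit queue, i.e. the state's Eth1 deposit index has caught up to the deposit-requests start index. The sketch below is illustrative only and not part of this change; it shows how a caller in the same helpers package might gate a legacy code path on this helper, with the legacy routine passed in as a closure so the example stays self-contained.

// Illustrative sketch (not part of this diff): run the supplied legacy Eth1
// deposit routine only while EIP-6110 deposit requests have not yet taken over.
func runLegacyDepositsUnlessStarted(st state.BeaconState, legacy func() error) error {
	if DepositRequestsStarted(st) {
		// Deposit requests are active and the legacy Eth1 queue is drained.
		return nil
	}
	return legacy()
}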
beacon-chain/core/helpers/legacy_test.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package helpers_test

import (
	"testing"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
	"github.com/prysmaticlabs/prysm/v5/testing/util"
	"github.com/stretchr/testify/require"
)

// TestDepositRequestHaveStarted contains several test cases for helpers.DepositRequestsStarted.
func TestDepositRequestHaveStarted(t *testing.T) {
	t.Run("Version below Electra returns false", func(t *testing.T) {
		st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
		result := helpers.DepositRequestsStarted(st)
		require.False(t, result)
	})

	t.Run("Version is Electra or higher, no error, but Eth1DepositIndex != requestsStartIndex returns false", func(t *testing.T) {
		st, _ := util.DeterministicGenesisStateElectra(t, 1)
		require.NoError(t, st.SetEth1DepositIndex(1))
		result := helpers.DepositRequestsStarted(st)
		require.False(t, result)
	})

	t.Run("Version is Electra or higher, no error, and Eth1DepositIndex == requestsStartIndex returns true", func(t *testing.T) {
		st, _ := util.DeterministicGenesisStateElectra(t, 1)
		require.NoError(t, st.SetEth1DepositIndex(33))
		require.NoError(t, st.SetDepositRequestsStartIndex(33))
		result := helpers.DepositRequestsStarted(st)
		require.True(t, result)
	})
}
@@ -22,7 +22,7 @@ import (
func BalanceChurnLimit(activeBalance primitives.Gwei) primitives.Gwei {
	churn := max(
		params.BeaconConfig().MinPerEpochChurnLimitElectra,
		(uint64(activeBalance) / params.BeaconConfig().ChurnLimitQuotient),
		uint64(activeBalance)/params.BeaconConfig().ChurnLimitQuotient,
	)
	return primitives.Gwei(churn - churn%params.BeaconConfig().EffectiveBalanceIncrement)
}
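The hunk above only reformats the churn expression; the computation stays the same: take the larger of MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA and total-active-balance / CHURN_LIMIT_QUOTIENT, then round down to a whole EFFECTIVE_BALANCE_INCREMENT. The worked example below uses mainnet-style constants (128 ETH minimum churn, quotient 65536, 1 ETH increment) as assumptions rather than values read from this diff, and assumes the usual "fmt" import.

// Worked example for BalanceChurnLimit with assumed mainnet-style constants.
func ExampleBalanceChurnLimit() {
	const gwei = uint64(1_000_000_000)
	minChurn := 128 * gwei      // MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA (assumed)
	quotient := uint64(65536)   // CHURN_LIMIT_QUOTIENT (assumed)
	increment := 1 * gwei       // EFFECTIVE_BALANCE_INCREMENT (assumed)
	active := 10_000_000 * gwei // 10M ETH of total active balance

	churn := max(minChurn, active/quotient) // 152_587_890_625 Gwei
	limit := churn - churn%increment        // rounded down to a full increment
	fmt.Println(limit)
	// Output: 152000000000
}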
@@ -14,7 +14,6 @@ import (
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
	fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
	"github.com/prysmaticlabs/prysm/v5/config/params"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/interfaces"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v5/crypto/hash"
	"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
@@ -394,9 +393,9 @@ func ComputeProposerIndex(bState state.ReadOnlyBeaconState, activeIndices []prim
		effectiveBal := v.EffectiveBalance()
		if bState.Version() >= version.Electra {
			binary.LittleEndian.PutUint64(seedBuffer[len(seed):], i/16)
			randomByte := hashFunc(seedBuffer)
			randomBytes := hashFunc(seedBuffer)
			offset := (i % 16) * 2
			randomValue := uint64(randomByte[offset]) | uint64(randomByte[offset+1])<<8
			randomValue := uint64(randomBytes[offset]) | uint64(randomBytes[offset+1])<<8

			if effectiveBal*fieldparams.MaxRandomValueElectra >= beaconConfig.MaxEffectiveBalanceElectra*randomValue {
				return candidateIndex, nil
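In the Electra branch above, each candidate draws a 16-bit random value (two bytes of hash(seed || i/16)) and is accepted when effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value. The helper below restates just that acceptance test; the concrete values in the comments (0xFFFF and 2048 ETH) are assumed mainnet presets, not taken from this hunk.

// Illustrative restatement of the Electra proposer acceptance check above.
// Constant values are assumptions (0xFFFF max random value, 2048 ETH max EB).
func acceptsProposerCandidate(effectiveBalanceGwei, randomValue uint64) bool {
	const (
		maxRandomValue      = uint64(0xFFFF)               // fieldparams.MaxRandomValueElectra (assumed)
		maxEffectiveBalance = uint64(2048) * 1_000_000_000 // MaxEffectiveBalanceElectra in Gwei (assumed)
	)
	return effectiveBalanceGwei*maxRandomValue >= maxEffectiveBalance*randomValue
}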
@@ -515,63 +514,6 @@ func LastActivatedValidatorIndex(ctx context.Context, st state.ReadOnlyBeaconSta
|
||||
return lastActivatedvalidatorIndex, nil
|
||||
}
|
||||
|
||||
// hasETH1WithdrawalCredential returns whether the validator has an ETH1
|
||||
// Withdrawal prefix. It assumes that the caller has a lock on the state
|
||||
func HasETH1WithdrawalCredential(val interfaces.WithWithdrawalCredentials) bool {
|
||||
if val == nil {
|
||||
return false
|
||||
}
|
||||
return isETH1WithdrawalCredential(val.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
func isETH1WithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasCompoundingWithdrawalCredential checks if the validator has a compounding withdrawal credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_compounding_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal credential.
|
||||
// """
|
||||
// return is_compounding_withdrawal_credential(validator.withdrawal_credentials)
|
||||
func HasCompoundingWithdrawalCredential(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return IsCompoundingWithdrawalCredential(v.GetWithdrawalCredentials())
|
||||
}
|
||||
|
||||
// IsCompoundingWithdrawalCredential checks if the credentials are a compounding withdrawal credential.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def is_compounding_withdrawal_credential(withdrawal_credentials: Bytes32) -> bool:
|
||||
// return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
|
||||
func IsCompoundingWithdrawalCredential(creds []byte) bool {
|
||||
return bytes.HasPrefix(creds, []byte{params.BeaconConfig().CompoundingWithdrawalPrefixByte})
|
||||
}
|
||||
|
||||
// HasExecutionWithdrawalCredentials checks if the validator has an execution withdrawal credential or compounding credential.
|
||||
// New in Electra EIP-7251: https://eips.ethereum.org/EIPS/eip-7251
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def has_execution_withdrawal_credential(validator: Validator) -> bool:
|
||||
// """
|
||||
// Check if ``validator`` has a 0x01 or 0x02 prefixed withdrawal credential.
|
||||
// """
|
||||
// return has_compounding_withdrawal_credential(validator) or has_eth1_withdrawal_credential(validator)
|
||||
func HasExecutionWithdrawalCredentials(v interfaces.WithWithdrawalCredentials) bool {
|
||||
if v == nil {
|
||||
return false
|
||||
}
|
||||
return HasCompoundingWithdrawalCredential(v) || HasETH1WithdrawalCredential(v)
|
||||
}
|
||||
|
||||
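The block removed above carried the spec-quoting helpers for 0x01 (ETH1) and 0x02 (compounding) withdrawal-credential prefixes; this diff moves those checks onto the read-only validator type (val.HasCompoundingWithdrawalCredentials() and friends). The sketch below restates the prefix logic in plain form; the 0x01/0x02 prefix bytes are the usual spec values and are stated here as assumptions rather than read from this hunk.

// Plain restatement of the prefix checks behind the helpers removed above.
// Prefix bytes are assumptions (0x01 = ETH1 withdrawal, 0x02 = compounding).
func isETH1Credential(creds []byte) bool        { return len(creds) > 0 && creds[0] == 0x01 }
func isCompoundingCredential(creds []byte) bool { return len(creds) > 0 && creds[0] == 0x02 }
func isExecutionCredential(creds []byte) bool {
	return isETH1Credential(creds) || isCompoundingCredential(creds)
}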
// IsSameWithdrawalCredentials returns true if both validators have the same withdrawal credentials.
|
||||
//
|
||||
// return a.withdrawal_credentials[12:] == b.withdrawal_credentials[12:]
|
||||
@@ -606,10 +548,10 @@ func IsFullyWithdrawableValidator(val state.ReadOnlyValidator, balance uint64, e
|
||||
|
||||
// Electra / EIP-7251 logic
|
||||
if fork >= version.Electra {
|
||||
return HasExecutionWithdrawalCredentials(val) && val.WithdrawableEpoch() <= epoch
|
||||
return val.HasExecutionWithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
return HasETH1WithdrawalCredential(val) && val.WithdrawableEpoch() <= epoch
|
||||
return val.HasETH1WithdrawalCredentials() && val.WithdrawableEpoch() <= epoch
|
||||
}
|
||||
|
||||
// IsPartiallyWithdrawableValidator returns whether the validator is able to perform a
|
||||
@@ -637,7 +579,7 @@ func IsPartiallyWithdrawableValidator(val state.ReadOnlyValidator, balance uint6
|
||||
// """
|
||||
// Check if ``validator`` is partially withdrawable.
|
||||
// """
|
||||
// max_effective_balance = get_validator_max_effective_balance(validator)
|
||||
// max_effective_balance = get_max_effective_balance(validator)
|
||||
// has_max_effective_balance = validator.effective_balance == max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// has_excess_balance = balance > max_effective_balance # [Modified in Electra:EIP7251]
|
||||
// return (
|
||||
@@ -650,7 +592,7 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
|
||||
hasMaxBalance := val.EffectiveBalance() == maxEB
|
||||
hasExcessBalance := balance > maxEB
|
||||
|
||||
return HasExecutionWithdrawalCredentials(val) &&
|
||||
return val.HasExecutionWithdrawalCredentials() &&
|
||||
hasMaxBalance &&
|
||||
hasExcessBalance
|
||||
}
|
||||
@@ -670,14 +612,14 @@ func isPartiallyWithdrawableValidatorElectra(val state.ReadOnlyValidator, balanc
|
||||
func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balance uint64, epoch primitives.Epoch) bool {
|
||||
hasMaxBalance := val.EffectiveBalance() == params.BeaconConfig().MaxEffectiveBalance
|
||||
hasExcessBalance := balance > params.BeaconConfig().MaxEffectiveBalance
|
||||
return HasETH1WithdrawalCredential(val) && hasExcessBalance && hasMaxBalance
|
||||
return val.HasETH1WithdrawalCredentials() && hasExcessBalance && hasMaxBalance
|
||||
}
|
||||
|
||||
// ValidatorMaxEffectiveBalance returns the maximum effective balance for a validator.
|
||||
//
|
||||
// Spec definition:
|
||||
//
|
||||
// def get_validator_max_effective_balance(validator: Validator) -> Gwei:
|
||||
// def get_max_effective_balance(validator: Validator) -> Gwei:
|
||||
// """
|
||||
// Get max effective balance for ``validator``.
|
||||
// """
|
||||
@@ -686,7 +628,7 @@ func isPartiallyWithdrawableValidatorCapella(val state.ReadOnlyValidator, balanc
|
||||
// else:
|
||||
// return MIN_ACTIVATION_BALANCE
|
||||
func ValidatorMaxEffectiveBalance(val state.ReadOnlyValidator) uint64 {
|
||||
if HasCompoundingWithdrawalCredential(val) {
|
||||
if val.HasCompoundingWithdrawalCredentials() {
|
||||
return params.BeaconConfig().MaxEffectiveBalanceElectra
|
||||
}
|
||||
return params.BeaconConfig().MinActivationBalance
|
||||
|
||||
@@ -910,13 +910,15 @@ func TestProposerIndexFromCheckpoint(t *testing.T) {
|
||||
func TestHasETH1WithdrawalCredentials(t *testing.T) {
|
||||
creds := []byte{0xFA, 0xCC}
|
||||
v := ðpb.Validator{WithdrawalCredentials: creds}
|
||||
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
|
||||
roV, err := state_native.NewValidator(v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, roV.HasETH1WithdrawalCredentials())
|
||||
creds = []byte{params.BeaconConfig().ETH1AddressWithdrawalPrefixByte, 0xCC}
|
||||
v = ðpb.Validator{WithdrawalCredentials: creds}
|
||||
require.Equal(t, true, helpers.HasETH1WithdrawalCredential(v))
|
||||
roV, err = state_native.NewValidator(v)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, roV.HasETH1WithdrawalCredentials())
|
||||
// No Withdrawal cred
|
||||
v = ðpb.Validator{}
|
||||
require.Equal(t, false, helpers.HasETH1WithdrawalCredential(v))
|
||||
}
|
||||
|
||||
func TestHasCompoundingWithdrawalCredential(t *testing.T) {
|
||||
@@ -931,11 +933,12 @@ func TestHasCompoundingWithdrawalCredential(t *testing.T) {
|
||||
{"Does not have compounding withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasCompoundingWithdrawalCredential(tt.validator))
|
||||
roV, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, roV.HasCompoundingWithdrawalCredentials())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -955,11 +958,12 @@ func TestHasExecutionWithdrawalCredentials(t *testing.T) {
|
||||
{"Does not have compounding withdrawal credential or eth1 withdrawal credential",
|
||||
ðpb.Validator{WithdrawalCredentials: bytesutil.PadTo([]byte{0x00}, 32)},
|
||||
false},
|
||||
{"Handles nil case", nil, false},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
assert.Equal(t, tt.want, helpers.HasExecutionWithdrawalCredentials(tt.validator))
|
||||
roV, err := state_native.NewValidator(tt.validator)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tt.want, roV.HasExecutionWithdrawalCredentials())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,76 +84,6 @@ func TestNextEpoch_OK(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanUpgradeToAltair(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.AltairForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bc)
|
||||
tests := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not epoch start",
|
||||
slot: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "not altair epoch",
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "altair epoch",
|
||||
slot: primitives.Slot(params.BeaconConfig().AltairForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := time.CanUpgradeToAltair(tt.slot); got != tt.want {
|
||||
t.Errorf("canUpgradeToAltair() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanUpgradeBellatrix(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
bc := params.BeaconConfig()
|
||||
bc.BellatrixForkEpoch = 5
|
||||
params.OverrideBeaconConfig(bc)
|
||||
tests := []struct {
|
||||
name string
|
||||
slot primitives.Slot
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not epoch start",
|
||||
slot: 1,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "not bellatrix epoch",
|
||||
slot: params.BeaconConfig().SlotsPerEpoch,
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "bellatrix epoch",
|
||||
slot: primitives.Slot(params.BeaconConfig().BellatrixForkEpoch) * params.BeaconConfig().SlotsPerEpoch,
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := time.CanUpgradeToBellatrix(tt.slot); got != tt.want {
|
||||
t.Errorf("CanUpgradeToBellatrix() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCanProcessEpoch_TrueOnEpochsLastSlot(t *testing.T) {
|
||||
tests := []struct {
|
||||
slot primitives.Slot
|
||||
@@ -273,6 +203,16 @@ func TestCanUpgradeTo(t *testing.T) {
|
||||
forkEpoch *primitives.Epoch
|
||||
upgradeFunc func(primitives.Slot) bool
|
||||
}{
|
||||
{
|
||||
name: "Altair",
|
||||
forkEpoch: &beaconConfig.AltairForkEpoch,
|
||||
upgradeFunc: time.CanUpgradeToAltair,
|
||||
},
|
||||
{
|
||||
name: "Bellatrix",
|
||||
forkEpoch: &beaconConfig.BellatrixForkEpoch,
|
||||
upgradeFunc: time.CanUpgradeToBellatrix,
|
||||
},
|
||||
{
|
||||
name: "Capella",
|
||||
forkEpoch: &beaconConfig.CapellaForkEpoch,
|
||||
|
||||
@@ -403,11 +403,15 @@ func VerifyOperationLengths(_ context.Context, state state.BeaconState, b interf
		)
	}

	if uint64(len(body.AttesterSlashings())) > params.BeaconConfig().MaxAttesterSlashings {
	maxSlashings := params.BeaconConfig().MaxAttesterSlashings
	if body.Version() >= version.Electra {
		maxSlashings = params.BeaconConfig().MaxAttesterSlashingsElectra
	}
	if uint64(len(body.AttesterSlashings())) > maxSlashings {
		return nil, fmt.Errorf(
			"number of attester slashings (%d) in block body exceeds allowed threshold of %d",
			len(body.AttesterSlashings()),
			params.BeaconConfig().MaxAttesterSlashings,
			maxSlashings,
		)
	}

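The change above makes the attester-slashing length check fork-aware: pre-Electra blocks keep MaxAttesterSlashings, while Electra and later blocks use MaxAttesterSlashingsElectra. A small helper equivalent to that inline check is sketched below; the mainnet values noted in the comments are assumptions.

// Fork-aware limit resolution equivalent to the inline check above (sketch).
func maxAttesterSlashingsForVersion(v int) uint64 {
	if v >= version.Electra {
		return params.BeaconConfig().MaxAttesterSlashingsElectra // 1 on mainnet (assumed)
	}
	return params.BeaconConfig().MaxAttesterSlashings // 2 on mainnet (assumed)
}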
@@ -437,6 +437,25 @@ func TestProcessBlock_OverMaxAttesterSlashings(t *testing.T) {
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessBlock_OverMaxAttesterSlashingsElectra(t *testing.T) {
|
||||
maxSlashings := params.BeaconConfig().MaxAttesterSlashingsElectra
|
||||
b := ðpb.SignedBeaconBlockElectra{
|
||||
Block: ðpb.BeaconBlockElectra{
|
||||
Body: ðpb.BeaconBlockBodyElectra{
|
||||
AttesterSlashings: make([]*ethpb.AttesterSlashingElectra, maxSlashings+1),
|
||||
},
|
||||
},
|
||||
}
|
||||
want := fmt.Sprintf("number of attester slashings (%d) in block body exceeds allowed threshold of %d",
|
||||
len(b.Block.Body.AttesterSlashings), params.BeaconConfig().MaxAttesterSlashingsElectra)
|
||||
s, err := state_native.InitializeFromProtoUnsafeElectra(ðpb.BeaconStateElectra{})
|
||||
require.NoError(t, err)
|
||||
wsb, err := consensusblocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
_, err = transition.VerifyOperationLengths(context.Background(), s, wsb.Block())
|
||||
assert.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
func TestProcessBlock_OverMaxAttestations(t *testing.T) {
|
||||
b := ðpb.SignedBeaconBlock{
|
||||
Block: ðpb.BeaconBlock{
|
||||
|
||||
@@ -75,7 +75,7 @@ func InitiateValidatorExit(ctx context.Context, s state.BeaconState, idx primiti
|
||||
|
||||
// Compute exit queue epoch.
|
||||
if s.Version() < version.Electra {
|
||||
// Relevant spec code from deneb:
|
||||
// Relevant spec code from phase0:
|
||||
//
|
||||
// exit_epochs = [v.exit_epoch for v in state.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
|
||||
// exit_queue_epoch = max(exit_epochs + [compute_activation_exit_epoch(get_current_epoch(state))])
|
||||
|
||||
@@ -101,6 +101,7 @@ type NoHeadAccessDatabase interface {
|
||||
SaveLightClientBootstrap(ctx context.Context, blockRoot []byte, bootstrap interfaces.LightClientBootstrap) error
|
||||
|
||||
CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint primitives.Slot) error
|
||||
DeleteHistoricalDataBeforeSlot(ctx context.Context, slot primitives.Slot) error
|
||||
}
|
||||
|
||||
// HeadAccessDatabase defines a struct with access to reading chain head data.
|
||||
|
||||
@@ -227,10 +227,7 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root[:]); err != nil {
|
||||
if err := s.deleteBlock(tx, root[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
s.blockCache.Del(string(root[:]))
|
||||
@@ -238,6 +235,89 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
})
|
||||
}
|
||||
|
||||
// DeleteHistoricalDataBeforeSlot deletes all blocks and states before the given slot.
|
||||
// This function deletes data from the following buckets:
|
||||
// - blocksBucket
|
||||
// - blockParentRootIndicesBucket
|
||||
// - finalizedBlockRootsIndexBucket
|
||||
// - stateBucket
|
||||
// - stateSummaryBucket
|
||||
// - blockRootValidatorHashesBucket
|
||||
// - blockSlotIndicesBucket
|
||||
// - stateSlotIndicesBucket
|
||||
func (s *Store) DeleteHistoricalDataBeforeSlot(ctx context.Context, cutoffSlot primitives.Slot) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.DeleteHistoricalDataBeforeSlot")
|
||||
defer span.End()
|
||||
|
||||
// Collect slot/root pairs to perform deletions in a separate read only transaction.
|
||||
var (
|
||||
roots [][]byte
|
||||
slts []primitives.Slot
|
||||
)
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
var err error
|
||||
roots, slts, err = blockRootsBySlotRange(ctx, tx.Bucket(blockSlotIndicesBucket), primitives.Slot(0), cutoffSlot, nil, nil, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve block roots")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not retrieve block roots and slots")
|
||||
}
|
||||
|
||||
// Perform all deletions in a single transaction for atomicity
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
for _, root := range roots {
|
||||
// Delete block
|
||||
if err = s.deleteBlock(tx, root); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete finalized block roots index
|
||||
if err = tx.Bucket(finalizedBlockRootsIndexBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete finalized block root index")
|
||||
}
|
||||
|
||||
// Delete state
|
||||
if err = tx.Bucket(stateBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete state")
|
||||
}
|
||||
|
||||
// Delete state summary
|
||||
if err = tx.Bucket(stateSummaryBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete state summary")
|
||||
}
|
||||
|
||||
// Delete validator entries
|
||||
if err = s.deleteValidatorHashes(tx, root); err != nil {
|
||||
return errors.Wrap(err, "could not delete validators")
|
||||
}
|
||||
}
|
||||
|
||||
for _, slot := range slts {
|
||||
// Delete slot indices
|
||||
if err = tx.Bucket(blockSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
|
||||
return errors.Wrap(err, "could not delete block slot index")
|
||||
}
|
||||
if err = tx.Bucket(stateSlotIndicesBucket).Delete(bytesutil.SlotToBytesBigEndian(slot)); err != nil {
|
||||
return errors.Wrap(err, "could not delete state slot index")
|
||||
}
|
||||
}
|
||||
|
||||
// Delete all caches after we have deleted everything from buckets.
|
||||
// This is done after the buckets are deleted to avoid any issues in case of transaction rollback.
|
||||
for _, root := range roots {
|
||||
// Delete block from cache
|
||||
s.blockCache.Del(string(root))
|
||||
// Delete state summary from cache
|
||||
s.stateSummaryCache.delete([32]byte(root))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
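DeleteHistoricalDataBeforeSlot, added above, removes blocks, states, state summaries, slot indices and validator hashes for everything older than a cutoff slot: it collects slot/root pairs in a read-only transaction and then performs all deletions in a single write transaction before clearing the caches. The sketch below shows one way a pruning service, such as the beacon-chain/db/pruner package introduced later in this compare, might call it with a retention window; the function and parameter names here are illustrative assumptions, not code from this diff.

// Illustrative caller (not part of this diff): keep the most recent
// retentionEpochs epochs and delete everything older than that.
func pruneBefore(ctx context.Context, db iface.NoHeadAccessDatabase, current primitives.Slot, retentionEpochs uint64) error {
	retention := primitives.Slot(retentionEpochs) * params.BeaconConfig().SlotsPerEpoch
	if current <= retention {
		return nil // nothing old enough to prune yet
	}
	return db.DeleteHistoricalDataBeforeSlot(ctx, current-retention)
}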
// SaveBlock to the db.
|
||||
func (s *Store) SaveBlock(ctx context.Context, signed interfaces.ReadOnlySignedBeaconBlock) error {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.SaveBlock")
|
||||
@@ -609,7 +689,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
|
||||
|
||||
// We retrieve block roots that match a filter criteria of slot ranges, if specified.
|
||||
filtersMap := f.Filters()
|
||||
rootsBySlotRange, err := blockRootsBySlotRange(
|
||||
rootsBySlotRange, _, err := blockRootsBySlotRange(
|
||||
ctx,
|
||||
tx.Bucket(blockSlotIndicesBucket),
|
||||
filtersMap[filters.StartSlot],
|
||||
@@ -627,6 +707,7 @@ func blockRootsByFilter(ctx context.Context, tx *bolt.Tx, f *filters.QueryFilter
|
||||
// that list of roots to lookup the block. These block will
|
||||
// meet the filter criteria.
|
||||
indices := lookupValuesForIndices(ctx, indicesByBucket, tx)
|
||||
|
||||
keys := rootsBySlotRange
|
||||
if len(indices) > 0 {
|
||||
// If we have found indices that meet the filter criteria, and there are also
|
||||
@@ -653,13 +734,13 @@ func blockRootsBySlotRange(
|
||||
ctx context.Context,
|
||||
bkt *bolt.Bucket,
|
||||
startSlotEncoded, endSlotEncoded, startEpochEncoded, endEpochEncoded, slotStepEncoded interface{},
|
||||
) ([][]byte, error) {
|
||||
) ([][]byte, []primitives.Slot, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.blockRootsBySlotRange")
|
||||
defer span.End()
|
||||
|
||||
// Return nothing when all slot parameters are missing
|
||||
if startSlotEncoded == nil && endSlotEncoded == nil && startEpochEncoded == nil && endEpochEncoded == nil {
|
||||
return [][]byte{}, nil
|
||||
return [][]byte{}, nil, nil
|
||||
}
|
||||
|
||||
var startSlot, endSlot primitives.Slot
|
||||
@@ -680,11 +761,11 @@ func blockRootsBySlotRange(
|
||||
if startEpochOk && endEpochOk {
|
||||
startSlot, err = slots.EpochStart(startEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
endSlot, err = slots.EpochStart(endEpoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
endSlot = endSlot + params.BeaconConfig().SlotsPerEpoch - 1
|
||||
}
|
||||
@@ -695,14 +776,15 @@ func blockRootsBySlotRange(
|
||||
return key != nil && bytes.Compare(key, max) <= 0
|
||||
}
|
||||
if endSlot < startSlot {
|
||||
return nil, errInvalidSlotRange
|
||||
return nil, nil, errInvalidSlotRange
|
||||
}
|
||||
rootsRange := endSlot.SubSlot(startSlot).Div(step)
|
||||
roots := make([][]byte, 0, rootsRange)
|
||||
var slts []primitives.Slot
|
||||
c := bkt.Cursor()
|
||||
for k, v := c.Seek(min); conditional(k, max); k, v = c.Next() {
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
if step > 1 {
|
||||
slot := bytesutil.BytesToSlotBigEndian(k)
|
||||
if slot.SubSlot(startSlot).Mod(step) != 0 {
|
||||
continue
|
||||
}
|
||||
@@ -713,8 +795,9 @@ func blockRootsBySlotRange(
|
||||
splitRoots = append(splitRoots, v[i:i+32])
|
||||
}
|
||||
roots = append(roots, splitRoots...)
|
||||
slts = append(slts, slot)
|
||||
}
|
||||
return roots, nil
|
||||
return roots, slts, nil
|
||||
}
|
||||
|
||||
// blockRootsBySlot retrieves the block roots by slot
|
||||
@@ -813,9 +896,9 @@ func unmarshalBlock(_ context.Context, enc []byte) (interfaces.ReadOnlySignedBea
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(denebBlindKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal blinded Deneb block")
|
||||
}
|
||||
case hasElectraKey(enc):
|
||||
case HasElectraKey(enc):
|
||||
rawBlock = ðpb.SignedBeaconBlockElectra{}
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
|
||||
if err := rawBlock.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal Electra block")
|
||||
}
|
||||
case hasElectraBlindKey(enc):
|
||||
@@ -874,7 +957,7 @@ func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
if blk.IsBlinded() {
|
||||
return electraBlindKey, nil
|
||||
}
|
||||
return electraKey, nil
|
||||
return ElectraKey, nil
|
||||
}
|
||||
|
||||
if v >= version.Deneb {
|
||||
@@ -908,3 +991,32 @@ func keyForBlock(blk interfaces.ReadOnlySignedBeaconBlock) ([]byte, error) {
|
||||
|
||||
return nil, fmt.Errorf("unsupported block version: %v", blk.Version())
|
||||
}
|
||||
|
||||
func (s *Store) deleteBlock(tx *bolt.Tx, root []byte) error {
|
||||
if err := tx.Bucket(blocksBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete block")
|
||||
}
|
||||
|
||||
if err := tx.Bucket(blockParentRootIndicesBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete block parent indices")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Store) deleteValidatorHashes(tx *bolt.Tx, root []byte) error {
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete the validator hash index
|
||||
if err = tx.Bucket(blockRootValidatorHashesBucket).Delete(root); err != nil {
|
||||
return errors.Wrap(err, "could not delete validator index")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,9 +2,13 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filters"
|
||||
@@ -353,6 +357,189 @@ func TestStore_DeleteFinalizedBlock(t *testing.T) {
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
|
||||
func TestStore_HistoricalDataBeforeSlot(t *testing.T) {
|
||||
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
// Save genesis block root
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
|
||||
// Create and save blocks for 4 epochs
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
|
||||
// Mark state validator migration as complete
|
||||
err := db.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket(migrationsBucket).Put(migrationStateValidatorsKey, migrationCompleted)
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
migrated, err := db.isStateValidatorMigrationOver()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, migrated)
|
||||
|
||||
// Create state summaries and states for each block
|
||||
ss := make([]*ethpb.StateSummary, len(blks))
|
||||
states := make([]state.BeaconState, len(blks))
|
||||
|
||||
for i, blk := range blks {
|
||||
slot := blk.Block().Slot()
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create and save state summary
|
||||
ss[i] = ðpb.StateSummary{
|
||||
Slot: slot,
|
||||
Root: r[:],
|
||||
}
|
||||
|
||||
// Create and save state with validator entries
|
||||
vals := make([]*ethpb.Validator, 2)
|
||||
for j := range vals {
|
||||
vals[j] = ðpb.Validator{
|
||||
PublicKey: bytesutil.PadTo([]byte{byte(i*j + 1)}, 48),
|
||||
WithdrawalCredentials: bytesutil.PadTo([]byte{byte(i*j + 2)}, 32),
|
||||
}
|
||||
}
|
||||
|
||||
st, err := util.NewBeaconState(func(state *ethpb.BeaconState) error {
|
||||
state.Validators = vals
|
||||
state.Slot = slot
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(ctx, st, r))
|
||||
states[i] = st
|
||||
|
||||
// Verify validator entries are saved to db
|
||||
valsActual, err := db.validatorEntries(ctx, r)
|
||||
require.NoError(t, err)
|
||||
for j, val := range valsActual {
|
||||
require.DeepEqual(t, vals[j], val)
|
||||
}
|
||||
}
|
||||
require.NoError(t, db.SaveStateSummaries(ctx, ss))
|
||||
|
||||
// Verify slot indices exist before deletion
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist", i)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Delete data before slot at epoch 1
|
||||
require.NoError(t, db.DeleteHistoricalDataBeforeSlot(ctx, primitives.Slot(slotsPerEpoch)))
|
||||
|
||||
// Verify blocks from epoch 0 are deleted
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check block is deleted
|
||||
retrievedBlocks, err := db.BlocksBySlot(ctx, primitives.Slot(i))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 0, len(retrievedBlocks))
|
||||
|
||||
// Verify block does not exist
|
||||
assert.Equal(t, false, db.HasBlock(ctx, root))
|
||||
|
||||
// Verify block parent root does not exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.Equal(t, 0, len(tx.Bucket(blockParentRootIndicesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify state is deleted
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, false, hasState)
|
||||
|
||||
// Verify state summary is deleted
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, false, hasSummary)
|
||||
|
||||
// Verify validator hashes for block roots are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.Equal(t, 0, len(tx.Bucket(blockRootValidatorHashesBucket).Get(root[:])))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify slot indices are deleted
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
for i := uint64(0); i < slotsPerEpoch; i++ {
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.Equal(t, 0, len(blockSlotBkt.Get(slot)), fmt.Sprintf("Expected block slot index to be deleted, slot: %d", slot))
|
||||
assert.Equal(t, 0, len(stateSlotBkt.Get(slot)), fmt.Sprintf("Expected state slot index to be deleted, slot: %d", slot))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify blocks from epochs 1-3 still exist
|
||||
for i := slotsPerEpoch; i < slotsPerEpoch*4; i++ {
|
||||
root, err := blks[i].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify block exists
|
||||
assert.Equal(t, true, db.HasBlock(ctx, root))
|
||||
|
||||
// Verify remaining block parent root exists, except last slot since we store parent roots of each block.
|
||||
if i < slotsPerEpoch*4-1 {
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
require.NotNil(t, tx.Bucket(blockParentRootIndicesBucket).Get(root[:]), fmt.Sprintf("Expected block parent index to be deleted, slot: %d", i))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Verify state exists
|
||||
hasState := db.HasState(ctx, root)
|
||||
assert.Equal(t, true, hasState)
|
||||
|
||||
// Verify state summary exists
|
||||
hasSummary := db.HasStateSummary(ctx, root)
|
||||
assert.Equal(t, true, hasSummary)
|
||||
|
||||
// Verify slot indices still exist
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
blockSlotBkt := tx.Bucket(blockSlotIndicesBucket)
|
||||
stateSlotBkt := tx.Bucket(stateSlotIndicesBucket)
|
||||
|
||||
slot := bytesutil.SlotToBytesBigEndian(primitives.Slot(i + 1))
|
||||
assert.NotNil(t, blockSlotBkt.Get(slot), "Expected block slot index to exist")
|
||||
assert.NotNil(t, stateSlotBkt.Get(slot), "Expected state slot index to exist")
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify validator entries still exist
|
||||
valsActual, err := db.validatorEntries(ctx, root)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, valsActual)
|
||||
|
||||
// Verify remaining validator hashes for block roots exists
|
||||
err = db.db.View(func(tx *bolt.Tx) error {
|
||||
assert.NotNil(t, tx.Bucket(blockRootValidatorHashesBucket).Get(root[:]))
|
||||
return nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_GenesisBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -52,11 +52,12 @@ func hasDenebBlindKey(enc []byte) bool {
	return bytes.Equal(enc[:len(denebBlindKey)], denebBlindKey)
}

func hasElectraKey(enc []byte) bool {
	if len(electraKey) >= len(enc) {
// HasElectraKey verifies if the encoding is Electra compatible.
func HasElectraKey(enc []byte) bool {
	if len(ElectraKey) >= len(enc) {
		return false
	}
	return bytes.Equal(enc[:len(electraKey)], electraKey)
	return bytes.Equal(enc[:len(ElectraKey)], ElectraKey)
}

func hasElectraBlindKey(enc []byte) bool {
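These key helpers implement the store's fork-prefix convention: every saved object is prefixed with a short fork key (for example the newly exported ElectraKey, []byte("electra")) before snappy compression, and readers check that prefix on the decompressed bytes, as the state.go hunks below do. The two functions in this sketch restate that convention generically; they are illustrative only, and assume the bytes and snappy imports this package already uses.

// Generic restatement of the fork-key convention used by the kv store (sketch).
func encodeWithForkKey(key, ssz []byte) []byte {
	// Layout: forkKey || ssz-bytes, snappy-compressed as a whole.
	return snappy.Encode(nil, append(key, ssz...))
}

// hasForkKey expects already-decompressed bytes, mirroring HasElectraKey above.
func hasForkKey(enc, key []byte) bool {
	return len(enc) > len(key) && bytes.Equal(enc[:len(key)], key)
}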
@@ -167,13 +167,13 @@ func decodeLightClientBootstrap(enc []byte) (interfaces.LightClientBootstrap, []
|
||||
}
|
||||
m = bootstrap
|
||||
syncCommitteeHash = enc[len(denebKey) : len(denebKey)+32]
|
||||
case hasElectraKey(enc):
|
||||
case HasElectraKey(enc):
|
||||
bootstrap := ðpb.LightClientBootstrapElectra{}
|
||||
if err := bootstrap.UnmarshalSSZ(enc[len(electraKey)+32:]); err != nil {
|
||||
if err := bootstrap.UnmarshalSSZ(enc[len(ElectraKey)+32:]); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not unmarshal Electra light client bootstrap")
|
||||
}
|
||||
m = bootstrap
|
||||
syncCommitteeHash = enc[len(electraKey) : len(electraKey)+32]
|
||||
syncCommitteeHash = enc[len(ElectraKey) : len(ElectraKey)+32]
|
||||
default:
|
||||
return nil, nil, errors.New("decoding of saved light client bootstrap is unsupported")
|
||||
}
|
||||
@@ -277,9 +277,9 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
|
||||
return nil, errors.Wrap(err, "could not unmarshal Deneb light client update")
|
||||
}
|
||||
m = update
|
||||
case hasElectraKey(enc):
|
||||
case HasElectraKey(enc):
|
||||
update := ðpb.LightClientUpdateElectra{}
|
||||
if err := update.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
|
||||
if err := update.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "could not unmarshal Electra light client update")
|
||||
}
|
||||
m = update
|
||||
@@ -292,7 +292,7 @@ func decodeLightClientUpdate(enc []byte) (interfaces.LightClientUpdate, error) {
|
||||
func keyForLightClientUpdate(v int) ([]byte, error) {
|
||||
switch v {
|
||||
case version.Electra:
|
||||
return electraKey, nil
|
||||
return ElectraKey, nil
|
||||
case version.Deneb:
|
||||
return denebKey, nil
|
||||
case version.Capella:
|
||||
|
||||
@@ -53,7 +53,7 @@ var (
|
||||
saveBlindedBeaconBlocksKey = []byte("save-blinded-beacon-blocks")
|
||||
denebKey = []byte("deneb")
|
||||
denebBlindKey = []byte("blind-deneb")
|
||||
electraKey = []byte("electra")
|
||||
ElectraKey = []byte("electra")
|
||||
electraBlindKey = []byte("blind-electra")
|
||||
fuluKey = []byte("fulu")
|
||||
fuluBlindKey = []byte("blind-fulu")
|
||||
|
||||
@@ -357,7 +357,7 @@ func (s *Store) processElectra(ctx context.Context, pbState *ethpb.BeaconStateEl
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
encodedState := snappy.Encode(nil, append(electraKey, rawObj...))
|
||||
encodedState := snappy.Encode(nil, append(ElectraKey, rawObj...))
|
||||
if err := bucket.Put(rootHash, encodedState); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -517,9 +517,22 @@ func (s *Store) unmarshalState(_ context.Context, enc []byte, validatorEntries [
|
||||
}
|
||||
|
||||
switch {
|
||||
case hasElectraKey(enc):
|
||||
case hasFuluKey(enc):
|
||||
protoState := ðpb.BeaconStateFulu{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(fuluKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok {
|
||||
protoState.Validators = validatorEntries
|
||||
}
|
||||
return statenative.InitializeFromProtoUnsafeFulu(protoState)
|
||||
case HasElectraKey(enc):
|
||||
protoState := ðpb.BeaconStateElectra{}
|
||||
if err := protoState.UnmarshalSSZ(enc[len(electraKey):]); err != nil {
|
||||
if err := protoState.UnmarshalSSZ(enc[len(ElectraKey):]); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal encoding for Electra")
|
||||
}
|
||||
ok, err := s.isStateValidatorMigrationOver()
|
||||
@@ -675,7 +688,7 @@ func marshalState(ctx context.Context, st state.ReadOnlyBeaconState) ([]byte, er
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return snappy.Encode(nil, append(electraKey, rawObj...)), nil
|
||||
return snappy.Encode(nil, append(ElectraKey, rawObj...)), nil
|
||||
case version.Fulu:
|
||||
rState, ok := st.ToProtoUnsafe().(*ethpb.BeaconStateFulu)
|
||||
if !ok {
|
||||
@@ -712,7 +725,7 @@ func (s *Store) validatorEntries(ctx context.Context, blockRoot [32]byte) ([]*et
|
||||
idxBkt := tx.Bucket(blockRootValidatorHashesBucket)
|
||||
valKey := idxBkt.Get(blockRoot[:])
|
||||
if len(valKey) == 0 {
|
||||
return errors.Errorf("invalid compressed validator keys length")
|
||||
return errors.Errorf("validator keys not found for given block root: %x", blockRoot)
|
||||
}
|
||||
|
||||
// decompress the keys and check if they are of proper length.
|
||||
|
||||
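The hunks above rename electraKey to the exported ElectraKey and route state encoding through it: the writer appends the fork prefix to the raw SSZ bytes and snappy-compresses the result, while the reader works on decompressed bytes and switches on the prefix via HasElectraKey. Below is a minimal, self-contained sketch of that general pattern; it is not Prysm's actual helpers, and only the "electra" prefix value and the snappy/prefix layout are taken from the diff.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

// Illustrative only: tag the payload with a fork prefix, then compress the whole thing.
var electraKey = []byte("electra")

func saveState(sszState []byte) []byte {
	return snappy.Encode(nil, append(electraKey, sszState...))
}

func loadState(stored []byte) ([]byte, error) {
	enc, err := snappy.Decode(nil, stored)
	if err != nil {
		return nil, err
	}
	// Mirror of the HasElectraKey check: the prefix must be strictly shorter than the encoding.
	if len(electraKey) >= len(enc) || !bytes.Equal(enc[:len(electraKey)], electraKey) {
		return nil, fmt.Errorf("unsupported or truncated encoding")
	}
	return enc[len(electraKey):], nil
}

func main() {
	stored := saveState([]byte("ssz-state-bytes"))
	got, err := loadState(stored)
	fmt.Println(string(got), err) // ssz-state-bytes <nil>
}
```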
38 beacon-chain/db/pruner/BUILD.bazel Normal file
@@ -0,0 +1,38 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pruner.go"],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/pruner",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["pruner_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots/testing:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
174 beacon-chain/db/pruner/pruner.go Normal file
@@ -0,0 +1,174 @@
package pruner

import (
"context"
"time"

"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/iface"
"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/time/slots"
"github.com/sirupsen/logrus"
)

var log = logrus.WithField("prefix", "db-pruner")

type ServiceOption func(*Service)

// WithRetentionPeriod allows the user to specify a different data retention period than the spec default.
// The retention period is specified in epochs, and must be >= MIN_EPOCHS_FOR_BLOCK_REQUESTS.
func WithRetentionPeriod(retentionEpochs primitives.Epoch) ServiceOption {
return func(s *Service) {
defaultRetentionEpochs := helpers.MinEpochsForBlockRequests() + 1
if retentionEpochs < defaultRetentionEpochs {
log.WithField("userEpochs", retentionEpochs).
WithField("minRequired", defaultRetentionEpochs).
Warn("Retention period too low, using minimum required value")
}

s.ps = pruneStartSlotFunc(retentionEpochs)
}
}

func WithSlotTicker(slotTicker slots.Ticker) ServiceOption {
return func(s *Service) {
s.slotTicker = slotTicker
}
}

// Service defines a service that prunes beacon chain DB based on MIN_EPOCHS_FOR_BLOCK_REQUESTS.
type Service struct {
ctx context.Context
db db.Database
ps func(current primitives.Slot) primitives.Slot
prunedUpto primitives.Slot
done chan struct{}
slotTicker slots.Ticker
backfillWaiter func() error
initSyncWaiter func() error
}

func New(ctx context.Context, db iface.Database, genesisTime uint64, initSyncWaiter, backfillWaiter func() error, opts ...ServiceOption) (*Service, error) {
p := &Service{
ctx: ctx,
db: db,
ps: pruneStartSlotFunc(helpers.MinEpochsForBlockRequests() + 1), // Default retention epochs is MIN_EPOCHS_FOR_BLOCK_REQUESTS + 1 from the current slot.
done: make(chan struct{}),
slotTicker: slots.NewSlotTicker(slots.StartTime(genesisTime, 0), params.BeaconConfig().SecondsPerSlot),
initSyncWaiter: initSyncWaiter,
backfillWaiter: backfillWaiter,
}

for _, o := range opts {
o(p)
}

return p, nil
}

func (p *Service) Start() {
log.Info("Starting Beacon DB pruner service")
p.run()
}

func (p *Service) Stop() error {
log.Info("Stopping Beacon DB pruner service")
close(p.done)
return nil
}

func (p *Service) Status() error {
return nil
}

func (p *Service) run() {
if p.initSyncWaiter != nil {
log.Info("Waiting for initial sync service to complete before starting pruner")
if err := p.initSyncWaiter(); err != nil {
log.WithError(err).Error("Failed to start database pruner, error waiting for initial sync completion")
return
}
}
if p.backfillWaiter != nil {
log.Info("Waiting for backfill service to complete before starting pruner")
if err := p.backfillWaiter(); err != nil {
log.WithError(err).Error("Failed to start database pruner, error waiting for backfill completion")
return
}
}

defer p.slotTicker.Done()

for {
select {
case <-p.ctx.Done():
log.Debug("Stopping Beacon DB pruner service", "prunedUpto", p.prunedUpto)
return
case <-p.done:
log.Debug("Stopping Beacon DB pruner service", "prunedUpto", p.prunedUpto)
return
case slot := <-p.slotTicker.C():
// Prune at the middle of every epoch since we do a lot of things around epoch boundaries.
if slots.SinceEpochStarts(slot) != (params.BeaconConfig().SlotsPerEpoch / 2) {
continue
}

if err := p.prune(slot); err != nil {
log.WithError(err).Error("Failed to prune database")
}
}
}
}

// prune deletes historical chain data beyond the pruneSlot.
func (p *Service) prune(slot primitives.Slot) error {
// Prune everything up to this slot (inclusive).
pruneUpto := p.ps(slot)

// Can't prune beyond genesis.
if pruneUpto == 0 {
return nil
}

// Skip if already pruned up to this slot.
if pruneUpto <= p.prunedUpto {
return nil
}

log.WithFields(logrus.Fields{
"pruneUpto": pruneUpto,
}).Debug("Pruning chain data")

tt := time.Now()
if err := p.db.DeleteHistoricalDataBeforeSlot(p.ctx, pruneUpto); err != nil {
return errors.Wrapf(err, "could not delete upto slot %d", pruneUpto)
}

log.WithFields(logrus.Fields{
"prunedUpto": pruneUpto,
"duration": time.Since(tt),
"currentSlot": slot,
}).Debug("Successfully pruned chain data")

// Update pruning checkpoint.
p.prunedUpto = pruneUpto

return nil
}

// pruneStartSlotFunc returns the function to determine the start slot to start pruning.
func pruneStartSlotFunc(retentionEpochs primitives.Epoch) func(primitives.Slot) primitives.Slot {
return func(current primitives.Slot) primitives.Slot {
if retentionEpochs > slots.MaxSafeEpoch() {
retentionEpochs = slots.MaxSafeEpoch()
}
offset := slots.UnsafeEpochStart(retentionEpochs)
if offset >= current {
return 0
}
return current - offset
}
}

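pruneStartSlotFunc converts the retention window from epochs into a slot offset and clamps the result at genesis, so everything strictly before current - offset becomes prunable. The sketch below reproduces that arithmetic with assumed example values (32 slots per epoch and small retention windows); the second call yields the slot-16 cutoff exercised in pruner_test.go further down.

```go
package main

import "fmt"

// pruneStart mirrors the arithmetic of pruneStartSlotFunc above (illustrative only):
// blocks before current - retentionEpochs*slotsPerEpoch are eligible for pruning,
// clamped to 0 so the pruner never reaches past genesis.
func pruneStart(current, retentionEpochs, slotsPerEpoch uint64) uint64 {
	offset := retentionEpochs * slotsPerEpoch
	if offset >= current {
		return 0
	}
	return current - offset
}

func main() {
	fmt.Println(pruneStart(80, 3, 32)) // 0  -> retention window still covers genesis, nothing to prune
	fmt.Println(pruneStart(80, 2, 32)) // 16 -> slots 1..16 can be deleted
}
```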
135 beacon-chain/db/pruner/pruner_test.go Normal file
@@ -0,0 +1,135 @@
package pruner

import (
"context"
"testing"
"time"

"github.com/prysmaticlabs/prysm/v5/config/params"
"github.com/prysmaticlabs/prysm/v5/consensus-types/blocks"
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"

"github.com/prysmaticlabs/prysm/v5/testing/util"
slottest "github.com/prysmaticlabs/prysm/v5/time/slots/testing"
"github.com/sirupsen/logrus"

dbtest "github.com/prysmaticlabs/prysm/v5/beacon-chain/db/testing"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
)

func TestPruner_PruningConditions(t *testing.T) {
tests := []struct {
name string
synced bool
backfillCompleted bool
expectedLog string
}{
{
name: "Not synced",
synced: false,
backfillCompleted: true,
expectedLog: "Waiting for initial sync service to complete before starting pruner",
},
{
name: "Backfill incomplete",
synced: true,
backfillCompleted: false,
expectedLog: "Waiting for backfill service to complete before starting pruner",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
hook := logTest.NewGlobal()
ctx, cancel := context.WithCancel(context.Background())
beaconDB := dbtest.SetupDB(t)

slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)}

waitChan := make(chan struct{})
waiter := func() error {
close(waitChan)
return nil
}

var initSyncWaiter, backfillWaiter func() error
if !tt.synced {
initSyncWaiter = waiter
}
if !tt.backfillCompleted {
backfillWaiter = waiter
}
p, err := New(ctx, beaconDB, uint64(time.Now().Unix()), initSyncWaiter, backfillWaiter, WithSlotTicker(slotTicker))
require.NoError(t, err)

go p.Start()
<-waitChan
cancel()

if tt.expectedLog != "" {
require.LogsContain(t, hook, tt.expectedLog)
}

require.NoError(t, p.Stop())
})
}
}

func TestPruner_PruneSuccess(t *testing.T) {
ctx := context.Background()
beaconDB := dbtest.SetupDB(t)

// Create and save some blocks at different slots
var blks []*eth.SignedBeaconBlock
for slot := primitives.Slot(1); slot <= 32; slot++ {
blk := util.NewBeaconBlock()
blk.Block.Slot = slot
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, beaconDB.SaveBlock(ctx, wsb))
blks = append(blks, blk)
}

// Create pruner with retention of 2 epochs (64 slots)
retentionEpochs := primitives.Epoch(2)
slotTicker := &slottest.MockTicker{Channel: make(chan primitives.Slot)}

p, err := New(
ctx,
beaconDB,
uint64(time.Now().Unix()),
nil,
nil,
WithSlotTicker(slotTicker),
)
require.NoError(t, err)

p.ps = func(current primitives.Slot) primitives.Slot {
return current - primitives.Slot(retentionEpochs)*params.BeaconConfig().SlotsPerEpoch
}

// Start pruner and trigger at middle of 3rd epoch (slot 80)
go p.Start()
currentSlot := primitives.Slot(80) // Middle of 3rd epoch
slotTicker.Channel <- currentSlot
// Send the same slot again to ensure the pruning operation completes
slotTicker.Channel <- currentSlot

for slot := primitives.Slot(1); slot <= 32; slot++ {
root, err := blks[slot-1].Block.HashTreeRoot()
require.NoError(t, err)
present := beaconDB.HasBlock(ctx, root)
if slot <= 16 { // These should be pruned
require.NoError(t, err)
require.Equal(t, false, present, "Expected present at slot %d to be pruned", slot)
} else { // These should remain
require.NoError(t, err)
require.Equal(t, true, present, "Expected present at slot %d to exist", slot)
}
}

require.NoError(t, p.Stop())
}

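For callers, the service is wired through New plus the two options above; later in this comparison, node.go gates registration behind a CLI flag. The helper below is a hedged sketch, not part of the diff: startPruner, its zero-means-default convention, and the package name are assumptions, while pruner.New, WithRetentionPeriod, and the waiter signatures come from the code shown above.

```go
package example

import (
	"context"

	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/iface"
	"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/pruner"
	"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
)

// startPruner is an illustrative wrapper: retentionEpochs == 0 means "keep the
// default MIN_EPOCHS_FOR_BLOCK_REQUESTS-based window".
func startPruner(ctx context.Context, db iface.Database, genesisTime uint64,
	initSyncWaiter, backfillWaiter func() error, retentionEpochs uint64) (*pruner.Service, error) {
	var opts []pruner.ServiceOption
	if retentionEpochs > 0 {
		opts = append(opts, pruner.WithRetentionPeriod(primitives.Epoch(retentionEpochs)))
	}
	p, err := pruner.New(ctx, db, genesisTime, initSyncWaiter, backfillWaiter, opts...)
	if err != nil {
		return nil, err
	}
	// Start blocks on the waiters internally, then prunes once per epoch, mid-epoch.
	go p.Start()
	return p, nil
}
```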
@@ -15,6 +15,7 @@ go_library(
|
||||
visibility = ["//beacon-chain:__subpackages__"],
|
||||
deps = [
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/slasher/types:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -22,6 +23,7 @@ go_library(
|
||||
"//io/file:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
@@ -50,6 +52,7 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
@@ -177,8 +178,8 @@ func TestStore_PruneAttestations_OK(t *testing.T) {
|
||||
if i > 0 {
|
||||
source = target - 1
|
||||
}
|
||||
att1 := createAttestationWrapper(source, target, []uint64{attester1}, []byte{0})
|
||||
att2 := createAttestationWrapper(source, target, []uint64{attester2}, []byte{1})
|
||||
att1 := createAttestationWrapper(version.Phase0, source, target, []uint64{attester1}, []byte{0})
|
||||
att2 := createAttestationWrapper(version.Phase0, source, target, []uint64{attester2}, []byte{1})
|
||||
attestations = append(attestations, att1, att2)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,11 +11,13 @@ import (
"github.com/golang/snappy"
"github.com/pkg/errors"
ssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
slashertypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/slasher/types"
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v5/monitoring/tracing/trace"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/runtime/version"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/errgroup"
)
@@ -692,6 +694,11 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
return []byte{}, errors.New("nil proposal record")
}

var versionKey []byte
if att.IndexedAttestation.Version() >= version.Electra {
versionKey = kv.ElectraKey
}

// Encode attestation.
encodedAtt, err := att.IndexedAttestation.MarshalSSZ()
if err != nil {
@@ -701,7 +708,14 @@ func encodeAttestationRecord(att *slashertypes.IndexedAttestationWrapper) ([]byt
// Compress attestation.
compressedAtt := snappy.Encode(nil, encodedAtt)

return append(att.DataRoot[:], compressedAtt...), nil
enc := make([]byte, len(versionKey)+len(att.DataRoot)+len(compressedAtt))
if len(versionKey) > 0 {
copy(enc, versionKey)
}
copy(enc[len(versionKey):len(versionKey)+len(att.DataRoot)], att.DataRoot[:])
copy(enc[len(versionKey)+len(att.DataRoot):], compressedAtt)

return enc, nil
}

// Decode attestation record from bytes.
@@ -711,6 +725,11 @@ func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWr
return nil, fmt.Errorf("wrong length for encoded attestation record, want minimum %d, got %d", rootSize, len(encoded))
}

postElectra := kv.HasElectraKey(encoded)
if postElectra {
encoded = encoded[len(kv.ElectraKey):]
}

// Decompress attestation.
decodedAttBytes, err := snappy.Decode(nil, encoded[rootSize:])
if err != nil {
@@ -718,8 +737,14 @@ func decodeAttestationRecord(encoded []byte) (*slashertypes.IndexedAttestationWr
}

// Decode attestation.
decodedAtt := &ethpb.IndexedAttestation{}
if err := decodedAtt.UnmarshalSSZ(decodedAttBytes); err != nil {
var decodedAtt ethpb.IndexedAtt
if postElectra {
decodedAtt = &ethpb.IndexedAttestationElectra{}
} else {
decodedAtt = &ethpb.IndexedAttestation{}
}

if err = decodedAtt.UnmarshalSSZ(decodedAttBytes); err != nil {
return nil, err
}

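With this change, a slasher attestation record is laid out as an optional kv.ElectraKey prefix, then the 32-byte data root, then the snappy-compressed SSZ body; decodeAttestationRecord strips the prefix first and uses it to choose between the Electra and Phase0 containers. A self-contained sketch of that layout follows, with a placeholder byte slice standing in for the real SSZ bytes and the prefix handling simplified relative to Prysm's helpers.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

// Illustrative record layout: [optional "electra" prefix][32-byte data root][snappy(SSZ(att))].
var electraKey = []byte("electra")

func encodeRecord(postElectra bool, dataRoot [32]byte, sszAtt []byte) []byte {
	var prefix []byte
	if postElectra {
		prefix = electraKey
	}
	compressed := snappy.Encode(nil, sszAtt)
	enc := make([]byte, 0, len(prefix)+len(dataRoot)+len(compressed))
	enc = append(enc, prefix...)
	enc = append(enc, dataRoot[:]...)
	return append(enc, compressed...)
}

func decodeRecord(enc []byte) (postElectra bool, dataRoot [32]byte, sszAtt []byte, err error) {
	if len(enc) > len(electraKey) && bytes.HasPrefix(enc, electraKey) {
		postElectra = true
		enc = enc[len(electraKey):]
	}
	if len(enc) < 32 {
		return false, dataRoot, nil, fmt.Errorf("record too short")
	}
	copy(dataRoot[:], enc[:32])
	sszAtt, err = snappy.Decode(nil, enc[32:])
	return postElectra, dataRoot, sszAtt, err
}

func main() {
	var root [32]byte
	root[0] = 0xaa
	enc := encodeRecord(true, root, []byte("ssz-bytes-here"))
	post, r, att, err := decodeRecord(enc)
	fmt.Println(post, r[0], string(att), err) // true 170 ssz-bytes-here <nil>
}
```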
@@ -14,20 +14,16 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
)
|
||||
|
||||
func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
|
||||
const attestationsCount = 11_000
|
||||
|
||||
// Create context.
|
||||
ctx := context.Background()
|
||||
|
||||
// Create database.
|
||||
beaconDB := setupDB(t)
|
||||
|
||||
// Define the validator index.
|
||||
validatorIndex := primitives.ValidatorIndex(1)
|
||||
phase0ValidatorIndex := primitives.ValidatorIndex(1)
|
||||
electraValidatorIndex := primitives.ValidatorIndex(2)
|
||||
|
||||
// Defines attestations to save and retrieve.
|
||||
attWrappers := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
|
||||
@@ -36,33 +32,71 @@ func TestStore_AttestationRecordForValidator_SaveRetrieve(t *testing.T) {
|
||||
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))
|
||||
|
||||
attWrapper := createAttestationWrapper(
|
||||
version.Phase0,
|
||||
primitives.Epoch(i),
|
||||
primitives.Epoch(i+1),
|
||||
[]uint64{uint64(validatorIndex)},
|
||||
[]uint64{uint64(phase0ValidatorIndex)},
|
||||
dataRoot[:],
|
||||
)
|
||||
|
||||
attWrappers[i] = attWrapper
|
||||
}
|
||||
attWrappersElectra := make([]*slashertypes.IndexedAttestationWrapper, attestationsCount)
|
||||
for i := 0; i < attestationsCount; i++ {
|
||||
var dataRoot [32]byte
|
||||
binary.LittleEndian.PutUint64(dataRoot[:], uint64(i))
|
||||
|
||||
// Check on a sample of validators that no attestation records are available.
|
||||
for i := 0; i < attestationsCount; i += 100 {
|
||||
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, attRecord == nil)
|
||||
attWrapper := createAttestationWrapper(
|
||||
version.Electra,
|
||||
primitives.Epoch(i),
|
||||
primitives.Epoch(i+1),
|
||||
[]uint64{uint64(electraValidatorIndex)},
|
||||
dataRoot[:],
|
||||
)
|
||||
|
||||
attWrappersElectra[i] = attWrapper
|
||||
}
|
||||
|
||||
// Save the attestation records to the database.
|
||||
err := beaconDB.SaveAttestationRecordsForValidators(ctx, attWrappers)
|
||||
require.NoError(t, err)
|
||||
type testCase struct {
|
||||
name string
|
||||
atts []*slashertypes.IndexedAttestationWrapper
|
||||
vi primitives.ValidatorIndex
|
||||
}
|
||||
testCases := []testCase{
|
||||
{
|
||||
name: "phase0",
|
||||
atts: attWrappers,
|
||||
vi: phase0ValidatorIndex,
|
||||
},
|
||||
{
|
||||
name: "electra",
|
||||
atts: attWrappersElectra,
|
||||
vi: electraValidatorIndex,
|
||||
},
|
||||
}
|
||||
|
||||
// Check on a sample of validators that attestation records are available.
|
||||
for i := 0; i < attestationsCount; i += 100 {
|
||||
expected := attWrappers[i]
|
||||
actual, err := beaconDB.AttestationRecordForValidator(ctx, validatorIndex, primitives.Epoch(i+1))
|
||||
require.NoError(t, err)
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Check on a sample of validators that no attestation records are available.
|
||||
for i := 0; i < attestationsCount; i += 100 {
|
||||
attRecord, err := beaconDB.AttestationRecordForValidator(ctx, tc.vi, primitives.Epoch(i+1))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, attRecord == nil)
|
||||
}
|
||||
|
||||
require.DeepEqual(t, expected.IndexedAttestation.GetData().Source.Epoch, actual.IndexedAttestation.GetData().Source.Epoch)
|
||||
// Save the attestation records to the database.
|
||||
err := beaconDB.SaveAttestationRecordsForValidators(ctx, tc.atts)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Check on a sample of validators that attestation records are available.
|
||||
for i := 0; i < attestationsCount; i += 100 {
|
||||
expected := attWrappers[i]
|
||||
actual, err := beaconDB.AttestationRecordForValidator(ctx, tc.vi, primitives.Epoch(i+1))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, expected.IndexedAttestation.GetData().Source.Epoch, actual.IndexedAttestation.GetData().Source.Epoch)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,55 +142,60 @@ func TestStore_LastEpochWrittenForValidators(t *testing.T) {
|
||||
|
||||
func TestStore_CheckAttesterDoubleVotes(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := setupDB(t)
|
||||
err := beaconDB.SaveAttestationRecordsForValidators(ctx, []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{1}),
|
||||
createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{3}),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
slashableAtts := []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{2}), // Different signing root.
|
||||
createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{4}), // Different signing root.
|
||||
}
|
||||
for _, ver := range []int{version.Phase0, version.Electra} {
|
||||
t.Run(version.String(ver), func(t *testing.T) {
|
||||
beaconDB := setupDB(t)
|
||||
err := beaconDB.SaveAttestationRecordsForValidators(ctx, []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{1}),
|
||||
createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{3}),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
wanted := []*slashertypes.AttesterDoubleVote{
|
||||
{
|
||||
ValidatorIndex: 0,
|
||||
Target: 3,
|
||||
Wrapper_1: createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{1}),
|
||||
Wrapper_2: createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{2}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 1,
|
||||
Target: 3,
|
||||
Wrapper_1: createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{1}),
|
||||
Wrapper_2: createAttestationWrapper(2, 3, []uint64{0, 1}, []byte{2}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 2,
|
||||
Target: 4,
|
||||
Wrapper_1: createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{3}),
|
||||
Wrapper_2: createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{4}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 3,
|
||||
Target: 4,
|
||||
Wrapper_1: createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{3}),
|
||||
Wrapper_2: createAttestationWrapper(3, 4, []uint64{2, 3}, []byte{4}),
|
||||
},
|
||||
}
|
||||
doubleVotes, err := beaconDB.CheckAttesterDoubleVotes(ctx, slashableAtts)
|
||||
require.NoError(t, err)
|
||||
sort.SliceStable(doubleVotes, func(i, j int) bool {
|
||||
return uint64(doubleVotes[i].ValidatorIndex) < uint64(doubleVotes[j].ValidatorIndex)
|
||||
})
|
||||
require.Equal(t, len(wanted), len(doubleVotes))
|
||||
for i, double := range doubleVotes {
|
||||
require.DeepEqual(t, wanted[i].ValidatorIndex, double.ValidatorIndex)
|
||||
require.DeepEqual(t, wanted[i].Target, double.Target)
|
||||
require.DeepEqual(t, wanted[i].Wrapper_1, double.Wrapper_1)
|
||||
require.DeepEqual(t, wanted[i].Wrapper_2, double.Wrapper_2)
|
||||
slashableAtts := []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{2}), // Different signing root.
|
||||
createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{4}), // Different signing root.
|
||||
}
|
||||
|
||||
wanted := []*slashertypes.AttesterDoubleVote{
|
||||
{
|
||||
ValidatorIndex: 0,
|
||||
Target: 3,
|
||||
Wrapper_1: createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{1}),
|
||||
Wrapper_2: createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{2}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 1,
|
||||
Target: 3,
|
||||
Wrapper_1: createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{1}),
|
||||
Wrapper_2: createAttestationWrapper(version.Phase0, 2, 3, []uint64{0, 1}, []byte{2}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 2,
|
||||
Target: 4,
|
||||
Wrapper_1: createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{3}),
|
||||
Wrapper_2: createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{4}),
|
||||
},
|
||||
{
|
||||
ValidatorIndex: 3,
|
||||
Target: 4,
|
||||
Wrapper_1: createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{3}),
|
||||
Wrapper_2: createAttestationWrapper(version.Phase0, 3, 4, []uint64{2, 3}, []byte{4}),
|
||||
},
|
||||
}
|
||||
doubleVotes, err := beaconDB.CheckAttesterDoubleVotes(ctx, slashableAtts)
|
||||
require.NoError(t, err)
|
||||
sort.SliceStable(doubleVotes, func(i, j int) bool {
|
||||
return uint64(doubleVotes[i].ValidatorIndex) < uint64(doubleVotes[j].ValidatorIndex)
|
||||
})
|
||||
require.Equal(t, len(wanted), len(doubleVotes))
|
||||
for i, double := range doubleVotes {
|
||||
require.DeepEqual(t, wanted[i].ValidatorIndex, double.ValidatorIndex)
|
||||
require.DeepEqual(t, wanted[i].Target, double.Target)
|
||||
require.DeepEqual(t, wanted[i].Wrapper_1, double.Wrapper_1)
|
||||
require.DeepEqual(t, wanted[i].Wrapper_2, double.Wrapper_2)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -376,12 +415,20 @@ func Test_encodeDecodeAttestationRecord(t *testing.T) {
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(0, 0, nil /* indices */, nil /* signingRoot */),
|
||||
name: "phase0 empty standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(version.Phase0, 0, 0, nil /* indices */, nil /* signingRoot */),
|
||||
},
|
||||
{
|
||||
name: "standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(15, 6, []uint64{2, 4}, []byte("1") /* signingRoot */),
|
||||
name: "phase0 standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(version.Phase0, 15, 6, []uint64{2, 4}, []byte("1") /* signingRoot */),
|
||||
},
|
||||
{
|
||||
name: "electra empty standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(version.Electra, 0, 0, nil /* indices */, nil /* signingRoot */),
|
||||
},
|
||||
{
|
||||
name: "electra standard encode/decode",
|
||||
attWrapper: createAttestationWrapper(version.Electra, 15, 6, []uint64{2, 4}, []byte("1") /* signingRoot */),
|
||||
},
|
||||
{
|
||||
name: "failing encode/decode",
|
||||
@@ -433,7 +480,7 @@ func TestStore_HighestAttestations(t *testing.T) {
|
||||
{
|
||||
name: "should get highest att if single att in db",
|
||||
attestationsInDB: []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(0, 3, []uint64{1}, []byte{1}),
|
||||
createAttestationWrapper(version.Phase0, 0, 3, []uint64{1}, []byte{1}),
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{1},
|
||||
expected: []*ethpb.HighestAttestation{
|
||||
@@ -447,10 +494,10 @@ func TestStore_HighestAttestations(t *testing.T) {
|
||||
{
|
||||
name: "should get highest att for multiple with diff histories",
|
||||
attestationsInDB: []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(0, 3, []uint64{2}, []byte{1}),
|
||||
createAttestationWrapper(1, 4, []uint64{3}, []byte{2}),
|
||||
createAttestationWrapper(2, 3, []uint64{4}, []byte{3}),
|
||||
createAttestationWrapper(5, 6, []uint64{5}, []byte{4}),
|
||||
createAttestationWrapper(version.Phase0, 0, 3, []uint64{2}, []byte{1}),
|
||||
createAttestationWrapper(version.Phase0, 1, 4, []uint64{3}, []byte{2}),
|
||||
createAttestationWrapper(version.Phase0, 2, 3, []uint64{4}, []byte{3}),
|
||||
createAttestationWrapper(version.Phase0, 5, 6, []uint64{5}, []byte{4}),
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{2, 3, 4, 5},
|
||||
expected: []*ethpb.HighestAttestation{
|
||||
@@ -479,10 +526,10 @@ func TestStore_HighestAttestations(t *testing.T) {
|
||||
{
|
||||
name: "should get correct highest att for multiple shared atts with diff histories",
|
||||
attestationsInDB: []*slashertypes.IndexedAttestationWrapper{
|
||||
createAttestationWrapper(1, 4, []uint64{2, 3}, []byte{1}),
|
||||
createAttestationWrapper(2, 5, []uint64{3, 5}, []byte{2}),
|
||||
createAttestationWrapper(4, 5, []uint64{1, 2}, []byte{3}),
|
||||
createAttestationWrapper(6, 7, []uint64{5}, []byte{4}),
|
||||
createAttestationWrapper(version.Phase0, 1, 4, []uint64{2, 3}, []byte{1}),
|
||||
createAttestationWrapper(version.Phase0, 2, 5, []uint64{3, 5}, []byte{2}),
|
||||
createAttestationWrapper(version.Phase0, 4, 5, []uint64{1, 2}, []byte{3}),
|
||||
createAttestationWrapper(version.Phase0, 6, 7, []uint64{5}, []byte{4}),
|
||||
},
|
||||
indices: []primitives.ValidatorIndex{2, 3, 4, 5},
|
||||
expected: []*ethpb.HighestAttestation{
|
||||
@@ -533,7 +580,7 @@ func BenchmarkHighestAttestations(b *testing.B) {
|
||||
}
|
||||
atts := make([]*slashertypes.IndexedAttestationWrapper, count)
|
||||
for i := 0; i < count; i++ {
|
||||
atts[i] = createAttestationWrapper(primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
|
||||
atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -570,7 +617,7 @@ func BenchmarkStore_CheckDoubleBlockProposals(b *testing.B) {
|
||||
}
|
||||
atts := make([]*slashertypes.IndexedAttestationWrapper, count)
|
||||
for i := 0; i < count; i++ {
|
||||
atts[i] = createAttestationWrapper(primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
|
||||
atts[i] = createAttestationWrapper(version.Phase0, primitives.Epoch(i), primitives.Epoch(i+2), indicesPerAtt[i], []byte{})
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
@@ -609,7 +656,7 @@ func createProposalWrapper(t *testing.T, slot primitives.Slot, proposerIndex pri
|
||||
}
|
||||
}
|
||||
|
||||
func createAttestationWrapper(source, target primitives.Epoch, indices []uint64, dataRootBytes []byte) *slashertypes.IndexedAttestationWrapper {
|
||||
func createAttestationWrapper(ver int, source, target primitives.Epoch, indices []uint64, dataRootBytes []byte) *slashertypes.IndexedAttestationWrapper {
|
||||
dataRoot := bytesutil.ToBytes32(dataRootBytes)
|
||||
if dataRootBytes == nil {
|
||||
dataRoot = params.BeaconConfig().ZeroHash
|
||||
@@ -627,6 +674,16 @@ func createAttestationWrapper(source, target primitives.Epoch, indices []uint64,
|
||||
},
|
||||
}
|
||||
|
||||
if ver >= version.Electra {
|
||||
return &slashertypes.IndexedAttestationWrapper{
|
||||
IndexedAttestation: ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: indices,
|
||||
Data: data,
|
||||
Signature: params.BeaconConfig().EmptySignature[:],
|
||||
},
|
||||
DataRoot: dataRoot,
|
||||
}
|
||||
}
|
||||
return &slashertypes.IndexedAttestationWrapper{
|
||||
IndexedAttestation: ðpb.IndexedAttestation{
|
||||
AttestingIndices: indices,
|
||||
|
||||
@@ -2,6 +2,7 @@ package slasherkv
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -10,5 +11,5 @@ import (
|
||||
func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
m.Run()
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package execution
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -11,5 +12,5 @@ func TestMain(m *testing.M) {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
logrus.SetOutput(io.Discard)
|
||||
|
||||
m.Run()
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/db/pruner:go_default_library",
|
||||
"//beacon-chain/db/slasherkv:go_default_library",
|
||||
"//beacon-chain/execution:go_default_library",
|
||||
"//beacon-chain/forkchoice:go_default_library",
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/kv"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/pruner"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/slasherkv"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/execution"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/forkchoice"
|
||||
@@ -336,6 +337,11 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
|
||||
return errors.Wrap(err, "could not register sync service")
|
||||
}
|
||||
|
||||
log.Debugln("Registering Slashing Pool Service")
|
||||
if err := beacon.registerSlashingPoolService(); err != nil {
|
||||
return errors.Wrap(err, "could not register slashing pool service")
|
||||
}
|
||||
|
||||
log.Debugln("Registering Slasher Service")
|
||||
if err := beacon.registerSlasherService(); err != nil {
|
||||
return errors.Wrap(err, "could not register slasher service")
|
||||
@@ -369,6 +375,13 @@ func registerServices(cliCtx *cli.Context, beacon *BeaconNode, synchronizer *sta
|
||||
}
|
||||
}
|
||||
|
||||
if cliCtx.Bool(flags.BeaconDBPruning.Name) {
|
||||
log.Debugln("Registering Pruner Service")
|
||||
if err := beacon.registerPrunerService(cliCtx); err != nil {
|
||||
return errors.Wrap(err, "could not register pruner service")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -716,6 +729,16 @@ func (b *BeaconNode) registerAttestationPool() error {
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerSlashingPoolService() error {
|
||||
var chainService *blockchain.Service
|
||||
if err := b.services.FetchService(&chainService); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := slashings.NewPoolService(b.ctx, b.slashingsPool, slashings.WithElectraTimer(b.clockWaiter, chainService.CurrentSlot))
|
||||
return b.services.RegisterService(s)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *startup.ClockSynchronizer, syncComplete chan struct{}) error {
|
||||
var web3Service *execution.Service
|
||||
if err := b.services.FetchService(&web3Service); err != nil {
|
||||
@@ -1089,6 +1112,34 @@ func (b *BeaconNode) registerBuilderService(cliCtx *cli.Context) error {
|
||||
return b.services.RegisterService(svc)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) registerPrunerService(cliCtx *cli.Context) error {
|
||||
genesisTimeUnix := params.BeaconConfig().MinGenesisTime + params.BeaconConfig().GenesisDelay
|
||||
var backfillService *backfill.Service
|
||||
if err := b.services.FetchService(&backfillService); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var opts []pruner.ServiceOption
|
||||
if cliCtx.IsSet(flags.PrunerRetentionEpochs.Name) {
|
||||
uv := cliCtx.Uint64(flags.PrunerRetentionEpochs.Name)
|
||||
opts = append(opts, pruner.WithRetentionPeriod(primitives.Epoch(uv)))
|
||||
}
|
||||
|
||||
p, err := pruner.New(
|
||||
cliCtx.Context,
|
||||
b.db,
|
||||
genesisTimeUnix,
|
||||
initSyncWaiter(cliCtx.Context, b.initialSyncComplete),
|
||||
backfillService.WaitForCompletion,
|
||||
opts...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return b.services.RegisterService(p)
|
||||
}
|
||||
|
||||
func (b *BeaconNode) RegisterBackfillService(cliCtx *cli.Context, bfs *backfill.Store) error {
|
||||
pa := peers.NewAssigner(b.fetchP2P().Peers(), b.forkChoicer)
|
||||
bf, err := backfill.NewService(cliCtx.Context, bfs, b.BlobStorage, b.clockWaiter, b.fetchP2P(), pa, b.BackfillOpts...)
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"service.go",
|
||||
"service_new.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings",
|
||||
@@ -19,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
@@ -26,6 +28,7 @@ go_library(
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -39,18 +42,21 @@ go_test(
|
||||
size = "small",
|
||||
srcs = [
|
||||
"service_attester_test.go",
|
||||
"service_new_test.go",
|
||||
"service_proposer_test.go",
|
||||
"service_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/operations/slashings/mock:go_default_library",
|
||||
"//beacon-chain/startup:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -35,6 +35,9 @@ func (m *PoolMock) InsertProposerSlashing(_ context.Context, _ state.ReadOnlyBea
|
||||
return nil
|
||||
}
|
||||
|
||||
// ConvertToElectra --
|
||||
func (*PoolMock) ConvertToElectra() {}
|
||||
|
||||
// MarkIncludedAttesterSlashing --
|
||||
func (*PoolMock) MarkIncludedAttesterSlashing(_ ethpb.AttSlashing) {
|
||||
panic("implement me")
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
coretime "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
@@ -270,6 +270,32 @@ func (p *Pool) MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing) {
|
||||
numProposerSlashingsIncluded.Inc()
|
||||
}
|
||||
|
||||
// ConvertToElectra converts all Phase0 attester slashings to Electra attester slashings.
|
||||
// This functionality is needed at the time of the Electra fork.
|
||||
func (p *Pool) ConvertToElectra() {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
for _, pas := range p.pendingAttesterSlashing {
|
||||
if pas.attesterSlashing.Version() == version.Phase0 {
|
||||
first := pas.attesterSlashing.FirstAttestation()
|
||||
second := pas.attesterSlashing.SecondAttestation()
|
||||
pas.attesterSlashing = ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: first.GetAttestingIndices(),
|
||||
Data: first.GetData(),
|
||||
Signature: first.GetSignature(),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: second.GetAttestingIndices(),
|
||||
Data: second.GetData(),
|
||||
Signature: second.GetSignature(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// this function checks a few items about a validator before proceeding with inserting
|
||||
// a proposer/attester slashing into the pool. First, it checks if the validator
|
||||
// has been recently included in the pool, then it checks if the validator is slashable.
|
||||
@@ -291,7 +317,7 @@ func (p *Pool) validatorSlashingPreconditionCheck(
|
||||
return false, err
|
||||
}
|
||||
// Checking if the validator is slashable.
|
||||
if !helpers.IsSlashableValidatorUsingTrie(validator, time.CurrentEpoch(state)) {
|
||||
if !helpers.IsSlashableValidatorUsingTrie(validator, coretime.CurrentEpoch(state)) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
||||
@@ -9,23 +9,24 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/v5/crypto/bls"
|
||||
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func validAttesterSlashingForValIdx(t *testing.T, beaconState state.BeaconState, privs []bls.SecretKey, valIdx ...uint64) *ethpb.AttesterSlashing {
|
||||
var slashings []*ethpb.AttesterSlashing
|
||||
func validAttesterSlashingForValIdx(t *testing.T, beaconState state.BeaconState, privs []bls.SecretKey, valIdx ...uint64) ethpb.AttSlashing {
|
||||
var slashings []ethpb.AttSlashing
|
||||
for _, idx := range valIdx {
|
||||
generatedSlashing, err := util.GenerateAttesterSlashingForValidator(beaconState, privs[idx], primitives.ValidatorIndex(idx))
|
||||
require.NoError(t, err)
|
||||
slashings = append(slashings, generatedSlashing.(*ethpb.AttesterSlashing))
|
||||
slashings = append(slashings, generatedSlashing)
|
||||
}
|
||||
var allSig1 []bls.Signature
|
||||
var allSig2 []bls.Signature
|
||||
for _, slashing := range slashings {
|
||||
sig1 := slashing.Attestation_1.Signature
|
||||
sig2 := slashing.Attestation_2.Signature
|
||||
sig1 := slashing.FirstAttestation().GetSignature()
|
||||
sig2 := slashing.SecondAttestation().GetSignature()
|
||||
sigFromBytes1, err := bls.SignatureFromBytes(sig1)
|
||||
require.NoError(t, err)
|
||||
sigFromBytes2, err := bls.SignatureFromBytes(sig2)
|
||||
@@ -35,31 +36,52 @@ func validAttesterSlashingForValIdx(t *testing.T, beaconState state.BeaconState,
|
||||
}
|
||||
aggSig1 := bls.AggregateSignatures(allSig1)
|
||||
aggSig2 := bls.AggregateSignatures(allSig2)
|
||||
aggSlashing := ðpb.AttesterSlashing{
|
||||
|
||||
if beaconState.Version() >= version.Electra {
|
||||
return ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: valIdx,
|
||||
Data: slashings[0].FirstAttestation().GetData(),
|
||||
Signature: aggSig1.Marshal(),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestationElectra{
|
||||
AttestingIndices: valIdx,
|
||||
Data: slashings[0].SecondAttestation().GetData(),
|
||||
Signature: aggSig2.Marshal(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return ðpb.AttesterSlashing{
|
||||
Attestation_1: ðpb.IndexedAttestation{
|
||||
AttestingIndices: valIdx,
|
||||
Data: slashings[0].Attestation_1.Data,
|
||||
Data: slashings[0].FirstAttestation().GetData(),
|
||||
Signature: aggSig1.Marshal(),
|
||||
},
|
||||
Attestation_2: ðpb.IndexedAttestation{
|
||||
AttestingIndices: valIdx,
|
||||
Data: slashings[0].Attestation_2.Data,
|
||||
Data: slashings[0].SecondAttestation().GetData(),
|
||||
Signature: aggSig2.Marshal(),
|
||||
},
|
||||
}
|
||||
return aggSlashing
|
||||
}
|
||||
|
||||
func attesterSlashingForValIdx(valIdx ...uint64) *ethpb.AttesterSlashing {
|
||||
func attesterSlashingForValIdx(ver int, valIdx ...uint64) ethpb.AttSlashing {
|
||||
if ver >= version.Electra {
|
||||
return ðpb.AttesterSlashingElectra{
|
||||
Attestation_1: ðpb.IndexedAttestationElectra{AttestingIndices: valIdx},
|
||||
Attestation_2: ðpb.IndexedAttestationElectra{AttestingIndices: valIdx},
|
||||
}
|
||||
}
|
||||
return ðpb.AttesterSlashing{
|
||||
Attestation_1: ðpb.IndexedAttestation{AttestingIndices: valIdx},
|
||||
Attestation_2: ðpb.IndexedAttestation{AttestingIndices: valIdx},
|
||||
}
|
||||
}
|
||||
|
||||
func pendingSlashingForValIdx(valIdx ...uint64) *PendingAttesterSlashing {
|
||||
func pendingSlashingForValIdx(ver int, valIdx ...uint64) *PendingAttesterSlashing {
|
||||
return &PendingAttesterSlashing{
|
||||
attesterSlashing: attesterSlashingForValIdx(valIdx...),
|
||||
attesterSlashing: attesterSlashingForValIdx(ver, valIdx...),
|
||||
validatorToSlash: primitives.ValidatorIndex(valIdx[0]),
|
||||
}
|
||||
}
|
||||
@@ -71,229 +93,245 @@ func TestPool_InsertAttesterSlashing(t *testing.T) {
|
||||
wantErr []bool
|
||||
}
|
||||
type args struct {
|
||||
slashings []*ethpb.AttesterSlashing
|
||||
slashings []ethpb.AttSlashing
|
||||
}
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
pendingSlashings := make([]*PendingAttesterSlashing, 20)
|
||||
slashings := make([]*ethpb.AttesterSlashing, 20)
|
||||
for i := 0; i < len(pendingSlashings); i++ {
|
||||
generatedSl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
|
||||
require.NoError(t, err)
|
||||
pendingSlashings[i] = &PendingAttesterSlashing{
|
||||
attesterSlashing: generatedSl,
|
||||
validatorToSlash: primitives.ValidatorIndex(i),
|
||||
}
|
||||
sl, ok := generatedSl.(*ethpb.AttesterSlashing)
|
||||
require.Equal(t, true, ok, "Attester slashing has the wrong type (expected %T, got %T)", ðpb.AttesterSlashing{}, generatedSl)
|
||||
slashings[i] = sl
|
||||
}
|
||||
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
// We mark the following validators with some preconditions.
|
||||
exitedVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(2))
|
||||
require.NoError(t, err)
|
||||
exitedVal.WithdrawableEpoch = 0
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(2), exitedVal))
|
||||
futureWithdrawVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(4))
|
||||
require.NoError(t, err)
|
||||
futureWithdrawVal.WithdrawableEpoch = 17
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(4), futureWithdrawVal))
|
||||
slashedVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(5))
|
||||
require.NoError(t, err)
|
||||
slashedVal.Slashed = true
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(5), slashedVal))
|
||||
slashedVal2, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(21))
|
||||
require.NoError(t, err)
|
||||
slashedVal2.Slashed = true
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(21), slashedVal2))
|
||||
|
||||
aggSlashing1 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 0, 1, 2)
|
||||
aggSlashing2 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 5, 9, 13)
|
||||
aggSlashing3 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 15, 20, 21)
|
||||
aggSlashing4 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 2, 5, 21)
|
||||
|
||||
tests := []struct {
|
||||
type testCase struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want []*PendingAttesterSlashing
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "Empty list",
|
||||
fields: fields{
|
||||
pending: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[0:1],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: slashings[0],
|
||||
validatorToSlash: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Empty list two validators slashed",
|
||||
fields: fields{
|
||||
pending: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false, false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[0:2],
|
||||
},
|
||||
want: pendingSlashings[0:2],
|
||||
},
|
||||
{
|
||||
name: "Duplicate identical slashing",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashings[1],
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: pendingSlashings[1:2],
|
||||
},
|
||||
{
|
||||
name: "Slashing for already exit validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[5:6],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Slashing for withdrawable validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[2:3],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Slashing for slashed validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[4:5],
|
||||
},
|
||||
want: pendingSlashings[4:5],
|
||||
},
|
||||
{
|
||||
name: "Already included",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
1: true,
|
||||
},
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Maintains sorted order",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashings[0],
|
||||
pendingSlashings[2],
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: pendingSlashings[0:3],
|
||||
},
|
||||
{
|
||||
name: "Doesn't reject partially slashed slashings",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false, false, false, true},
|
||||
},
|
||||
args: args{
|
||||
slashings: []*ethpb.AttesterSlashing{
|
||||
aggSlashing1,
|
||||
aggSlashing2,
|
||||
aggSlashing3,
|
||||
aggSlashing4,
|
||||
},
|
||||
},
|
||||
want: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: aggSlashing1,
|
||||
validatorToSlash: 0,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing1,
|
||||
validatorToSlash: 1,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing2,
|
||||
validatorToSlash: 9,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing2,
|
||||
validatorToSlash: 13,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing3,
|
||||
validatorToSlash: 15,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing3,
|
||||
validatorToSlash: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := &Pool{
|
||||
pendingAttesterSlashing: tt.fields.pending,
|
||||
included: tt.fields.included,
|
||||
}
|
||||
var err error
|
||||
for i := 0; i < len(tt.args.slashings); i++ {
|
||||
err = p.InsertAttesterSlashing(context.Background(), beaconState, tt.args.slashings[i])
|
||||
if tt.fields.wantErr[i] {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
assert.Equal(t, len(tt.want), len(p.pendingAttesterSlashing))
|
||||
|
||||
for i := range p.pendingAttesterSlashing {
|
||||
assert.Equal(t, tt.want[i].validatorToSlash, p.pendingAttesterSlashing[i].validatorToSlash)
|
||||
assert.DeepEqual(t, tt.want[i].attesterSlashing, p.pendingAttesterSlashing[i].attesterSlashing, "At index %d", i)
|
||||
setupFunc := func(beaconState state.BeaconState, privKeys []bls.SecretKey) []testCase {
|
||||
pendingSlashings := make([]*PendingAttesterSlashing, 20)
|
||||
slashings := make([]ethpb.AttSlashing, 20)
|
||||
for i := 0; i < len(pendingSlashings); i++ {
|
||||
generatedSl, err := util.GenerateAttesterSlashingForValidator(beaconState, privKeys[i], primitives.ValidatorIndex(i))
|
||||
require.NoError(t, err)
|
||||
pendingSlashings[i] = &PendingAttesterSlashing{
|
||||
attesterSlashing: generatedSl,
|
||||
validatorToSlash: primitives.ValidatorIndex(i),
|
||||
}
|
||||
})
|
||||
slashings[i] = generatedSl
|
||||
}
|
||||
require.NoError(t, beaconState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
|
||||
|
||||
// We mark the following validators with some preconditions.
|
||||
exitedVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(2))
|
||||
require.NoError(t, err)
|
||||
exitedVal.WithdrawableEpoch = 0
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(2), exitedVal))
|
||||
futureWithdrawVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(4))
|
||||
require.NoError(t, err)
|
||||
futureWithdrawVal.WithdrawableEpoch = 17
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(4), futureWithdrawVal))
|
||||
slashedVal, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(5))
|
||||
require.NoError(t, err)
|
||||
slashedVal.Slashed = true
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(5), slashedVal))
|
||||
slashedVal2, err := beaconState.ValidatorAtIndex(primitives.ValidatorIndex(21))
|
||||
require.NoError(t, err)
|
||||
slashedVal2.Slashed = true
|
||||
require.NoError(t, beaconState.UpdateValidatorAtIndex(primitives.ValidatorIndex(21), slashedVal2))
|
||||
|
||||
aggSlashing1 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 0, 1, 2)
|
||||
aggSlashing2 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 5, 9, 13)
|
||||
aggSlashing3 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 15, 20, 21)
|
||||
aggSlashing4 := validAttesterSlashingForValIdx(t, beaconState, privKeys, 2, 5, 21)
|
||||
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "Empty list",
|
||||
fields: fields{
|
||||
pending: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[0:1],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: slashings[0],
|
||||
validatorToSlash: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Empty list two validators slashed",
|
||||
fields: fields{
|
||||
pending: make([]*PendingAttesterSlashing, 0),
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false, false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[0:2],
|
||||
},
|
||||
want: pendingSlashings[0:2],
|
||||
},
|
||||
{
|
||||
name: "Duplicate identical slashing",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashings[1],
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: pendingSlashings[1:2],
|
||||
},
|
||||
{
|
||||
name: "Slashing for already exit validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[5:6],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Slashing for withdrawable validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[2:3],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Slashing for slashed validator",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[4:5],
|
||||
},
|
||||
want: pendingSlashings[4:5],
|
||||
},
|
||||
{
|
||||
name: "Already included",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
1: true,
|
||||
},
|
||||
wantErr: []bool{true},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: []*PendingAttesterSlashing{},
|
||||
},
|
||||
{
|
||||
name: "Maintains sorted order",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashings[0],
|
||||
pendingSlashings[2],
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false},
|
||||
},
|
||||
args: args{
|
||||
slashings: slashings[1:2],
|
||||
},
|
||||
want: pendingSlashings[0:3],
|
||||
},
|
||||
{
|
||||
name: "Doesn't reject partially slashed slashings",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
wantErr: []bool{false, false, false, true},
|
||||
},
|
||||
args: args{
|
||||
slashings: []ethpb.AttSlashing{
|
||||
aggSlashing1,
|
||||
aggSlashing2,
|
||||
aggSlashing3,
|
||||
aggSlashing4,
|
||||
},
|
||||
},
|
||||
want: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: aggSlashing1,
|
||||
validatorToSlash: 0,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing1,
|
||||
validatorToSlash: 1,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing2,
|
||||
validatorToSlash: 9,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing2,
|
||||
validatorToSlash: 13,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing3,
|
||||
validatorToSlash: 15,
|
||||
},
|
||||
{
|
||||
attesterSlashing: aggSlashing3,
|
||||
validatorToSlash: 20,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return tests
|
||||
}
|
||||
|
||||
runFunc := func(beaconState state.BeaconState, tests []testCase) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
p := &Pool{
|
||||
pendingAttesterSlashing: tt.fields.pending,
|
||||
included: tt.fields.included,
|
||||
}
|
||||
var err error
|
||||
for i := 0; i < len(tt.args.slashings); i++ {
|
||||
err = p.InsertAttesterSlashing(context.Background(), beaconState, tt.args.slashings[i])
|
||||
if tt.fields.wantErr[i] {
|
||||
assert.NotNil(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
}
|
||||
assert.Equal(t, len(tt.want), len(p.pendingAttesterSlashing))
|
||||
|
||||
for i := range p.pendingAttesterSlashing {
|
||||
assert.Equal(t, tt.want[i].validatorToSlash, p.pendingAttesterSlashing[i].validatorToSlash)
|
||||
assert.DeepEqual(t, tt.want[i].attesterSlashing, p.pendingAttesterSlashing[i].attesterSlashing, "At index %d", i)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("phase0", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
tests := setupFunc(beaconState, privKeys)
|
||||
runFunc(beaconState, tests)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
beaconState, privKeys := util.DeterministicGenesisStateElectra(t, 64)
|
||||
tests := setupFunc(beaconState, privKeys)
|
||||
runFunc(beaconState, tests)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPool_InsertAttesterSlashing_SigFailsVerify_ClearPool(t *testing.T) {
|
||||
@@ -337,7 +375,7 @@ func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
|
||||
included map[primitives.ValidatorIndex]bool
|
||||
}
|
||||
type args struct {
|
||||
slashing *ethpb.AttesterSlashing
|
||||
slashing ethpb.AttSlashing
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -346,22 +384,45 @@ func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
|
||||
want fields
|
||||
}{
|
||||
{
|
||||
name: "Included, does not exist in pending",
|
||||
name: "phase0 included, does not exist in pending",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: attesterSlashingForValIdx(1),
|
||||
attesterSlashing: attesterSlashingForValIdx(version.Phase0, 1),
|
||||
validatorToSlash: 1,
|
||||
},
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
},
|
||||
args: args{
|
||||
slashing: attesterSlashingForValIdx(3),
|
||||
slashing: attesterSlashingForValIdx(version.Phase0, 3),
|
||||
},
|
||||
want: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(1),
|
||||
pendingSlashingForValIdx(version.Phase0, 1),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
3: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "electra included, does not exist in pending",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
{
|
||||
attesterSlashing: attesterSlashingForValIdx(version.Electra, 1),
|
||||
validatorToSlash: 1,
|
||||
},
|
||||
},
|
||||
included: make(map[primitives.ValidatorIndex]bool),
|
||||
},
|
||||
args: args{
|
||||
slashing: attesterSlashingForValIdx(version.Electra, 3),
|
||||
},
|
||||
want: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(version.Electra, 1),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
3: true,
|
||||
@@ -372,21 +433,21 @@ func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
|
||||
name: "Removes from pending list",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(1),
|
||||
pendingSlashingForValIdx(2),
|
||||
pendingSlashingForValIdx(3),
|
||||
pendingSlashingForValIdx(version.Phase0, 1),
|
||||
pendingSlashingForValIdx(version.Phase0, 2),
|
||||
pendingSlashingForValIdx(version.Phase0, 3),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slashing: attesterSlashingForValIdx(2),
|
||||
slashing: attesterSlashingForValIdx(version.Phase0, 2),
|
||||
},
|
||||
want: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(1),
|
||||
pendingSlashingForValIdx(3),
|
||||
pendingSlashingForValIdx(version.Phase0, 1),
|
||||
pendingSlashingForValIdx(version.Phase0, 3),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
0: true,
|
||||
@@ -398,37 +459,37 @@ func TestPool_MarkIncludedAttesterSlashing(t *testing.T) {
|
||||
name: "Removes from long pending list",
|
||||
fields: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(1),
|
||||
pendingSlashingForValIdx(2),
|
||||
pendingSlashingForValIdx(3),
|
||||
pendingSlashingForValIdx(4),
|
||||
pendingSlashingForValIdx(5),
|
||||
pendingSlashingForValIdx(6),
|
||||
pendingSlashingForValIdx(7),
|
||||
pendingSlashingForValIdx(8),
|
||||
pendingSlashingForValIdx(9),
|
||||
pendingSlashingForValIdx(10),
|
||||
pendingSlashingForValIdx(11),
|
||||
pendingSlashingForValIdx(version.Phase0, 1),
|
||||
pendingSlashingForValIdx(version.Phase0, 2),
|
||||
pendingSlashingForValIdx(version.Phase0, 3),
|
||||
pendingSlashingForValIdx(version.Phase0, 4),
|
||||
pendingSlashingForValIdx(version.Phase0, 5),
|
||||
pendingSlashingForValIdx(version.Phase0, 6),
|
||||
pendingSlashingForValIdx(version.Phase0, 7),
|
||||
pendingSlashingForValIdx(version.Phase0, 8),
|
||||
pendingSlashingForValIdx(version.Phase0, 9),
|
||||
pendingSlashingForValIdx(version.Phase0, 10),
|
||||
pendingSlashingForValIdx(version.Phase0, 11),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
0: true,
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
slashing: attesterSlashingForValIdx(6),
|
||||
slashing: attesterSlashingForValIdx(version.Phase0, 6),
|
||||
},
|
||||
want: fields{
|
||||
pending: []*PendingAttesterSlashing{
|
||||
pendingSlashingForValIdx(1),
|
||||
pendingSlashingForValIdx(2),
|
||||
pendingSlashingForValIdx(3),
|
||||
pendingSlashingForValIdx(4),
|
||||
pendingSlashingForValIdx(5),
|
||||
pendingSlashingForValIdx(7),
|
||||
pendingSlashingForValIdx(8),
|
||||
pendingSlashingForValIdx(9),
|
||||
pendingSlashingForValIdx(10),
|
||||
pendingSlashingForValIdx(11),
|
||||
pendingSlashingForValIdx(version.Phase0, 1),
|
||||
pendingSlashingForValIdx(version.Phase0, 2),
|
||||
pendingSlashingForValIdx(version.Phase0, 3),
|
||||
pendingSlashingForValIdx(version.Phase0, 4),
|
||||
pendingSlashingForValIdx(version.Phase0, 5),
|
||||
pendingSlashingForValIdx(version.Phase0, 7),
|
||||
pendingSlashingForValIdx(version.Phase0, 8),
|
||||
pendingSlashingForValIdx(version.Phase0, 9),
|
||||
pendingSlashingForValIdx(version.Phase0, 10),
|
||||
pendingSlashingForValIdx(version.Phase0, 11),
|
||||
},
|
||||
included: map[primitives.ValidatorIndex]bool{
|
||||
0: true,
|
||||
|
||||
beacon-chain/operations/slashings/service_new.go (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
package slashings

import (
    "context"
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

// WithElectraTimer configures the pool service to convert pending Phase0 slashings
// into their Electra representation once the chain reaches the Electra fork epoch.
func WithElectraTimer(cw startup.ClockWaiter, currentSlotFn func() primitives.Slot) Option {
    return func(p *PoolService) error {
        p.runElectraTimer = true
        p.cw = cw
        p.currentSlotFn = currentSlotFn
        return nil
    }
}

// NewPoolService returns a service that manages the Pool.
func NewPoolService(ctx context.Context, pool PoolManager, opts ...Option) *PoolService {
    ctx, cancel := context.WithCancel(ctx)
    p := &PoolService{
        ctx:         ctx,
        cancel:      cancel,
        poolManager: pool,
    }

    for _, opt := range opts {
        if err := opt(p); err != nil {
            return nil
        }
    }

    return p
}

// Start the slashing pool service.
func (p *PoolService) Start() {
    go p.run()
}

func (p *PoolService) run() {
    if !p.runElectraTimer {
        return
    }

    // If Electra has not been scheduled, there is nothing to do.
    if params.BeaconConfig().ElectraForkEpoch == params.BeaconConfig().FarFutureEpoch {
        return
    }

    // If run() is executed after the transition to Electra has already happened,
    // there is nothing to convert because the slashing pool is empty at startup.
    if slots.ToEpoch(p.currentSlotFn()) >= params.BeaconConfig().ElectraForkEpoch {
        return
    }

    p.waitForChainInitialization()

    ticker := time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    defer ticker.Stop()

    for {
        select {
        case <-p.ctx.Done():
            log.Warning("Context cancelled, ConvertToElectra aborted")
            return
        case <-ticker.C:
            if slots.ToEpoch(p.currentSlotFn()) >= params.BeaconConfig().ElectraForkEpoch {
                log.Info("Converting Phase0 slashings to Electra slashings")
                p.poolManager.ConvertToElectra()
                return
            }
        }
    }
}

func (p *PoolService) waitForChainInitialization() {
    clock, err := p.cw.WaitForClock(p.ctx)
    if err != nil {
        log.WithError(err).Error("Could not receive chain start notification")
    }
    p.clock = clock
    log.WithField("genesisTime", clock.GenesisTime()).Info(
        "Slashing pool service received chain initialization event",
    )
}

// Stop the slashing pool service.
func (p *PoolService) Stop() error {
    p.cancel()
    return nil
}

// Status of the slashing pool service.
func (p *PoolService) Status() error {
    return nil
}
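For context, below is a minimal sketch of how this service could be wired up at node registration time. It is illustrative only and not part of this diff: the package name, the genesisTime parameter, and the use of slots.CurrentSlot as the slot source are assumptions.

package node // hypothetical registration site, not part of this PR

import (
    "context"
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/operations/slashings"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    "github.com/prysmaticlabs/prysm/v5/time/slots"
)

// registerSlashingPoolService builds the pool and its service with the Electra timer enabled.
func registerSlashingPoolService(ctx context.Context, genesisTime time.Time, cw startup.ClockWaiter) *slashings.PoolService {
    pool := slashings.NewPool()
    currentSlot := func() primitives.Slot {
        // Derive the wall-clock slot from genesis time (assumed helper for this sketch).
        return slots.CurrentSlot(uint64(genesisTime.Unix()))
    }
    svc := slashings.NewPoolService(ctx, pool, slashings.WithElectraTimer(cw, currentSlot))
    svc.Start()
    return svc
}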
|
||||
beacon-chain/operations/slashings/service_new_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
package slashings

import (
    "context"
    "testing"
    "time"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/v5/testing/assert"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestConvertToElectraWithTimer(t *testing.T) {
    ctx := context.Background()

    cfg := params.BeaconConfig().Copy()
    cfg.ElectraForkEpoch = 1
    params.OverrideBeaconConfig(cfg)
    params.SetupTestConfigCleanup(t)

    indices := []uint64{0, 1}
    data := &ethpb.AttestationData{
        Slot:            1,
        CommitteeIndex:  1,
        BeaconBlockRoot: make([]byte, fieldparams.RootLength),
        Source: &ethpb.Checkpoint{
            Epoch: 0,
            Root:  make([]byte, fieldparams.RootLength),
        },
        Target: &ethpb.Checkpoint{
            Epoch: 0,
            Root:  make([]byte, fieldparams.RootLength),
        },
    }
    sig := make([]byte, fieldparams.BLSSignatureLength)

    phase0Slashing := &PendingAttesterSlashing{
        attesterSlashing: &ethpb.AttesterSlashing{
            Attestation_1: &ethpb.IndexedAttestation{
                AttestingIndices: indices,
                Data:             data,
                Signature:        sig,
            },
            Attestation_2: &ethpb.IndexedAttestation{
                AttestingIndices: indices,
                Data:             data,
                Signature:        sig,
            },
        },
    }

    // We need run() to execute the conversion immediately, otherwise we'd need a time.Sleep to wait for the Electra fork.
    // To do that we need a timer with the current time being at the Electra fork.
    now := time.Now()
    electraTime := now.Add(time.Duration(uint64(cfg.ElectraForkEpoch)*uint64(params.BeaconConfig().SlotsPerEpoch)*params.BeaconConfig().SecondsPerSlot) * time.Second)
    c := startup.NewClock(now, [32]byte{}, startup.WithNower(func() time.Time { return electraTime }))
    cw := startup.NewClockSynchronizer()
    require.NoError(t, cw.SetClock(c))
    p := NewPool()
    // The service has to think that the current slot is before Electra
    // because run() exits early after Electra.
    s := NewPoolService(ctx, p, WithElectraTimer(cw, func() primitives.Slot {
        return primitives.Slot(cfg.ElectraForkEpoch)*params.BeaconConfig().SlotsPerEpoch - 1
    }))
    p.pendingAttesterSlashing = append(p.pendingAttesterSlashing, phase0Slashing)

    s.run()

    electraSlashing, ok := p.pendingAttesterSlashing[0].attesterSlashing.(*ethpb.AttesterSlashingElectra)
    require.Equal(t, true, ok, "Slashing was not converted to Electra")
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.FirstAttestation().GetAttestingIndices(), electraSlashing.FirstAttestation().GetAttestingIndices())
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.FirstAttestation().GetData(), electraSlashing.FirstAttestation().GetData())
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.FirstAttestation().GetSignature(), electraSlashing.FirstAttestation().GetSignature())
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.SecondAttestation().GetAttestingIndices(), electraSlashing.SecondAttestation().GetAttestingIndices())
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.SecondAttestation().GetData(), electraSlashing.SecondAttestation().GetData())
    assert.DeepEqual(t, phase0Slashing.attesterSlashing.SecondAttestation().GetSignature(), electraSlashing.SecondAttestation().GetSignature())
}
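The electraTime offset used above is simply ElectraForkEpoch × SlotsPerEpoch × SecondsPerSlot seconds after genesis. A standalone sanity check of that arithmetic, as a sketch only (it assumes mainnet presets of 32 slots per epoch and 12 seconds per slot and is not part of this PR), could look like:

package slashings

import (
    "testing"
    "time"

    "github.com/prysmaticlabs/prysm/v5/config/params"
    "github.com/prysmaticlabs/prysm/v5/testing/require"
)

func TestElectraForkOffsetArithmetic(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.ElectraForkEpoch = 1
    params.OverrideBeaconConfig(cfg)

    // One epoch after genesis: 1 epoch * 32 slots * 12 s = 384 s with mainnet presets.
    offset := time.Duration(uint64(cfg.ElectraForkEpoch)*uint64(cfg.SlotsPerEpoch)*cfg.SecondsPerSlot) * time.Second
    require.Equal(t, 384*time.Second, offset)
}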
|
||||
@@ -4,6 +4,7 @@ import (
    "context"
    "sync"

    "github.com/prysmaticlabs/prysm/v5/beacon-chain/startup"
    "github.com/prysmaticlabs/prysm/v5/beacon-chain/state"
    "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
    ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
@@ -31,6 +32,21 @@ type PoolManager interface {
    PendingProposerSlashings(ctx context.Context, state state.ReadOnlyBeaconState, noLimit bool) []*ethpb.ProposerSlashing
    MarkIncludedAttesterSlashing(as ethpb.AttSlashing)
    MarkIncludedProposerSlashing(ps *ethpb.ProposerSlashing)
    ConvertToElectra()
}

// Option for pool service configuration.
type Option func(p *PoolService) error

// PoolService manages the Pool.
type PoolService struct {
    ctx             context.Context
    cancel          context.CancelFunc
    poolManager     PoolManager
    currentSlotFn   func() primitives.Slot
    cw              startup.ClockWaiter
    clock           *startup.Clock
    runElectraTimer bool
}

// Pool is a concrete implementation of PoolManager.
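ConvertToElectra is added to the PoolManager interface here, but the Pool implementation of it is not included in this excerpt. As a rough sketch only (the lock field name and exact field handling are assumptions based on the surrounding code, not the actual method shipped in this PR), the conversion of pending Phase0 attester slashings could look like:

// Illustrative sketch only; not the Pool.ConvertToElectra from this PR.
func (p *Pool) ConvertToElectra() {
    p.lock.Lock() // assumes the pool guards its slices with a mutex named lock
    defer p.lock.Unlock()
    for i, pending := range p.pendingAttesterSlashing {
        phase0, ok := pending.attesterSlashing.(*ethpb.AttesterSlashing)
        if !ok {
            continue // already an Electra slashing
        }
        // Copy the indexed attestations into their Electra containers unchanged.
        p.pendingAttesterSlashing[i].attesterSlashing = &ethpb.AttesterSlashingElectra{
            Attestation_1: &ethpb.IndexedAttestationElectra{
                AttestingIndices: phase0.Attestation_1.AttestingIndices,
                Data:             phase0.Attestation_1.Data,
                Signature:        phase0.Attestation_1.Signature,
            },
            Attestation_2: &ethpb.IndexedAttestationElectra{
                AttestingIndices: phase0.Attestation_2.AttestingIndices,
                Data:             phase0.Attestation_2.Data,
                Signature:        phase0.Attestation_2.Signature,
            },
        }
    }
}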
|
||||
|
||||
@@ -354,10 +354,10 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestHostIsResolved(t *testing.T) {
|
||||
// As defined in RFC 2606 , example.org is a
|
||||
// reserved example domain name.
|
||||
exampleHost := "example.org"
|
||||
exampleIP := "93.184.215.14"
|
||||
// ip.addr.tools - construct domain names that resolve to any given IP address
|
||||
// ex: 192-0-2-1.ip.addr.tools resolves to 192.0.2.1.
|
||||
exampleHost := "96-7-129-13.ip.addr.tools"
|
||||
exampleIP := "96.7.129.13"
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
|
||||
@@ -2,6 +2,7 @@ package peers_test
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
|
||||
@@ -28,5 +29,5 @@ func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
m.Run()
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package scorers_test
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
|
||||
@@ -28,7 +29,7 @@ func TestMain(m *testing.M) {
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
m.Run()
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// roundScore returns score rounded in accordance with the score manager's rounding factor.
|
||||
|
||||
@@ -54,6 +54,8 @@ func (s *Service) nodeFilter(topic string, index uint64) (func(node *enode.Node)
|
||||
return s.filterPeerForAttSubnet(index), nil
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return s.filterPeerForSyncSubnet(index), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
return s.filterPeerForBlobSubnet(), nil
|
||||
default:
|
||||
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
|
||||
}
|
||||
@@ -266,6 +268,14 @@ func (s *Service) filterPeerForSyncSubnet(index uint64) func(node *enode.Node) b
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers for a particular blob subnet.
|
||||
// All peers are supposed to be subscribed to all blob subnets.
|
||||
func (s *Service) filterPeerForBlobSubnet() func(_ *enode.Node) bool {
|
||||
return func(_ *enode.Node) bool {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
|
||||
@@ -123,7 +123,7 @@ func InitializeDataMaps() {
|
||||
return ðpb.SingleAttestation{}, nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (ethpb.Att, error) {
|
||||
return ðpb.AttestationElectra{}, nil
|
||||
return ðpb.SingleAttestation{}, nil
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -90,6 +90,5 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@org_golang_x_exp//maps:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"maps"
|
||||
"net/http"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
func Test_endpoints(t *testing.T) {
|
||||
|
||||
@@ -1559,7 +1559,7 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
slashing1PostElectra := ðpbv1alpha1.AttesterSlashingElectra{
|
||||
slashingPostElectra := ðpbv1alpha1.AttesterSlashingElectra{
|
||||
Attestation_1: ðpbv1alpha1.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{1, 10},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
@@ -1595,42 +1595,6 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
Signature: bytesutil.PadTo([]byte("signature2"), 96),
|
||||
},
|
||||
}
|
||||
slashing2PostElectra := ðpbv1alpha1.AttesterSlashingElectra{
|
||||
Attestation_1: ðpbv1alpha1.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{3, 30},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 3,
|
||||
CommitteeIndex: 3,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot3"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 3,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot3"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 30,
|
||||
Root: bytesutil.PadTo([]byte("targetroot3"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature3"), 96),
|
||||
},
|
||||
Attestation_2: ðpbv1alpha1.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{4, 40},
|
||||
Data: ðpbv1alpha1.AttestationData{
|
||||
Slot: 4,
|
||||
CommitteeIndex: 4,
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte("blockroot4"), 32),
|
||||
Source: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 4,
|
||||
Root: bytesutil.PadTo([]byte("sourceroot4"), 32),
|
||||
},
|
||||
Target: ðpbv1alpha1.Checkpoint{
|
||||
Epoch: 40,
|
||||
Root: bytesutil.PadTo([]byte("targetroot4"), 32),
|
||||
},
|
||||
},
|
||||
Signature: bytesutil.PadTo([]byte("signature4"), 96),
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("V1", func(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
@@ -1702,7 +1666,7 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra, slashing1PreElectra}},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashingPostElectra, slashing1PreElectra}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
|
||||
@@ -1724,8 +1688,7 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, slashing1PostElectra, ss[0])
|
||||
require.DeepEqual(t, slashing2PostElectra, ss[1])
|
||||
require.DeepEqual(t, slashingPostElectra, ss[0])
|
||||
})
|
||||
t.Run("post-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
@@ -1741,7 +1704,7 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
s := &Server{
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashing1PostElectra, slashing2PostElectra}},
|
||||
SlashingsPool: &slashingsmock.PoolMock{PendingAttSlashings: []ethpbv1alpha1.AttSlashing{slashingPostElectra}},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://example.com/eth/v2/beacon/pool/attester_slashings", nil)
|
||||
@@ -1763,8 +1726,7 @@ func TestGetAttesterSlashings(t *testing.T) {
|
||||
ss, err := structs.AttesterSlashingsElectraToConsensus(slashings)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.DeepEqual(t, slashing1PostElectra, ss[0])
|
||||
require.DeepEqual(t, slashing2PostElectra, ss[1])
|
||||
require.DeepEqual(t, slashingPostElectra, ss[0])
|
||||
})
|
||||
t.Run("pre-electra-ok", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconState()
|
||||
|
||||
@@ -9,6 +9,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/eth/blob",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
@@ -30,6 +31,7 @@ go_test(
|
||||
srcs = ["handlers_test.go"],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//api:go_default_library",
|
||||
"//api/server/structs:go_default_library",
|
||||
"//beacon-chain/blockchain/testing:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
@@ -41,6 +43,7 @@ go_test(
|
||||
"//config/params:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/rpc/core"
|
||||
field_params "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
|
||||
@@ -52,21 +53,27 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if blk == nil {
|
||||
httputil.HandleError(w, "Block not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if httputil.RespondWithSsz(r) {
|
||||
sszResp, err := buildSidecarsSSZResponse(verifiedBlobs)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
|
||||
httputil.WriteSsz(w, sszResp, "blob_sidecars.ssz")
|
||||
return
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
blkRoot, err := blk.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not hash block: "+err.Error(), http.StatusInternalServerError)
|
||||
@@ -85,6 +92,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
ExecutionOptimistic: isOptimistic,
|
||||
Finalized: s.FinalizationFetcher.IsFinalized(ctx, blkRoot),
|
||||
}
|
||||
w.Header().Set(api.VersionHeader, version.String(blk.Version()))
|
||||
httputil.WriteJson(w, resp)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/api"
|
||||
"github.com/prysmaticlabs/prysm/v5/api/server/structs"
|
||||
mockChain "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/v5/beacon-chain/db/filesystem"
|
||||
@@ -24,6 +26,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/config/params"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/httputil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
@@ -189,6 +192,23 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("slot not found", func(t *testing.T) {
|
||||
u := "http://foo.example/122"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{Block: denebBlock},
|
||||
BeaconDB: db,
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
})
|
||||
t.Run("one blob only", func(t *testing.T) {
|
||||
u := "http://foo.example/123?indices=2"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
@@ -204,6 +224,7 @@ func TestBlobs(t *testing.T) {
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Deneb), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
@@ -243,6 +264,21 @@ func TestBlobs(t *testing.T) {
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("blob index over max", func(t *testing.T) {
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Deneb)
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
|
||||
})
|
||||
t.Run("outside retention period returns 200 w/ empty list ", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
@@ -342,7 +378,7 @@ func TestBlobs(t *testing.T) {
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Deneb), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
require.Equal(t, len(writer.Body.Bytes()), fieldparams.BlobSidecarSize) // size of each sidecar
|
||||
// can directly unmarshal to sidecar since there's only 1
|
||||
@@ -371,6 +407,112 @@ func TestBlobs(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBlobs_Electra(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.DenebForkEpoch = 0
|
||||
cfg.ElectraForkEpoch = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
db := testDB.SetupDB(t)
|
||||
electraBlock, blobs := util.GenerateTestElectraBlockWithSidecar(t, [32]byte{}, 123, params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra))
|
||||
require.NoError(t, db.SaveBlock(context.Background(), electraBlock))
|
||||
bs := filesystem.NewEphemeralBlobStorage(t)
|
||||
testSidecars, err := verification.BlobSidecarSliceNoop(blobs)
|
||||
require.NoError(t, err)
|
||||
for i := range testSidecars {
|
||||
require.NoError(t, bs.Save(testSidecars[i]))
|
||||
}
|
||||
blockRoot := blobs[0].BlockRoot()
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
FinalizedRoots: map[[32]byte]bool{},
|
||||
}
|
||||
s := &Server{
|
||||
OptimisticModeFetcher: mockChainService,
|
||||
FinalizationFetcher: mockChainService,
|
||||
TimeFetcher: mockChainService,
|
||||
}
|
||||
t.Run("max blobs for electra", func(t *testing.T) {
|
||||
u := "http://foo.example/123"
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: electraBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra), len(resp.Data))
|
||||
sidecar := resp.Data[0]
|
||||
require.NotNil(t, sidecar)
|
||||
assert.Equal(t, "0", sidecar.Index)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].Blob), sidecar.Blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].KzgCommitment), sidecar.KzgCommitment)
|
||||
assert.Equal(t, hexutil.Encode(blobs[0].KzgProof), sidecar.KzgProof)
|
||||
|
||||
require.Equal(t, version.String(version.Electra), resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("requested blob index at max", func(t *testing.T) {
|
||||
limit := params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra) - 1
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", limit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{
|
||||
ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: ð.Checkpoint{Root: blockRoot[:]}, Block: electraBlock},
|
||||
GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
|
||||
Genesis: time.Now(),
|
||||
},
|
||||
BeaconDB: db,
|
||||
BlobStorage: bs,
|
||||
}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, version.String(version.Electra), writer.Header().Get(api.VersionHeader))
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
resp := &structs.SidecarsResponse{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
|
||||
require.Equal(t, 1, len(resp.Data))
|
||||
sidecar := resp.Data[0]
|
||||
require.NotNil(t, sidecar)
|
||||
assert.Equal(t, fmt.Sprintf("%d", limit), sidecar.Index)
|
||||
assert.Equal(t, hexutil.Encode(blobs[limit].Blob), sidecar.Blob)
|
||||
assert.Equal(t, hexutil.Encode(blobs[limit].KzgCommitment), sidecar.KzgCommitment)
|
||||
assert.Equal(t, hexutil.Encode(blobs[limit].KzgProof), sidecar.KzgProof)
|
||||
|
||||
require.Equal(t, version.String(version.Electra), resp.Version)
|
||||
require.Equal(t, false, resp.ExecutionOptimistic)
|
||||
require.Equal(t, false, resp.Finalized)
|
||||
})
|
||||
t.Run("blob index over max", func(t *testing.T) {
|
||||
overLimit := params.BeaconConfig().MaxBlobsPerBlockByVersion(version.Electra)
|
||||
u := fmt.Sprintf("http://foo.example/123?indices=%d", overLimit)
|
||||
request := httptest.NewRequest("GET", u, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
s.Blocker = &lookup.BeaconDbBlocker{}
|
||||
s.Blobs(writer, request)
|
||||
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, fmt.Sprintf("requested blob indices [%d] are invalid", overLimit)))
|
||||
})
|
||||
}
|
||||
|
||||
func Test_parseIndices(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -140,7 +140,7 @@ func TestGetSpec(t *testing.T) {
|
||||
config.WhistleBlowerRewardQuotientElectra = 79
|
||||
config.PendingPartialWithdrawalsLimit = 80
|
||||
config.MinActivationBalance = 81
|
||||
config.PendingDepositLimit = 82
|
||||
config.PendingDepositsLimit = 82
|
||||
config.MaxPendingPartialsPerWithdrawalsSweep = 83
|
||||
config.PendingConsolidationsLimit = 84
|
||||
config.MaxPartialWithdrawalsPerPayload = 85
|
||||
|
||||
@@ -222,6 +222,39 @@ func operationEventsFixtures(t *testing.T) (*topicRequest, []*feed.Event) {
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
Type: operation.AttesterSlashingReceived,
|
||||
Data: &operation.AttesterSlashingReceivedData{
|
||||
AttesterSlashing: ð.AttesterSlashingElectra{
|
||||
Attestation_1: ð.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
Data: ð.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
Source: ð.Checkpoint{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
Attestation_2: ð.IndexedAttestationElectra{
|
||||
AttestingIndices: []uint64{0, 1},
|
||||
Data: ð.AttestationData{
|
||||
BeaconBlockRoot: make([]byte, fieldparams.RootLength),
|
||||
Source: ð.Checkpoint{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
Target: ð.Checkpoint{
|
||||
Root: make([]byte, fieldparams.RootLength),
|
||||
},
|
||||
},
|
||||
Signature: make([]byte, fieldparams.BLSSignatureLength),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&feed.Event{
|
||||
Type: operation.ProposerSlashingReceived,
|
||||
Data: &operation.ProposerSlashingReceivedData{
|
||||
@@ -461,7 +494,7 @@ func TestStreamEvents_OperationsEvents(t *testing.T) {
|
||||
defer testSync.cleanup()
|
||||
|
||||
st := tc.getState()
|
||||
v := ð.Validator{ExitEpoch: math.MaxUint64, EffectiveBalance: params.BeaconConfig().MinActivationBalance}
|
||||
v := ð.Validator{ExitEpoch: math.MaxUint64, EffectiveBalance: params.BeaconConfig().MinActivationBalance, WithdrawalCredentials: make([]byte, 32)}
|
||||
require.NoError(t, st.SetValidators([]*eth.Validator{v}))
|
||||
currentSlot := primitives.Slot(0)
|
||||
// to avoid slot processing
|
||||
@@ -544,7 +577,7 @@ func TestStuckReaderScenarios(t *testing.T) {
|
||||
|
||||
func wedgedWriterTestCase(t *testing.T, queueDepth func([]*feed.Event) int) {
|
||||
topics, events := operationEventsFixtures(t)
|
||||
require.Equal(t, 8, len(events))
|
||||
require.Equal(t, 9, len(events))
|
||||
|
||||
// set eventFeedDepth to a number lower than the events we intend to send to force the server to drop the reader.
|
||||
stn := mockChain.NewEventFeedWrapper()
|
||||
|
||||
@@ -91,6 +91,40 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommittee)
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("altairSSZ", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().AltairForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
blockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientBootstrap(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientBootstrapAltair
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, resp.Header, bootstrap.Header().Proto())
|
||||
require.DeepEqual(t, resp.CurrentSyncCommittee, bootstrap.CurrentSyncCommittee())
|
||||
require.NotNil(t, resp.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("altair - no bootstrap found", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestAltair()
|
||||
|
||||
@@ -158,6 +192,40 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommittee)
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("bellatrixSSZ", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestBellatrix()
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().BellatrixForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
blockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientBootstrap(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientBootstrapAltair
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, resp.Header, bootstrap.Header().Proto())
|
||||
require.DeepEqual(t, resp.CurrentSyncCommittee, bootstrap.CurrentSyncCommittee())
|
||||
require.NotNil(t, resp.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("capella", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false) // result is same for true and false
|
||||
|
||||
@@ -199,6 +267,40 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommittee)
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("capellaSSZ", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestCapella(false) // result is same for true and false
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().CapellaForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
blockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientBootstrap(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientBootstrapCapella
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, resp.Header, bootstrap.Header().Proto())
|
||||
require.DeepEqual(t, resp.CurrentSyncCommittee, bootstrap.CurrentSyncCommittee())
|
||||
require.NotNil(t, resp.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("deneb", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false) // result is same for true and false
|
||||
|
||||
@@ -240,6 +342,40 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommittee)
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("denebSSZ", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestDeneb(false) // result is same for true and false
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().DenebForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
blockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientBootstrap(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientBootstrapDeneb
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, resp.Header, bootstrap.Header().Proto())
|
||||
require.DeepEqual(t, resp.CurrentSyncCommittee, bootstrap.CurrentSyncCommittee())
|
||||
require.NotNil(t, resp.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("electra", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestElectra(false) // result is same for true and false
|
||||
|
||||
@@ -281,6 +417,40 @@ func TestLightClientHandler_GetLightClientBootstrap(t *testing.T) {
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommittee)
|
||||
require.NotNil(t, resp.Data.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
t.Run("electraSSZ", func(t *testing.T) {
|
||||
l := util.NewTestLightClient(t).SetupTestElectra(false) // result is same for true and false
|
||||
|
||||
slot := primitives.Slot(params.BeaconConfig().ElectraForkEpoch * primitives.Epoch(params.BeaconConfig().SlotsPerEpoch)).Add(1)
|
||||
blockRoot, err := l.Block.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
bootstrap, err := lightclient.NewLightClientBootstrapFromBeaconState(l.Ctx, slot, l.State, l.Block)
|
||||
require.NoError(t, err)
|
||||
|
||||
db := dbtesting.SetupDB(t)
|
||||
|
||||
err = db.SaveLightClientBootstrap(l.Ctx, blockRoot[:], bootstrap)
|
||||
require.NoError(t, err)
|
||||
|
||||
s := &Server{
|
||||
BeaconDB: db,
|
||||
}
|
||||
request := httptest.NewRequest("GET", "http://foo.com/", nil)
|
||||
request.SetPathValue("block_root", hexutil.Encode(blockRoot[:]))
|
||||
request.Header.Add("Accept", "application/octet-stream")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetLightClientBootstrap(writer, request)
|
||||
require.Equal(t, http.StatusOK, writer.Code)
|
||||
|
||||
var resp pb.LightClientBootstrapElectra
|
||||
err = resp.UnmarshalSSZ(writer.Body.Bytes())
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, resp.Header, bootstrap.Header().Proto())
|
||||
require.DeepEqual(t, resp.CurrentSyncCommittee, bootstrap.CurrentSyncCommittee())
|
||||
require.NotNil(t, resp.CurrentSyncCommitteeBranch)
|
||||
})
|
||||
}
|
||||
|
||||
// GetLightClientByRange tests
|
||||
|
||||
@@ -63,6 +63,7 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//network/httputil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
|
||||
@@ -32,45 +32,52 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/v5/network/httputil"
|
||||
eth "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/v5/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/v5/testing/util"
|
||||
)
|
||||
|
||||
func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, interfaces.SignedBeaconBlock, error) {
|
||||
func BlockRewardTestSetup(t *testing.T, ver int) (state.BeaconState, interfaces.SignedBeaconBlock, error) {
|
||||
helpers.ClearCache()
|
||||
var sbb interfaces.SignedBeaconBlock
|
||||
var st state.BeaconState
|
||||
var err error
|
||||
switch forkName {
|
||||
case "phase0":
|
||||
switch ver {
|
||||
case version.Phase0:
|
||||
return nil, nil, errors.New("phase0 not supported")
|
||||
case "altair":
|
||||
case version.Altair:
|
||||
st, err = util.NewBeaconStateAltair()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockAltair(util.NewBeaconBlockAltair())
|
||||
sbb, err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
case "bellatrix":
|
||||
case version.Bellatrix:
|
||||
st, err = util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockBellatrix(util.NewBeaconBlockBellatrix())
|
||||
sbb, err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
case "capella":
|
||||
case version.Capella:
|
||||
st, err = util.NewBeaconStateCapella()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockCapella(util.NewBeaconBlockCapella())
|
||||
sbb, err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
case "deneb":
|
||||
case version.Deneb:
|
||||
st, err = util.NewBeaconStateDeneb()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockDeneb(util.NewBeaconBlockDeneb())
|
||||
sbb, err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
case version.Electra:
|
||||
st, err = util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
b := util.HydrateSignedBeaconBlockElectra(util.NewBeaconBlockElectra())
|
||||
sbb, err = blocks.NewSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
default:
|
||||
return nil, nil, errors.New("fork is not supported")
|
||||
return nil, nil, fmt.Errorf("fork %s is not supported", version.String(ver))
|
||||
}
|
||||
valCount := 64
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
@@ -102,20 +109,47 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
require.NoError(t, st.SetBlockRoots(bRoots))

sbb.SetSlot(2)

// we have to set the proposer index to the value that will be randomly chosen (fortunately it's deterministic)
sbb.SetProposerIndex(12)
require.NoError(t, sbb.SetAttestations([]eth.Att{
&eth.Attestation{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
&eth.Attestation{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
}))
if ver >= version.Electra {
sbb.SetProposerIndex(4)
} else {
sbb.SetProposerIndex(12)
}

var atts []eth.Att
if ver >= version.Electra {
cb := primitives.NewAttestationCommitteeBits()
cb.SetBitAt(0, true)
atts = []eth.Att{
&eth.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
CommitteeBits: cb,
},
&eth.AttestationElectra{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
CommitteeBits: cb,
},
}
} else {
atts = []eth.Att{
&eth.Attestation{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
&eth.Attestation{
AggregationBits: bitfield.Bitlist{0b00000111},
Data: util.HydrateAttestationData(&eth.AttestationData{}),
Signature: make([]byte, fieldparams.BLSSignatureLength),
},
}
}
require.NoError(t, sbb.SetAttestations(atts))

attData1 := util.HydrateAttestationData(&eth.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte("root1"), 32)})
attData2 := util.HydrateAttestationData(&eth.AttestationData{BeaconBlockRoot: bytesutil.PadTo([]byte("root2"), 32)})
@@ -125,8 +159,23 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
require.NoError(t, err)
sigRoot2, err := signing.ComputeSigningRoot(attData2, domain)
require.NoError(t, err)
require.NoError(t, sbb.SetAttesterSlashings([]eth.AttSlashing{
&eth.AttesterSlashing{

var attSlashing eth.AttSlashing
if ver >= version.Electra {
attSlashing = &eth.AttesterSlashingElectra{
Attestation_1: &eth.IndexedAttestationElectra{
AttestingIndices: []uint64{0},
Data: attData1,
Signature: secretKeys[0].Sign(sigRoot1[:]).Marshal(),
},
Attestation_2: &eth.IndexedAttestationElectra{
AttestingIndices: []uint64{0},
Data: attData2,
Signature: secretKeys[0].Sign(sigRoot2[:]).Marshal(),
},
}
} else {
attSlashing = &eth.AttesterSlashing{
Attestation_1: &eth.IndexedAttestation{
AttestingIndices: []uint64{0},
Data: attData1,
@@ -137,8 +186,10 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
Data: attData2,
Signature: secretKeys[0].Sign(sigRoot2[:]).Marshal(),
},
},
}))
}
}
require.NoError(t, sbb.SetAttesterSlashings([]eth.AttSlashing{attSlashing}))

header1 := &eth.BeaconBlockHeader{
Slot: 0,
ProposerIndex: 1,
@@ -179,11 +230,21 @@ func BlockRewardTestSetup(t *testing.T, forkName string) (state.BeaconState, int
sszBytes := primitives.SSZBytes(slot0bRoot)
r, err := signing.ComputeSigningRoot(&sszBytes, domain)
require.NoError(t, err)

// Bits set in sync committee bits determine which validators will be treated as participating in sync committee.
// These validators have to sign the message.
sig1, err := blst.SignatureFromBytes(secretKeys[47].Sign(r[:]).Marshal())
var scValIdx1 int
var scValIdx2 int
if ver >= version.Electra {
scValIdx1 = 14
scValIdx2 = 27
} else {
scValIdx1 = 47
scValIdx2 = 19
}
sig1, err := blst.SignatureFromBytes(secretKeys[scValIdx1].Sign(r[:]).Marshal())
require.NoError(t, err)
sig2, err := blst.SignatureFromBytes(secretKeys[19].Sign(r[:]).Marshal())
sig2, err := blst.SignatureFromBytes(secretKeys[scValIdx2].Sign(r[:]).Marshal())
require.NoError(t, err)
aggSig := bls.AggregateSignatures([]bls.Signature{sig1, sig2}).Marshal()
err = sbb.SetSyncAggregate(&eth.SyncAggregate{SyncCommitteeBits: scBits, SyncCommitteeSignature: aggSig})
@@ -211,14 +272,14 @@ func TestBlockRewards(t *testing.T) {
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
require.Equal(t, http.StatusBadRequest, writer.Code)
e := &httputil.DefaultJsonError{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
require.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Block rewards are not supported for Phase 0 blocks", e.Message)
})
t.Run("altair", func(t *testing.T) {
st, sbb, err := BlockRewardTestSetup(t, "altair")
st, sbb, err := BlockRewardTestSetup(t, version.Altair)
require.NoError(t, err)

mockChainService := &mock.ChainService{Optimistic: true}
@@ -241,7 +302,7 @@ func TestBlockRewards(t *testing.T) {
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BlockRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "12", resp.Data.ProposerIndex)
@@ -254,7 +315,7 @@ func TestBlockRewards(t *testing.T) {
assert.Equal(t, false, resp.Finalized)
})
t.Run("bellatrix", func(t *testing.T) {
st, sbb, err := BlockRewardTestSetup(t, "bellatrix")
st, sbb, err := BlockRewardTestSetup(t, version.Bellatrix)
require.NoError(t, err)

mockChainService := &mock.ChainService{Optimistic: true}
@@ -277,7 +338,7 @@ func TestBlockRewards(t *testing.T) {
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BlockRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "12", resp.Data.ProposerIndex)
@@ -290,7 +351,7 @@ func TestBlockRewards(t *testing.T) {
assert.Equal(t, false, resp.Finalized)
})
t.Run("capella", func(t *testing.T) {
st, sbb, err := BlockRewardTestSetup(t, "capella")
st, sbb, err := BlockRewardTestSetup(t, version.Capella)
require.NoError(t, err)

mockChainService := &mock.ChainService{Optimistic: true}
@@ -313,7 +374,7 @@ func TestBlockRewards(t *testing.T) {
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BlockRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "12", resp.Data.ProposerIndex)
@@ -326,7 +387,7 @@ func TestBlockRewards(t *testing.T) {
assert.Equal(t, false, resp.Finalized)
})
t.Run("deneb", func(t *testing.T) {
st, sbb, err := BlockRewardTestSetup(t, "deneb")
st, sbb, err := BlockRewardTestSetup(t, version.Deneb)
require.NoError(t, err)

mockChainService := &mock.ChainService{Optimistic: true}
@@ -349,7 +410,7 @@ func TestBlockRewards(t *testing.T) {
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BlockRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "12", resp.Data.ProposerIndex)
@@ -361,6 +422,42 @@ func TestBlockRewards(t *testing.T) {
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
})
t.Run("electra", func(t *testing.T) {
st, sbb, err := BlockRewardTestSetup(t, version.Electra)
require.NoError(t, err)

mockChainService := &mock.ChainService{Optimistic: true}
s := &Server{
Blocker: &testutil.MockBlocker{SlotBlockMap: map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock{
0: phase0block,
2: sbb,
}},
OptimisticModeFetcher: mockChainService,
FinalizationFetcher: mockChainService,
BlockRewardFetcher: &BlockRewardService{
Replayer: mockstategen.NewReplayerBuilder(mockstategen.WithMockState(st)),
DB: db,
},
}

url := "http://only.the.slot.number.at.the.end.is.important/2"
request := httptest.NewRequest("GET", url, nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}

s.BlockRewards(writer, request)
require.Equal(t, http.StatusOK, writer.Code)
resp := &structs.BlockRewardsResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
assert.Equal(t, "4", resp.Data.ProposerIndex)
assert.Equal(t, "15714490", resp.Data.Total)
assert.Equal(t, "89442", resp.Data.Attestations)
assert.Equal(t, "48", resp.Data.SyncAggregate)
assert.Equal(t, "7812500", resp.Data.AttesterSlashings)
assert.Equal(t, "7812500", resp.Data.ProposerSlashings)
assert.Equal(t, true, resp.ExecutionOptimistic)
assert.Equal(t, false, resp.Finalized)
})
}

func TestAttestationRewards(t *testing.T) {

@@ -1,6 +1,7 @@
package beacon

import (
"os"
"testing"

"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
@@ -21,5 +22,5 @@ func TestMain(m *testing.M) {
flags.Init(resetFlags)
}()

m.Run()
os.Exit(m.Run())
}

Some files were not shown because too many files have changed in this diff