Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 13:28:01 -05:00)

Compare commits: 87 commits
ba2333069a...backfill-d
| SHA1 |
|---|
| acd24232c5 |
| 4c1560540c |
| 5751dbf134 |
| 426fbcc3b0 |
| a3baf98b05 |
| 5a897dfa6b |
| 90190883bc |
| 64ec665890 |
| fdb06ea461 |
| 0486631d73 |
| 47764696ce |
| b2d350b988 |
| 41e7607092 |
| cd429dc253 |
| 5ced1125f2 |
| f67ca6ae5e |
| 9742333f68 |
| c811fadf33 |
| 55b9448d41 |
| 10f8d8c26e |
| 4eab41ea4c |
| 683608e34a |
| fbbf2a1404 |
| 82f556c50f |
| c88aa77ac1 |
| 0568bec935 |
| e463bcd1e1 |
| 5f8eb69201 |
| 4b98451649 |
| 0aa248e663 |
| 6973cd2c5f |
| 4e47905884 |
| a94ea1e5f5 |
| c0ad87df4b |
| 515590e7fe |
| 83a171b439 |
| 4946b007ab |
| 3f10439de1 |
| 5b20352ac6 |
| d5ca327c30 |
| 38955fd08c |
| 71f05b597f |
| 0d742c6f88 |
| 06b5409ff0 |
| 9805e90d73 |
| 537f3cb863 |
| b45e87abd6 |
| 4c4b12cca7 |
| aabded250f |
| 4f9e56fc70 |
| 2a86132994 |
| 74c47e25a9 |
| 28eb1a4c3c |
| 1f89394727 |
| bf1095c782 |
| b24fe0d23a |
| cbe50269de |
| 4ed2953fcf |
| 915837d059 |
| 26b276660f |
| 580509f2f4 |
| be144da099 |
| cc2565a422 |
| d86353ea9d |
| 45d6002411 |
| 08c855fd4b |
| 7c86b5d737 |
| 023287f7df |
| 1432867c92 |
| 18efd620dc |
| 6139d58fa5 |
| 0ea5e2cf9d |
| 29fe707143 |
| d68196822b |
| 924fe4de98 |
| fe9dd255c7 |
| 83d75bcb78 |
| ba5f7361ad |
| 8fa956036a |
| 58ce1c25f5 |
| 98532a2df3 |
| 08be6fde92 |
| 4585cdc932 |
| aa47435c91 |
| 80eba4e6dd |
| 606294e17f |
| 977e923692 |
.bazelrc (1 change)

@@ -34,6 +34,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
 build:release --compilation_mode=opt
 build:release --stamp
 build:release --define pgo_enabled=1
+build:release --strip=always

 # Build binary with cgo symbolizer for debugging / profiling.
 build:cgo_symbolizer --copt=-g
.github/actions/gomodtidy/Dockerfile (vendored, 2 changes)

@@ -1,4 +1,4 @@
-FROM golang:1.24-alpine
+FROM golang:1.25.1-alpine

 COPY entrypoint.sh /entrypoint.sh
.github/workflows/changelog.yml (vendored, 2 changes)

@@ -9,7 +9,7 @@ on:

 jobs:
   run-changelog-check:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     steps:
       - name: Checkout source code
         uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
.github/workflows/check-specrefs.yml (vendored, 2 changes)

@@ -3,7 +3,7 @@ on: [push, pull_request]

 jobs:
   check-specrefs:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4

     steps:
       - name: Checkout repository
.github/workflows/clang-format.yml (vendored, 2 changes)

@@ -10,7 +10,7 @@ on:

 jobs:
   clang-format-checking:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     steps:
       - uses: actions/checkout@v2
       # Is this step failing for you?
.github/workflows/fuzz.yml (vendored, 8 changes)

@@ -10,13 +10,13 @@ permissions:

 jobs:
   list:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     timeout-minutes: 180
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - id: list
         uses: shogo82148/actions-go-fuzz/list@v0
         with:
@@ -25,7 +25,7 @@ jobs:
       fuzz-tests: ${{steps.list.outputs.fuzz-tests}}

   fuzz:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     timeout-minutes: 360
     needs: list
     strategy:
@@ -36,7 +36,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '1.23.5'
+          go-version: '1.25.1'
       - uses: shogo82148/actions-go-fuzz/run@v0
         with:
           packages: ${{ matrix.package }}
.github/workflows/go.yml (vendored, 30 changes)

@@ -11,7 +11,7 @@ on:
 jobs:
   formatting:
     name: Formatting
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -22,7 +22,7 @@ jobs:

   gosec:
     name: Gosec scan
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     env:
       GO111MODULE: on
     steps:
@@ -31,7 +31,7 @@ jobs:
       - name: Set up Go 1.24
         uses: actions/setup-go@v4
         with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
       - name: Run Gosec Security Scanner
         run: | # https://github.com/securego/gosec/issues/469
           export PATH=$PATH:$(go env GOPATH)/bin
@@ -40,31 +40,31 @@ jobs:

   lint:
     name: Lint
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-
-      - name: Set up Go 1.24
-        uses: actions/setup-go@v4
         with:
-          go-version: '1.24.0'
-        id: go
+          fetch-depth: 0
+
+      - name: Set up Go 1.25.1
+        uses: actions/setup-go@v5
+        with:
+          go-version: '1.25.1'

       - name: Golangci-lint
-        uses: golangci/golangci-lint-action@v5
+        uses: golangci/golangci-lint-action@v8
         with:
-          version: v1.64.5
-          args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
+          version: v2.4

   build:
     name: Build
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     steps:
-      - name: Set up Go 1.x
+      - name: Set up Go 1.25.1
        uses: actions/setup-go@v4
        with:
-          go-version: '1.24.0'
+          go-version: '1.25.1'
        id: go

      - name: Check out code into the Go module directory
.github/workflows/horusec.yaml (vendored, 4 changes)

@@ -8,7 +8,7 @@ on:
 jobs:
   Horusec_Scan:
     name: horusec-Scan
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-4
     if: github.ref == 'refs/heads/develop'
     steps:
       - name: Check out code
@@ -19,4 +19,4 @@ jobs:
       - name: Running Security Scan
         run: |
           curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
-          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
+          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
.golangci.yml (121 changes)

@@ -1,90 +1,41 @@
+version: "2"
 run:
   timeout: 10m
-  go: '1.23.5'
-
-issues:
-  exclude-files:
-    - validator/web/site_data.go
-    - .*_test.go
-  exclude-dirs:
-    - proto
-    - tools/analyzers
-
+  go: 1.23.5
 linters:
-  enable-all: true
-  disable:
-    # Deprecated linters:
-
-    # Disabled for now:
-    - asasalint
-    - bodyclose
-    - containedctx
-    - contextcheck
-    - cyclop
-    - depguard
-    - dogsled
-    - dupl
-    - durationcheck
-    - errname
-    - err113
-    - exhaustive
-    - exhaustruct
-    - forbidigo
-    - forcetypeassert
-    - funlen
-    - gci
-    - gochecknoglobals
-    - gochecknoinits
-    - goconst
-    - gocritic
-    - gocyclo
-    - godot
-    - godox
-    - gofumpt
-    - gomoddirectives
-    - gosec
-    - inamedparam
-    - interfacebloat
-    - intrange
-    - ireturn
-    - lll
-    - maintidx
-    - makezero
-    - mnd
-    - musttag
-    - nakedret
-    - nestif
-    - nilnil
-    - nlreturn
-    - noctx
-    - nolintlint
-    - nonamedreturns
-    - nosprintfhostport
-    - perfsprint
-    - prealloc
-    - predeclared
-    - promlinter
-    - protogetter
-    - recvcheck
-    - revive
-    - spancheck
+  enable:
+    - errcheck
+    - ineffassign
+    - govet
+  disable:
+    - staticcheck
+    - stylecheck
+    - tagalign
+    - tagliatelle
+    - thelper
+    - unparam
+    - usetesting
+    - varnamelen
+    - wrapcheck
+    - wsl
+    - unused
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
-
-linters-settings:
-  gocognit:
-    # TODO: We should target for < 50
-    min-complexity: 65
-
-output:
-  print-issued-lines: true
-  sort-results: true
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - validator/web/site_data.go
+      - .*_test.go
+      - proto
+      - tools/analyzers
+      - third_party$
+      - builtin$
+      - examples$
CHANGELOG.md (301 changes)

@@ -4,6 +4,307 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v6.1.2](https://github.com/prysmaticlabs/prysm/compare/v6.1.1...v6.1.2) - 2025-10-10

This release has several important fixes to improve Prysm's peering, stability, and attestation inclusion on mainnet and all testnets. All node operators are encouraged to update to this release as soon as practical for the best mainnet performance.

### Added

- Added a 1-minute timeout on PruneAttestationOnEpoch operations to prevent very large bolt transactions (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15746)
- Added an expected delay before broadcasting light client p2p messages. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15776)
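The pruning timeout above is the standard `context.WithTimeout` pattern. A minimal sketch, assuming a hypothetical `pruneAttestations` helper standing in for the real bolt-backed operation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// pruneAttestations is a hypothetical stand-in for a long-running,
// database-backed pruning operation such as PruneAttestationOnEpoch.
func pruneAttestations(ctx context.Context) error {
	select {
	case <-time.After(5 * time.Second): // simulated slow prune
		return nil
	case <-ctx.Done():
		return ctx.Err() // give up rather than hold a huge transaction open
	}
}

func main() {
	// Bound the operation to one minute so a slow prune cannot grow
	// into a very large bolt transaction.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if err := pruneAttestations(ctx); err != nil {
		fmt.Println("prune aborted:", err)
	}
}
```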
### Changed

- Replaced reflect.TypeOf with reflect.TypeFor (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15627)
- Bazel builds with `--config=release` now properly apply `--strip=always` to strip debug symbols from the release assets. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15774)
- Add sources for compute_fork_digest to specrefs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15699)
- Aggregate logs when broadcasting data column sidecars (one per root instead of one per sidecar). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15748)
- `c-kzg-4844`: Update from `v2.1.1` to `v2.1.5`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15708)
- Process pending attestations as soon as the block arrives. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15791)
- Compare received LC messages over gossipsub with locally computed ones before forwarding. Also, no longer save updates. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15783)
- Optimize pending attestation processing by adding batching. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15801)
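On the `reflect.TypeFor` item above: Go 1.22 added a generic helper that replaces the value-based idiom for obtaining a `reflect.Type`. A before/after sketch with an illustrative struct, not a Prysm type:

```go
package main

import (
	"fmt"
	"reflect"
)

// attestation is an illustrative type, not Prysm's.
type attestation struct{ Slot uint64 }

func main() {
	// Old idiom: build a typed nil pointer, then take its element type.
	before := reflect.TypeOf((*attestation)(nil)).Elem()

	// New idiom: reflect.TypeFor (Go 1.22+) expresses the intent directly.
	after := reflect.TypeFor[attestation]()

	fmt.Println(before == after) // true
}
```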
### Removed

- Removed unused configs and hid Prysm-specific configs from the `/eth/v1/config/spec` endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15797)
### Fixed

- SSZ-QL: Support nested `List` type (e.g., `ExecutionPayload.Transactions`). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15725)
- Fixed "Unsupported config field kind; value forwarded verbatim" errors for type string. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15773)
- Fix `/eth/v1/config/spec` endpoint to properly skip omitted values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15777)
- Fix ProduceSyncCommitteeContribution not returning an error when the committee index is out of range. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15770)
- Improvements to getDuties v2: replaces the expensive helpers.PrecomputeCommittees() with CommitteeAssignments. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15784)
- Avoid unnecessary calls to `ExitInformation()`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15764)
- `inclusionProofKey`: Include the commitments in the key. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15795)
- Do not reject peers if they have a mismatched version|digest when the next fork epoch is FAR_FUTURE_EPOCH. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15798)
- Don't include entries in the fork schedule if their epoch is set to the far future epoch. Avoids reporting next_fork_version == <unscheduled fork>. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15799)
- Wait for custody info to be initialized before querying it. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15804)
- Fixes the `level=error msg="Could not clean up dirty states" error="OriginBlockRoot: not found in db" prefix=state-gen` error when starting in Kurtosis. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15808)
- Correctly clear disconnected peers from `connected_libp2p_peers` and `connected_libp2p_peers_average_scores`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15807)
- `buildStatusFromStream`: Respond with `statusV2` only if Fulu is enabled. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15818)
- Send our real earliest available slot when sending a Status request post-Fulu, instead of `0`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15818)
- Switch to the built-in min/max (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15817)
- `findPeersWithSubnets`: If the filter function returns an error for a given peer, log an error and skip the peer instead of aborting the whole function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15815)
- `computeIndicesByRootByPeer`: If the loop returns an error for a given peer, log an error and skip the peer instead of aborting the whole function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15815)
- Fixed issue #15738 where separate goroutines assume sole responsibility for topic registration. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15779)
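The min/max item above refers to the `min` and `max` built-ins added in Go 1.21, which remove the need for per-type helper functions. A sketch:

```go
package main

import "fmt"

// maxUint64 is the kind of hand-rolled helper such cleanups remove.
func maxUint64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	a, b := uint64(3), uint64(7)
	fmt.Println(maxUint64(a, b)) // old helper: 7
	fmt.Println(max(a, b))       // built-in (Go 1.21+): 7
	fmt.Println(min(a, b))       // built-in (Go 1.21+): 3
}
```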
## [v6.1.0](https://github.com/prysmaticlabs/prysm/compare/v6.0.5...v6.1.0) and [v6.1.1](https://github.com/prysmaticlabs/prysm/compare/v6.1.0...v6.1.1) - 2025-09-26

This release has support for Fusaka testnets as well as many mainnet improvements. Testnet operators are required to update prior to the testnet fork date. See [PR #15721](https://github.com/OffchainLabs/prysm/pull/15721).

Mainnet operators are encouraged to update per their regular update cadence.

Note: This release was re-issued as v6.1.1 to distribute release assets without debug symbols. See issue [#15760](https://github.com/OffchainLabs/prysm/issues/15760).

#### Noteworthy improvements, changes and bugfixes:

- The `--disable-experimental-state` beacon-node flag has been removed, marking the full graduation of the [Copy-on-write design](https://hackmd.io/zlTJ6Qe_RiueT3y2R77BvA) for BeaconState fields, which reduces the memory overhead of keeping multiple BeaconStates in RAM for block processing. Congrats @rkapka!
- The behavior set by the `--attest_timely` flag is now on by default, with the flag itself deprecated.
- GetDutiesV2 introduced, lowering duty request latency and beacon-node load. Multiple other improvements and bugfixes have been made to harden the validator run loop.
- New validator flag `--max-health-checks` configures a validator to switch to a fallback beacon node after the given number of health check failures.
- Improvements to the rest-mode validator, defaulting to SSZ where available and adding SSZ support to more Beacon API endpoints.
- Beacon API now honors the gzip content-encoding header (see the request sketch after this list).
- Log timestamps now include milliseconds.
- Full Fusaka support for testnets!

**Special thanks to external contributors!**: @Alleysira, @KaloyanTanev, @rose2221

[1] To override this limit, use the validator flag `--suggested-gas-limit` or set the `builder.gas_limit` setting in your [proposer settings file](https://prysm.offchainlabs.com/docs/configure-prysm/fee-recipient/#advanced-configure-mev-builder-and-gas-limit).
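Since the Beacon API now honors gzip content encoding, a client can opt in with an `Accept-Encoding` header. A minimal sketch; the local address and the endpoint chosen are assumptions for illustration only:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// http://localhost:3500 is an assumed local Beacon API address.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:3500/eth/v1/node/version", nil)
	if err != nil {
		panic(err)
	}
	// Setting the header manually disables net/http's transparent
	// decompression, so we must check Content-Encoding ourselves.
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var body io.Reader = resp.Body
	if resp.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(resp.Body)
		if err != nil {
			panic(err)
		}
		defer gz.Close()
		body = gz
	}
	out, _ := io.ReadAll(body)
	fmt.Println(string(out))
}
```

The explicit `Content-Encoding` check is deliberate: Go's transport only decompresses transparently when it added the `Accept-Encoding` header itself.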
### Added

- PeerDAS: Add `CustodyInfo` in `BeaconNode`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15378)
- GetDutiesV2 gRPC function: removes the committee list from duties, replaced with the committee length and the validator committee index. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15273)
- Add SSZ support for two attestation APIs: `/eth/v1/validator/attestation_data` and. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15377)
- Added feature flag for the validator client to use get duties v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15380)
- PeerDAS: Implement DAS. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15367)
- `verifyBlobCommitmentCount`: Print max allowed blob count in error message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15386)
- Data column support for the beacon API event endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15387)
- Implement EIP-7917: Stable proposer lookahead. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15129)
- Implement `dataColumnSidecarByRootRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15405)
- New ssz-only flag for the validator client to enable calling REST APIs in SSZ, starting with the get block endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15390)
- Implement `dataColumnSidecarsByRangeRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15421)
- Add SSZ support for the `submitPoolAttestationsV2` beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15422)
- New `StatusV2` proto message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- Implement `SendDataColumnSidecarsByRangeRequest`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15430)
- Implement `SendDataColumnSidecarsByRootRequest`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15430)
- Implement the beacon API blob sidecar endpoint for Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15436)
- PeerDAS: Implement the new Fulu Metadata. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15440)
- PeerDAS: Implement reconstruction. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15454)
- Implement engine method `GetBlobsV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15469)
- Implement execution `ReconstructDataColumnSidecars`, which reconstructs data column sidecars from data fetched from the execution layer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15469)
- New `--batch-verifier-limit` flag to configure the max number of signatures to batch verify on gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15467)
- `disable-attest-timely` flag to disable attest timely. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15410)
- Added `max-health-checks` flag that sets the maximum number of times the validator tries to check the health of the beacon node before timing out; 0 or a negative number means indefinite (the default is 0). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Add method `VersionToForkEpochMap()` to the `BeaconChainConfig` in the `params` package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15482)
- Add log capitalization analyzer and apply changes across the codebase. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15452)
- Slot-aware cache for seen data column gossip p2p to reduce memory usage. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15477)
- Gzip compression for the Beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14982)
- Implement data column sidecars reconstruction with data retrieved from the execution client when receiving a block via gossip. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15483)
- Add support for parsing and handling `ExecutionPayloadAndBlobsBundleV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15503)
- Added new PRYSM_API_OVERRIDE_ACCEPT environment variable to override the SSZ accept header, as a replacement for the flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15433)
- Implements the `/eth/v1/beacon/states/{state_id}/proposer_lookahead` beacon API endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15525)
- Added new metadata fields (attnets, syncnets, custody_group_count) to `/eth/v1/node/identity`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15506)
- Add BLOB_SCHEDULE field to the `/eth/v1/config/spec` endpoint response to expose blob scheduling configuration for networks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15485)
- Add timing metric `publish_block_v2_duration_milliseconds` to measure processing duration of the `PublishBlockV2` beacon API endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15539)
- Add Fulu case for `saveStatesEfficientInternal`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15553)
- Support for the Fusaka `nfd` ENR field, and changes to the semantics of the eth2 field. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15501)
- Implement post-Fulu MEV-boost protocol changes where relays only return status codes for blinded block submissions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15486)
- Added Fulu block support to StreamBlocksAltair. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15583)
- All outbound HTTP requests from the validator client now include a custom `User-Agent` header in the format `Prysm/<name>/<version>`. This enhances observability and enables upstream systems to correctly identify Prysm validator clients by their name and version. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15574)
- Fixes [#15435](https://github.com/OffchainLabs/prysm/issues/15435). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15574)
- Data columns syncing for Fusaka. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15564)
- Added specification references which map spec to implementation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15592)
- Warm the data columns storage cache at start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15629)
- Add `--data-column-path` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15629)
- Initialize package for SSZ Query Language. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15588)
- In FetchDataColumnSidecars, after retrieving sidecars from peers, if some sidecars are still missing for a given root and a reconstruction is possible (combining sidecars already retrieved from peers and sidecars in the storage), then reconstruct the missing sidecars instead of trying to fetch them from peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15593)
- Fulu block proposal changes for the beacon API and gRPC. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15628)
- Retry fetching origin data column sidecars when starting from a checkpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15634)
- Aggregate and pack sync committee messages into blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15608)
- Support `List` type for SSZ-QL. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15637)
- Configured the beacon node to seek peers when we have validator custody requirements. If one or more validators are connected to the beacon node, then the beacon node should seek a diverse set of peers such that broadcasting to all data column subnets for a block proposal is more efficient. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15654)
- SSZ-QL: Add element information for `Vector` type. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15668)
- SSZ-QL: Support multi-dimensional tag parsing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15668)
- Added more metadata for debug logs when initial sync requests fail with "invalid data returned from peer" errors. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15674)
- Adding Fulu types for web3signer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15498)
- Added erigon/caplin to known p2p agent strings. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15678)
- Add Fulu fork transition tests for mainnet and minimal configurations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15666)
- Fulu proposer lookahead epoch processing tests for mainnet and minimal configurations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15667)
- Populate sszInfo of variable-length fields in AnalyzeObjects. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15676)
- KZG proof batch verification for data column gossip validation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15617)
- Added flag `--p2p-colocation-whitelist` to accept CIDRs which will bypass the p2p colocation restrictions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15685)
- Fulu spec tests coverage for the general package. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15682)
- Implemented syncing in a disjoint network with respect to the data column sidecars subscribed to by peers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Add retry logic when GetBlobsV2 is called (a generic retry sketch follows this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15520)
- Call GetBlobsV2 as soon as we receive the first data column sidecar or block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15520)
- Added new post-Fulu /eth/v1/beacon/blobs/{block_id} endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15610)
- SSZ-QL: Handle `Bitlist` and `Bitvector` types. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15704)
- Adding `/eth/v1/debug/beacon/data_column_sidecars/{block_id}` endpoint. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15701)
- Support Fulu genesis block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15652)
- Update spectests to 1.6.0-beta.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15741)
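Several items above add retry behavior (around `GetBlobsV2` and origin data column sidecars). A generic retry-with-delay sketch of the shape such a loop takes; this is an illustration, not Prysm's actual implementation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry calls fn up to attempts times, sleeping between tries and
// honoring context cancellation. Illustrative helper only.
func retry(ctx context.Context, attempts int, delay time.Duration, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return fmt.Errorf("all %d attempts failed: %w", attempts, err)
}

func main() {
	calls := 0
	err := retry(context.Background(), 3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return errors.New("blobs not yet available")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```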
### Changed

- `parseIndices`: Return `[]int` instead of `[]uint64`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15386)
- Reclaim memory manually in some tests that fuzz the beacon state. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15395)
- When the REST API is enabled, the get block API defaults to requesting and receiving SSZ instead of JSON; JSON is the fallback. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15390)
- Remove "invalid" from logs for an incoming blob sidecar that is missing its parent or has an out-of-range slot. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15428)
- In `TopicFromMessage`: Do not assume any more that all Fulu-specific topics are V3 only. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- `readChunkedDataColumnSidecar`: Add `validationFunctions` parameter and add tests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- Put the initiation of the LC Store behind the `enable-light-client` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15464)
- Default batch signature verification limit increased from 50 to 1000. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15467)
- Increase mainnet DefaultBuilderGasLimit from 36M to 45M. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15455)
- Attest timely is now the default. The `attest-timely` flag is now deprecated. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15410)
- Move the data column reconstruction log to a more accurate place in the code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15475)
- Makes the multivalue slice permanent in the state and removes old paths. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15414)
- Previously, we optimistically believed the beacon node was healthy and tried to get chain start, but now we do a health check at the start. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Optimize proposer inclusion proof calculation by pre-caching subtries. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15473)
- Move setter/getter functions for LC Bootstrap into LcStore for a unified interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15476)
- Changed `enable-duties-v2` to `disable-duties-v2` to default to using duties v2. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15445)
- Changed `uint64` genesis time to use `time.Time`. Also did some refactoring and cleanup that was enabled by these changes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15419)
- Add milliseconds to log timestamps. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15496)
- Move setter/getter functions for LC Updates into LcStore for a unified interface. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15488)
- Change LC Bootstrap logic to only save bootstraps on finalized checkpoints instead of every block. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15497)
- Update links to consensus-specs to point to the `master` branch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15523)
- Changed from an in-memory to a persistent discv5 DB so that local node information, including the key, persists and the ENR sequence number stays deterministic across restarts. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15519)
- Fix some nits associated with data column sidecar verification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15521)
- Include the state root in StateNotFoundError for better debugging of consensus validation failures. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15533)
- When shutting down the sync service, we now send p2p goodbye messages in parallel to maximize the chances of propagating goodbyes to all peers before an unsafe shutdown. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15542)
- Do not compare the liveness response with LH in the e2e Beacon API evaluator. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15556)
- Moved the broadcast and event notifier logic for saving LC updates to the store function. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Fixed the issue with broadcasting more than twice per LC finality update, and the if-case bug. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Separated the finality update validation rules for saving and broadcasting. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15540)
- Update validator custody to the latest specification, including the new status message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15532)
- Beacon API: optimize validator lookup for large batch request sizes. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15558)
- Check the pending block is in forkchoice before importing a pending attestation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15547)
- Redesign the pending attestation queue. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15024)
- Replaced hardcoded `grpc-gateway-port` with `flags.HTTPServerPort.Name` in `testing/endtoend/components/validator.go`, resolving an inline TODO for improved flag consistency. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15236)
- Refactor `htrutil.go` by removing redundant code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15453)
- Improved sync unaggregated attestation cache key generation outside of the lock path. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15572)
- Move aggregated attestation cache key generation outside of critical locks to improve performance. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15579)
- Renamed various variables/functions to be more clear. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15529)
- Update consensus spec to v1.6.0-alpha.4 and implement data column support for forkchoice spectests. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15590)
- Reject incoming connections when the fork schedule of the connecting peer (parsed from their ENR) has a matching next_fork_epoch, but a mismatched next_fork_version or nfd (next fork digest). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15604)
- Update gohashtree to v0.0.5-beta. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15619)
- Updated consensus spec from v1.6.0-alpha.4 to v1.6.0-alpha.5 with adjusted minimal config parameters. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15621)
- Changed old atomic functions to the new atomic.Int types for safer and clearer code (see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15625)
- Start from the justified checkpoint by default. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15636)
- Updated consensus spec from v1.6.0-alpha.5 to v1.6.0-alpha.6. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15658)
- Updated outdated documentation links for Web3Signer and Why Bazel. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15631)
- Changed the validatorpb.SignRequest_AggregateAttestationAndProof signing type to use AggregateAttestationAndProofV2 on web3signer. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15498)
- Pre-calculate exit epoch, churn and active balance before processing slashings to reduce CPU load. [[PR]](https://github.com/prysmaticlabs/prysm/pull/14990)
- Switching the default of the validator client REST call for submit block from JSON to SSZ. Fallback JSON will be attempted. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15645)
- Deprecated and added an error to the /prysm/v1/beacon/blobs endpoint for the post-Fulu fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15643)
- Upgraded gossipsub to v0.14.2 and libp2p to v0.39.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15677)
- Prysm will now downscore peers that return invalid block_by_range responses. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15686)
- Filtering peers for data column subnets: Added a one-epoch slack to the peer's head slot view. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fetching data column sidecars: If not all requested sidecars are available for a given root, return the successfully retrieved ones along with a map indicating which could not be fetched. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fetching origin data column sidecars: If only some sidecars are fetched, save the retrieved ones and retry fetching the missing ones on the next attempt. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Renamed the `--enable-experimental-backfill` flag to `--enable-backfill` to signal that it is more mature. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15690)
- Restrict best LC update collection to canonical blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15585)
- PeerDAS: Wait for a random delay, then reconstruct data column sidecars and immediately reseed, instead of immediately reconstructing, waiting and then reseeding. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15705)
- Clarified misleading log messages in the beacon-chain/rpc/service gRPC module. [[PR]](https://github.com/prysmaticlabs/prysm/pull/13063)
- Broadcast the block and then the sidecars, instead of broadcasting block and sidecars concurrently. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15720)
- Broadcast and receive sidecars concurrently, instead of sequentially. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15720)
- Changed the blst dependency from `http_archive` to `go_repository` so that gazelle can keep it in sync with go.mod. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15709)
- Updated go to v1.25.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Updated rules_go to v0.57.0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Updated protobuf to 28.3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15641)
- Set Fulu fork epochs for the Holesky, Hoodi, and Sepolia testnets. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15721)
- Improve logging of data column sidecars. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15728)
- Updated go.mod to v1.25.1. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15740)
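On the atomic.Int item above: Go 1.19 introduced typed atomics in `sync/atomic` that replace the free-function style. A sketch of the difference:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// Old style: a bare integer manipulated via package-level functions.
	// Nothing stops other code from accessing it non-atomically.
	var oldCounter uint64
	atomic.AddUint64(&oldCounter, 1)

	// New style: atomic.Uint64 makes atomicity part of the type, so the
	// value cannot accidentally be read or written without atomics.
	var counter atomic.Uint64
	counter.Add(1)

	fmt.Println(atomic.LoadUint64(&oldCounter), counter.Load()) // 1 1
}
```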
### Deprecated

- Deprecated `p2p-metadata` flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15554)
### Removed

- Removed the //tools/eth1voting tool. This is no longer needed, as the beacon chain no longer uses eth1data voting since Electra. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15415)
- Remove deposit count from the sync new block log. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15420)
- Unused `DataColumnIdentifier` proto message. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15423)
- The validator client will no longer need to call the canonical head API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15480)
- Partially reverting PR #15390: removing the `ssz-only` debug flag until there is a real use case for the flag. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15433)
### Fixed

- Added regression test for [PR 15369](https://github.com/OffchainLabs/prysm/pull/15369). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15379)
- Added the missing `meta` field to the response of the endpoint `/eth/v1/node/peers` to align with the Beacon API spec (#15370). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15371)
- Fix blob metric name for peer count. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15412)
- Non-deterministic output order of `dataColumnSidecarByRootRPCHandler`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15441)
- Fixed the versioning bug for light client data types in the Beacon API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15400)
- `--chain-config-file`: Do not use mainnet boot nodes any more. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15460)
- Fix panic on dutiesv2 when there is no committee assignment in the epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15466)
- Allow SSZ requests for pending deposits, partial withdrawals and consolidations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15474)
- Validator client shuts down cleanly on error instead of a fatal error. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15401)
- Fixes an edge case where starting the validator client with new validator keys starts the slot ticker too early, resulting in replayed slots in the main runner loop. Also fixes an edge case of replayed slots when waiting for account activations. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15479)
- DV aggregations failing in the first slot of the epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15156)
- Skip genesis block retrieval when EIP-6110 deposit requests have started, to prevent "pruned history unavailable" errors with execution clients that have pruned pre-merge data. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15494)
- Fixed lookahead initialization at the Fulu fork. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15450)
- Write the `Content-Encoding` header in the response properly when gzip encoding is requested. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15499)
- Subnets subscription: Avoid dynamic subscribing blocking in case not enough peers per subnet are found. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15471)
- Do not apply the gzip middleware to the event stream API. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15517)
- Fixed various reasons why a node is banned by its peers when it stops. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15505)
- Use `MinEpochsForDataColumnSidecarsRequest` in `WithinDAPeriod` when in Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15522)
- Return a zero value for `Eth-Consensus-Block-Value` on error to avoid missed block proposals. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15526)
- Moved the reconstruction lock to prevent unnecessary work. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15528)
- Fixed variable names, links, and typos in DAS core code. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15524)
- Fix builder bid version compatibility to support Electra bids with Fulu blocks. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15536)
- Align the submitPoolSyncCommitteeSignatures response with the Beacon API specification. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15516)
- Trigger the payload attribute event as soon as an early block is processed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15541)
- Beacon API proposer duty Fulu computation. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15534)
- Fixed the max proofs in `BlobsBundleV2`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15530)
- Prevent a race on double `ReceiveBlock`. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15565)
- Fixed [#15544](https://github.com/OffchainLabs/prysm/issues/15544): Persist the metadata sequence number if it is needed (e.g., the static peer ID option is used or Fulu is enabled). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15554)
- Fix the validateConsensus endpoint handler. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15548)
- The builder version check was using the head block version instead of the current fork's version based on slot; fixes e2e from https://github.com/OffchainLabs/prysm/commit/57e27199bdb9b3ef1af14c3374999aba5e0788a3. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15568)
- Don't submit duplicate `SignedContributionAndProof` messages. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15571)
- Genesis state, timestamp and validators root are now ubiquitously available at node startup, supporting tech debt cleanup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15470)
- Fixed a condition where the blob cache could panic when the cache entry contained fewer sidecars than expected, or none at all. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15581)
- Fixed the endpoint response to return 404 or 400 after the isOptimistic check. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15559)
- Safeguard against accidental out-of-bounds array access in the dataColumnSidecars method. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15586)
- Fixed NewSignedBeaconBlock calls to use the Block field for proper equivocation handling. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15595)
- Fixed a regression in the find-peer functions introduced in PR #15471, where nodes with equal sequence numbers were incorrectly skipped and the peer count was incorrectly reduced when replacing nodes with higher sequence numbers. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15578)
- Fix bug where a stale computed value in a closure excludes newly required (e.g. attestation) subscriptions. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15603)
- Fix bug where the arguments of fillInForkChoiceMissingBlocks were incorrectly placed. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15639)
- Fix next epoch proposer duties in Fulu by advancing the state to the beginning of the current epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15642)
- Fix getBlockAttestationsV2 to return [] instead of null when data is empty. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15651)
- Fixed the issue of empty dirs not being deleted when using --blob-storage-layout=by-epoch. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15573)
- Start topic-based peer discovery before initial sync completes so that we have coverage of needed columns when range syncing. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15660)
- Fixed an off-by-one in forkchoice startup. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15684)
- Mitigate potential supernode clustering due to libp2p ConnManager pruning of non-supernodes; see https://github.com/OffchainLabs/prysm/issues/15607. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15681)
- Initial sync: Do not request data column sidecars for blocks before the retention period. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15644)
- Fixed an incorrect attestation data request where the assigned committee index was used after Electra, instead of 0. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15696)
- Use the v2 endpoint for blinded block submission post-Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15716)
- Fixed 'justified' block support missing in blocker.Block and optimized logic between blocker.Block and blocker.Blob. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15715)
- Fix prysmctl panic when baseFee is not set in genesis.json. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15687)
- Fix getStateRandao not returning historic RANDAO mix values. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15653)
- Fix a race in PriorityQueue.Pop by checking emptiness under the write lock (#15726; see the sketch after this list). [[PR]](https://github.com/prysmaticlabs/prysm/pull/15726)
- In P2P service start, wait for the custody info to be correctly initialized. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15732)
- `createLocalNode`: Wait before retrying to retrieve the custody group count if not present. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15735)
- Replace fmt.Printf with proper test error handling in web3signer keymanager tests, using require.NoError(t, err) instead of t.Fatalf for better error handling and debugging. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15723)
- Fixed a regression introduced in PR #15715: the blocker now returns an error for not found, and error handling correctly handles the error and returns 404 instead of 500. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15742)
- The DA metric was not writing correctly because the if statement on err was accidentally flipped. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15743)
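On the `PriorityQueue.Pop` race above: the general fix is to make the emptiness check and the mutation atomic by doing both under the write lock, since a check done earlier under a read lock can be stale by the time the write lock is acquired. A generic sketch with a hypothetical queue type, not the actual Prysm queue:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errEmpty = errors.New("queue is empty")

// queue is an illustrative priority-queue stand-in.
type queue struct {
	mu    sync.RWMutex
	items []int
}

func (q *queue) Pop() (int, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	// Check emptiness under the write lock, in the same critical
	// section as the mutation, so no other goroutine can drain the
	// queue between the check and the pop.
	if len(q.items) == 0 {
		return 0, errEmpty
	}
	v := q.items[0]
	q.items = q.items[1:]
	return v, nil
}

func main() {
	q := &queue{items: []int{42}}
	v, err := q.Pop()
	fmt.Println(v, err) // 42 <nil>
	_, err = q.Pop()
	fmt.Println(err) // queue is empty
}
```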
### Security

- Updated go to version 1.24.5. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15561)
- Updated distroless/cc-debian11 to latest to resolve CVE-2024-2961. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15562)
- Updated go to version 1.24.6. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15566)
- Updated quic-go to latest version. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15749)
## [v6.0.5](https://github.com/prysmaticlabs/prysm/compare/v6.0.4...v6.0.5) - 2025-09-26

We are releasing a patch update on top of v6.0.4 to address a stability issue with quic-go.
All operators should update as soon as possible to v6.0.5 or later.

### Security

- Updated quic-go to latest version. [[PR]](https://github.com/prysmaticlabs/prysm/pull/15749)

## [v6.0.4](https://github.com/prysmaticlabs/prysm/compare/v6.0.3...v6.0.4) - 2025-06-05

This release has more work on PeerDAS and light client support. Additionally, we have a few bug fixes:
WORKSPACE (44 changes)

@@ -253,16 +253,16 @@ filegroup(
     url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
 )

-consensus_spec_version = "v1.6.0-alpha.6"
+consensus_spec_version = "v1.6.0-beta.0"

 load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

 consensus_spec_tests(
     name = "consensus_spec_tests",
     flavors = {
-        "general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
-        "minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
-        "mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
+        "general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
+        "minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
+        "mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
     },
     version = consensus_spec_version,
 )
@@ -278,7 +278,7 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
+    integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
     strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
     url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
 )
@@ -300,22 +300,6 @@ filegroup(
     url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
 )

-http_archive(
-    name = "eth2_networks",
-    build_file_content = """
-filegroup(
-    name = "configs",
-    srcs = glob([
-        "shared/**/config.yaml",
-    ]),
-    visibility = ["//visibility:public"],
-)
-""",
-    sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
-    strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
-    url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
-)
-
 http_archive(
     name = "holesky_testnet",
     build_file_content = """
@@ -327,9 +311,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
-    strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
-    url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz",  # 2025-03-17
+    integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
+    strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
+    url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
 )

 http_archive(
@@ -359,9 +343,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
-    strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
-    url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz",  # 2025-03-17
+    integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
+    strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
+    url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
 )

 http_archive(
@@ -375,9 +359,9 @@ filegroup(
     visibility = ["//visibility:public"],
 )
 """,
-    integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
-    strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
-    url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
+    integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
+    strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
+    url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
 )

 http_archive(
@@ -284,7 +284,7 @@ func (c *Client) SubmitChangeBLStoExecution(ctx context.Context, request []*stru
 	if resp.StatusCode != http.StatusOK {
 		decoder := json.NewDecoder(resp.Body)
 		decoder.DisallowUnknownFields()
-		errorJson := &server.IndexedVerificationFailureError{}
+		errorJson := &server.IndexedErrorContainer{}
 		if err := decoder.Decode(errorJson); err != nil {
 			return errors.Wrapf(err, "failed to decode error JSON for %s", resp.Request.URL)
 		}
@@ -59,6 +59,7 @@ go_test(
         "//runtime/version:go_default_library",
         "//testing/assert:go_default_library",
         "//testing/require:go_default_library",
+        "//testing/util:go_default_library",
         "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
         "@com_github_pkg_errors//:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
@@ -726,6 +726,12 @@ func unexpectedStatusErr(response *http.Response, expected int) error {
 			return errors.Wrap(jsonErr, "unable to read response body")
 		}
 		return errors.Wrap(ErrNotOK, errMessage.Message)
+	case http.StatusBadGateway:
+		log.WithError(ErrBadGateway).Debug(msg)
+		if jsonErr := json.Unmarshal(bodyBytes, &errMessage); jsonErr != nil {
+			return errors.Wrap(jsonErr, "unable to read response body")
+		}
+		return errors.Wrap(ErrBadGateway, errMessage.Message)
 	default:
 		log.WithError(ErrNotOK).Debug(msg)
 		return errors.Wrap(ErrNotOK, fmt.Sprintf("unsupported error code: %d", response.StatusCode))
@@ -21,6 +21,7 @@ import (
 	eth "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/testing/assert"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
+	"github.com/OffchainLabs/prysm/v6/testing/util"
 	"github.com/prysmaticlabs/go-bitfield"
 	log "github.com/sirupsen/logrus"
 )
@@ -170,8 +171,11 @@

 func TestClient_GetHeader(t *testing.T) {
 	ctx := t.Context()
-	expectedPath := "/eth/v1/builder/header/23/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
-	var slot primitives.Slot = 23
+	ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
+	es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
+	expectedPath := "/eth/v1/builder/header/%d/0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2/0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
+	expectedPath = fmt.Sprintf(expectedPath, ds)
+	var slot primitives.Slot = ds
 	parentHash := ezDecode(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2")
 	pubkey := ezDecode(t, "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a")
 	t.Run("server error", func(t *testing.T) {
@@ -533,7 +537,7 @@ func TestClient_GetHeader(t *testing.T) {
 			require.Equal(t, expectedPath, r.URL.Path)
 			epr := &ExecHeaderResponseElectra{}
 			require.NoError(t, json.Unmarshal([]byte(testExampleHeaderResponseElectra), epr))
-			pro, err := epr.ToProto(100)
+			pro, err := epr.ToProto(es)
 			require.NoError(t, err)
 			ssz, err := pro.MarshalSSZ()
 			require.NoError(t, err)
@@ -21,3 +21,4 @@ var ErrUnsupportedMediaType = errors.Wrap(ErrNotOK, "The media type in \"Content

 // ErrNotAcceptable specifically means that a '406 - Not Acceptable' was received from the API.
 var ErrNotAcceptable = errors.Wrap(ErrNotOK, "The accept header value is not acceptable")
+var ErrBadGateway = errors.Wrap(ErrNotOK, "recv 502 BadGateway response from API")
@@ -6,6 +6,11 @@ import (
"strings"
)

var (
ErrIndexedValidationFail = "One or more messages failed validation"
ErrIndexedBroadcastFail = "One or more messages failed broadcast"
)

// DecodeError represents an error resulting from trying to decode an HTTP request.
// It tracks the full field name for which decoding failed.
type DecodeError struct {

@@ -29,19 +34,38 @@ func (e *DecodeError) Error() string {
return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}

// IndexedVerificationFailureError wraps a collection of verification failures.
type IndexedVerificationFailureError struct {
Message string `json:"message"`
Code int `json:"code"`
Failures []*IndexedVerificationFailure `json:"failures"`
// IndexedErrorContainer wraps a collection of indexed errors.
type IndexedErrorContainer struct {
Message string `json:"message"`
Code int `json:"code"`
Failures []*IndexedError `json:"failures"`
}

func (e *IndexedVerificationFailureError) StatusCode() int {
func (e *IndexedErrorContainer) StatusCode() int {
return e.Code
}

// IndexedVerificationFailure represents an issue when verifying a single indexed object, e.g., an item in an array.
type IndexedVerificationFailure struct {
// IndexedError represents an issue when processing a single indexed object, e.g., an item in an array.
type IndexedError struct {
Index int `json:"index"`
Message string `json:"message"`
}

// BroadcastFailedError represents an error scenario where broadcasting a published message failed.
type BroadcastFailedError struct {
msg string
err error
}

// NewBroadcastFailedError creates a new instance of BroadcastFailedError.
func NewBroadcastFailedError(msg string, err error) *BroadcastFailedError {
return &BroadcastFailedError{
msg: msg,
err: err,
}
}

// Error returns the underlying error message.
func (e *BroadcastFailedError) Error() string {
return fmt.Sprintf("could not broadcast %s: %s", e.msg, e.err.Error())
}
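For context on how the renamed container is shaped on the wire, here is a short sketch that marshals one failure list to JSON. The two types are copied from the diff; the surrounding program is illustrative only:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Types as introduced in the diff above.
type IndexedError struct {
	Index   int    `json:"index"`
	Message string `json:"message"`
}

type IndexedErrorContainer struct {
	Message  string          `json:"message"`
	Code     int             `json:"code"`
	Failures []*IndexedError `json:"failures"`
}

func main() {
	// One item out of a submitted batch failed validation.
	resp := &IndexedErrorContainer{
		Message: "One or more messages failed validation",
		Code:    http.StatusBadRequest,
		Failures: []*IndexedError{
			{Index: 2, Message: "invalid signature"},
		},
	}
	b, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(b))
}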
@@ -296,3 +296,8 @@ type GetBlobsResponse struct {
Finalized bool `json:"finalized"`
Data []string `json:"data"` //blobs
}

type SSZQueryRequest struct {
Query string `json:"query"`
IncludeProof bool `json:"include_proof,omitempty"`
}

@@ -30,7 +30,7 @@ var (
// errWSBlockNotFoundInEpoch is returned when a block is not found in the WS cache or DB within epoch.
errWSBlockNotFoundInEpoch = errors.New("weak subjectivity root not found in db within epoch")
// ErrNotDescendantOfFinalized is returned when a block is not a descendant of the finalized checkpoint
ErrNotDescendantOfFinalized = invalidBlock{error: errors.New("not descendant of finalized checkpoint")}
ErrNotDescendantOfFinalized = errors.New("not descendant of finalized checkpoint")
// ErrNotCheckpoint is returned when a given checkpoint is not a
// checkpoint in any chain known to forkchoice
ErrNotCheckpoint = errors.New("not a checkpoint in forkchoice")
@@ -16,7 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"

@@ -218,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er

// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
// stVersion should represent the version of the pre-state; header should also be from the pre-state.
func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

// Execution payload is only supported in Bellatrix and beyond. Pre-merge
// blocks are never optimistic.
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if stVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}

@@ -268,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

switch {
case err == nil:
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
}
logFields := logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"root": fmt.Sprintf("%#x", blk.Root()),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
log.WithFields(logFields).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
log.WithFields(logFields).WithError(err).Error("Invalid payload status")
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
lastValidHash: bytesutil.ToBytes32(lastValidHash),
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}

// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
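The rewritten notifyNewPayload trades the switch for an if-chain so the log fields can be built once and reused. A compact stand-alone sketch of the same errors.Is-based classification follows; the sentinels and return semantics mirror the diff, while the engine call itself is stubbed out:

package main

import (
	"errors"
	"fmt"
)

// Sentinels standing in for the execution package's status errors.
var (
	errAcceptedSyncing = errors.New("payload accepted or syncing")
	errInvalidPayload  = errors.New("invalid payload status")
)

// classify returns (valid, err) with the same shape as notifyNewPayload:
// a nil engine error means VALID, accepted/syncing means optimistic import
// (false, nil), and invalid or unknown errors are surfaced to the caller.
func classify(engineErr error) (bool, error) {
	if engineErr == nil {
		return true, nil
	}
	if errors.Is(engineErr, errAcceptedSyncing) {
		return false, nil // optimistic import
	}
	if errors.Is(engineErr, errInvalidPayload) {
		return false, fmt.Errorf("block rejected by execution engine: %w", engineErr)
	}
	return false, fmt.Errorf("undefined execution engine error: %w", engineErr)
}

func main() {
	fmt.Println(classify(nil))                // true <nil>
	fmt.Println(classify(errAcceptedSyncing)) // false <nil>
}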
@@ -481,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
a := util.NewBeaconBlockAltair()
altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
require.NoError(t, err)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.Body.ExecutionPayload.BlockNumber = 1
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))

@@ -544,12 +523,6 @@ func Test_NotifyNewPayload(t *testing.T) {
blk: altairBlk,
isValidPayload: true,
},
{
name: "nil beacon block",
postState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,

@@ -576,15 +549,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "altair pre state, happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -595,24 +561,7 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "not at merge transition",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -623,15 +572,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -642,15 +584,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "undefined error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -662,15 +597,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -701,7 +629,9 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
rob, err := consensusblocks.NewROBlock(tt.blk)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.invalidBlock {

@@ -725,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
ctx := tr.ctx

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(bellatrixBlk)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{

@@ -752,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
service.cfg.ExecutionEngineCaller = e
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
require.NoError(t, err)
require.Equal(t, true, validated)
}
@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
ethpbv1 "github.com/OffchainLabs/prysm/v6/proto/eth/v1"
"github.com/OffchainLabs/prysm/v6/runtime/version"

@@ -108,7 +107,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
commonRoot = params.BeaconConfig().ZeroHash
}
dis := headSlot + newHeadSlot - 2*forkSlot
dep := math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
dep := max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot))
oldWeight, err := s.cfg.ForkChoiceStore.Weight(oldHeadRoot)
if err != nil {
log.WithField("root", fmt.Sprintf("%#x", oldHeadRoot)).Warn("Could not determine node weight")

@@ -135,7 +134,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
Type: statefeed.Reorg,
Data: &ethpbv1.EventChainReorg{
Slot: newHeadSlot,
Depth: math.Max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
Depth: max(uint64(headSlot-forkSlot), uint64(newHeadSlot-forkSlot)),
OldHeadBlock: oldHeadRoot[:],
NewHeadBlock: newHeadRoot[:],
OldHeadState: oldStateRoot[:],
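Several hunks in this change replace the project's math.Max/math.Min helpers with the min and max builtins added in Go 1.21. Behavior is identical for same-typed operands; a trivial stand-alone illustration:

package main

import "fmt"

func main() {
	headDist, newHeadDist := uint64(3), uint64(7)
	// Pre-Go 1.21 code needed a typed helper such as math.Max(a, b uint64).
	// The builtins are generic over ordered types, so the helper can go away.
	fmt.Println(max(headDist, newHeadDist)) // 7
	fmt.Println(min(headDist, newHeadDist)) // 3
}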
@@ -347,13 +346,24 @@ func (s *Service) notifyNewHeadEvent(
if err != nil {
return errors.Wrap(err, "could not check if node is optimistically synced")
}

parentRoot, err := s.ParentRoot([32]byte(newHeadRoot))
if err != nil {
return errors.Wrap(err, "could not obtain parent root in forkchoice")
}
parentSlot, err := s.RecentBlockSlot(parentRoot)
if err != nil {
return errors.Wrap(err, "could not obtain parent slot in forkchoice")
}
epochTransition := slots.ToEpoch(newHeadSlot) > slots.ToEpoch(parentSlot)

s.cfg.StateNotifier.StateFeed().Send(&feed.Event{
Type: statefeed.NewHead,
Data: &ethpbv1.EventHead{
Slot: newHeadSlot,
Block: newHeadRoot,
State: newHeadStateRoot,
EpochTransition: slots.IsEpochStart(newHeadSlot),
EpochTransition: epochTransition,
PreviousDutyDependentRoot: previousDutyDependentRoot[:],
CurrentDutyDependentRoot: currentDutyDependentRoot[:],
ExecutionOptimistic: isOptimistic,
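The EpochTransition fix matters when slots are skipped: comparing the head's epoch with its parent's epoch reports a transition even when the head slot itself is not the first slot of the epoch, which the old IsEpochStart check would miss. A hedged sketch with 32-slot epochs (constants are illustrative, not Prysm config):

package main

import "fmt"

const slotsPerEpoch = 32

func toEpoch(slot uint64) uint64 { return slot / slotsPerEpoch }

func main() {
	parentSlot, newHeadSlot := uint64(31), uint64(33)
	// Old check: is the new head exactly at an epoch start? Misses slot 33.
	isEpochStart := newHeadSlot%slotsPerEpoch == 0
	// New check: did we cross an epoch boundary since the parent block?
	epochTransition := toEpoch(newHeadSlot) > toEpoch(parentSlot)
	fmt.Println(isEpochStart, epochTransition) // false true
}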
@@ -162,6 +162,9 @@ func Test_notifyNewHeadEvent(t *testing.T) {
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 1, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), 1, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))

@@ -196,6 +199,9 @@ func Test_notifyNewHeadEvent(t *testing.T) {

newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 0, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
err = srv.notifyNewHeadEvent(t.Context(), epoch2Start, bState, newHeadStateRoot[:], newHeadRoot[:])
require.NoError(t, err)
events := notifier.ReceivedEvents()

@@ -213,6 +219,37 @@ func Test_notifyNewHeadEvent(t *testing.T) {
}
require.DeepSSZEqual(t, wanted, eventHead)
})
t.Run("epoch transition", func(t *testing.T) {
bState, _ := util.DeterministicGenesisState(t, 10)
srv := testServiceWithDB(t)
srv.SetGenesisTime(time.Now())
notifier := srv.cfg.StateNotifier.(*mock.MockStateNotifier)
srv.originBlockRoot = [32]byte{1}
st, blk, err := prepareForkchoiceState(t.Context(), 0, [32]byte{}, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadStateRoot := [32]byte{2}
newHeadRoot := [32]byte{3}
st, blk, err = prepareForkchoiceState(t.Context(), 32, newHeadRoot, [32]byte{}, [32]byte{}, &ethpb.Checkpoint{}, &ethpb.Checkpoint{})
require.NoError(t, err)
require.NoError(t, srv.cfg.ForkChoiceStore.InsertNode(t.Context(), st, blk))
newHeadSlot := params.BeaconConfig().SlotsPerEpoch
require.NoError(t, srv.notifyNewHeadEvent(t.Context(), newHeadSlot, bState, newHeadStateRoot[:], newHeadRoot[:]))
events := notifier.ReceivedEvents()
require.Equal(t, 1, len(events))

eventHead, ok := events[0].Data.(*ethpbv1.EventHead)
require.Equal(t, true, ok)
wanted := &ethpbv1.EventHead{
Slot: newHeadSlot,
Block: newHeadRoot[:],
State: newHeadStateRoot[:],
EpochTransition: true,
PreviousDutyDependentRoot: params.BeaconConfig().ZeroHash[:],
CurrentDutyDependentRoot: srv.originBlockRoot[:],
}
require.DeepSSZEqual(t, wanted, eventHead)
})
}

func TestRetrieveHead_ReadOnly(t *testing.T) {
@@ -109,6 +109,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
}

// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
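The new doc comment states two preconditions: matching input lengths and ascending cell indices. A hedged sketch of a guard a caller might apply before invoking the recovery — this helper is illustrative, not part of the kzg package:

package main

import (
	"errors"
	"fmt"
	"sort"
)

// checkRecoveryInputs enforces the preconditions RecoverCellsAndKZGProofs
// documents: matching lengths and ascending cell indices.
func checkRecoveryInputs(cellIndices []uint64, partialCellCount int) error {
	if len(cellIndices) != partialCellCount {
		return errors.New("cellIndices and partialCells must have the same length")
	}
	sorted := sort.SliceIsSorted(cellIndices, func(i, j int) bool {
		return cellIndices[i] < cellIndices[j]
	})
	if !sorted {
		return errors.New("cellIndices must be sorted in ascending order")
	}
	return nil
}

func main() {
	fmt.Println(checkRecoveryInputs([]uint64{0, 3, 7}, 3)) // <nil>
	fmt.Println(checkRecoveryInputs([]uint64{3, 0}, 2))    // error
}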
@@ -3,7 +3,6 @@ package blockchain
import (
"context"
"fmt"
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"

@@ -73,7 +72,7 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
defer s.sendStateFeedOnBlock(cfg)

defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.roblock.Block())

@@ -94,6 +93,8 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
return errors.Wrap(err, "could not set optimistic block to valid")
}
}

defer s.sendStateFeedOnBlock(cfg) // only send event after successful insertion
start := time.Now()
cfg.headRoot, err = s.cfg.ForkChoiceStore.Head(ctx)
if err != nil {

@@ -713,7 +714,7 @@ func (s *Service) areDataColumnsAvailable(
nodeID := s.cfg.P2P.NodeID()

// Get the custody group sampling size for the node.
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount()
custodyGroupCount, err := s.cfg.P2P.CustodyGroupCount(ctx)
if err != nil {
return errors.Wrap(err, "custody group count")
}

@@ -746,14 +747,14 @@ func (s *Service) areDataColumnsAvailable(
}

// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}

// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missingMap) == 0 {
if len(missing) == 0 {
return nil
}

@@ -770,33 +771,17 @@ func (s *Service) areDataColumnsAvailable(
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
missingMapCount := uint64(len(missingMap))
missingCount := uint64(len(missing))

if missingMapCount == 0 {
if missingCount == 0 {
return
}

var (
expected interface{} = "all"
missing interface{} = "all"
)

numberOfColumns := params.BeaconConfig().NumberOfColumns
colMapCount := uint64(len(peerInfo.CustodyColumns))

if colMapCount < numberOfColumns {
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
}

if missingMapCount < numberOfColumns {
missing = uint64MapToSortedSlice(missingMap)
}

log.WithFields(logrus.Fields{
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": missing,
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
}).Warning("Data columns still missing at slot end")
})
defer timer.Stop()

@@ -812,7 +797,7 @@ func (s *Service) areDataColumnsAvailable(

for _, index := range idents.Indices {
// This is a data column we are expecting.
if _, ok := missingMap[index]; ok {
if _, ok := missing[index]; ok {
storedDataColumnsCount++
}

@@ -823,10 +808,10 @@ func (s *Service) areDataColumnsAvailable(
}

// Remove the index from the missing map.
delete(missingMap, index)
delete(missing, index)

// Return if there are no more missing data columns.
if len(missingMap) == 0 {
if len(missing) == 0 {
return nil
}
}

@@ -834,10 +819,10 @@ func (s *Service) areDataColumnsAvailable(
case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missingMap))
missingIndicesCount := uint64(len(missing))

if missingIndicesCount < numberOfColumns {
missingIndices = uint64MapToSortedSlice(missingMap)
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)

@@ -921,16 +906,6 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}

// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
for idx := range input {
output = append(output, idx)
}
slices.Sort[[]uint64](output)
return output
}
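The removed uint64MapToSortedSlice helper is superseded by helpers.SortedPrettySliceFromMap. Assuming that helper does essentially the same key extraction plus ascending sort, a generic version would look like the following sketch (not Prysm's actual implementation):

package main

import (
	"cmp"
	"fmt"
	"slices"
)

// sortedKeys extracts map keys and sorts them ascending — a generic form of
// the removed uint64MapToSortedSlice helper.
func sortedKeys[K cmp.Ordered, V any](m map[K]V) []K {
	keys := make([]K, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	slices.Sort(keys)
	return keys
}

func main() {
	missing := map[uint64]bool{42: true, 7: true, 19: true}
	fmt.Println(sortedKeys(missing)) // [7 19 42]
}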
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
@@ -9,8 +9,10 @@ import (
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"

@@ -2413,6 +2415,8 @@ func driftGenesisTime(s *Service, slot primitives.Slot, delay time.Duration) {
}

func TestMissingBlobIndices(t *testing.T) {
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
maxBlobs := params.BeaconConfig().MaxBlobsPerBlock(ds)
cases := []struct {
name string
expected [][]byte

@@ -2426,23 +2430,23 @@ func TestMissingBlobIndices(t *testing.T) {
},
{
name: "expected exceeds max",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0) + 1),
expected: fakeCommitments(maxBlobs + 1),
err: errMaxBlobsExceeded,
},
{
name: "first missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{1, 2, 3, 4, 5},
result: fakeResult([]uint64{0}),
},
{
name: "all missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
result: fakeResult([]uint64{0, 1, 2, 3, 4, 5}),
},
{
name: "none missing",
expected: fakeCommitments(params.BeaconConfig().MaxBlobsPerBlock(0)),
expected: fakeCommitments(maxBlobs),
present: []uint64{0, 1, 2, 3, 4, 5},
result: fakeResult([]uint64{}),
},

@@ -2475,8 +2479,8 @@ func TestMissingBlobIndices(t *testing.T) {
for _, c := range cases {
bm, bs := filesystem.NewEphemeralBlobStorageWithMocker(t)
t.Run(c.name, func(t *testing.T) {
require.NoError(t, bm.CreateFakeIndices(c.root, 0, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, 0)
require.NoError(t, bm.CreateFakeIndices(c.root, ds, c.present...))
missing, err := missingBlobIndices(bs, c.root, c.expected, ds)
if c.err != nil {
require.ErrorIs(t, err, c.err)
return

@@ -2904,22 +2908,21 @@ type testIsAvailableParams struct {
columnsToSave []uint64
}

func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
func testIsAvailableSetup(t *testing.T, p testIsAvailableParams) (context.Context, context.CancelFunc, *Service, [fieldparams.RootLength]byte, interfaces.SignedBeaconBlock) {
ctx, cancel := context.WithCancel(t.Context())
dataColumnStorage := filesystem.NewEphemeralDataColumnStorage(t)

options := append(params.options, WithDataColumnStorage(dataColumnStorage))
options := append(p.options, WithDataColumnStorage(dataColumnStorage))
service, _ := minimalTestService(t, options...)
fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32 /*validator count*/)

err := service.saveGenesisData(ctx, genesisState)
require.NoError(t, err)
genesisState, secretKeys := util.DeterministicGenesisStateElectra(t, 32, util.WithElectraStateSlot(fs))
require.NoError(t, service.saveGenesisData(ctx, genesisState))

conf := util.DefaultBlockGenConfig()
conf.NumBlobKzgCommitments = params.blobKzgCommitmentsCount
conf.NumBlobKzgCommitments = p.blobKzgCommitmentsCount

signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, 10 /*block slot*/)
signedBeaconBlock, err := util.GenerateFullBlockFulu(genesisState, secretKeys, conf, fs+1)
require.NoError(t, err)

block := signedBeaconBlock.Block

@@ -2929,8 +2932,8 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
root, err := block.HashTreeRoot()
require.NoError(t, err)

dataColumnsParams := make([]util.DataColumnParam, 0, len(params.columnsToSave))
for _, i := range params.columnsToSave {
dataColumnsParams := make([]util.DataColumnParam, 0, len(p.columnsToSave))
for _, i := range p.columnsToSave {
dataColumnParam := util.DataColumnParam{
Index: i,
Slot: block.Slot,

@@ -2954,8 +2957,12 @@ func testIsAvailableSetup(t *testing.T, params testIsAvailableParams) (context.C
}

func TestIsDataAvailable(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.AltairForkEpoch, cfg.BellatrixForkEpoch, cfg.CapellaForkEpoch, cfg.DenebForkEpoch, cfg.ElectraForkEpoch, cfg.FuluForkEpoch = 0, 0, 0, 0, 0, 0
params.OverrideBeaconConfig(cfg)
t.Run("Fulu - out of retention window", func(t *testing.T) {
params := testIsAvailableParams{options: []Option{WithGenesisTime(time.Unix(0, 0))}}
params := testIsAvailableParams{}
ctx, _, service, root, signed := testIsAvailableSetup(t, params)

roBlock, err := consensusblocks.NewROBlockWithRoot(signed, root)

@@ -2972,7 +2979,6 @@ func TestIsDataAvailable(t *testing.T) {
err = service.isDataAvailable(ctx, roBlock)
require.NoError(t, err)
})

t.Run("Fulu - more than half of the columns in custody", func(t *testing.T) {
minimumColumnsCountToReconstruct := peerdas.MinimumColumnCountToReconstruct()
indices := make([]uint64, 0, minimumColumnsCountToReconstruct)

@@ -3143,6 +3149,159 @@ func TestIsDataAvailable(t *testing.T) {
})
}

// Test_postBlockProcess_EventSending tests that block processed events are only sent
// when block processing succeeds according to the decision tree:
//
// Block Processing Flow:
// ├─ InsertNode FAILS (fork choice timeout)
// │   └─ blockProcessed = false ❌ NO EVENT
// │
// ├─ InsertNode succeeds
// │   ├─ handleBlockAttestations FAILS
// │   │   └─ blockProcessed = false ❌ NO EVENT
// │   │
// │   ├─ Block is NON-CANONICAL (not head)
// │   │   └─ blockProcessed = true ✅ SEND EVENT (Line 111)
// │   │
// │   ├─ Block IS CANONICAL (new head)
// │   │   ├─ getFCUArgs FAILS
// │   │   │   └─ blockProcessed = true ✅ SEND EVENT (Line 117)
// │   │   │
// │   │   ├─ sendFCU FAILS
// │   │   │   └─ blockProcessed = false ❌ NO EVENT
// │   │   │
// │   │   └─ Full success
// │   │       └─ blockProcessed = true ✅ SEND EVENT (Line 125)
func Test_postBlockProcess_EventSending(t *testing.T) {
ctx := context.Background()

// Helper to create a minimal valid block and state
createTestBlockAndState := func(t *testing.T, slot primitives.Slot, parentRoot [32]byte) (consensusblocks.ROBlock, state.BeaconState) {
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(slot))

stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)

blk := util.NewBeaconBlock()
blk.Block.Slot = slot
blk.Block.ProposerIndex = 0
blk.Block.ParentRoot = parentRoot[:]
blk.Block.StateRoot = stateRoot[:]

signed := util.HydrateSignedBeaconBlock(blk)
roBlock, err := consensusblocks.NewSignedBeaconBlock(signed)
require.NoError(t, err)

roBlk, err := consensusblocks.NewROBlock(roBlock)
require.NoError(t, err)
return roBlk, st
}

tests := []struct {
name string
setupService func(*Service, [32]byte)
expectEvent bool
expectError bool
errorContains string
}{
{
name: "Block successfully processed - sends event",
setupService: func(s *Service, blockRoot [32]byte) {
// Default setup should work
},
expectEvent: true,
expectError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create service with required options
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)

// Initialize fork choice with genesis block
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(0))
genesisBlock := util.NewBeaconBlock()
genesisBlock.Block.StateRoot = bytesutil.PadTo([]byte("genesisState"), 32)
signedGenesis := util.HydrateSignedBeaconBlock(genesisBlock)
block, err := consensusblocks.NewSignedBeaconBlock(signedGenesis)
require.NoError(t, err)
genesisRoot, err := block.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, block))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, genesisRoot))

genesisROBlock, err := consensusblocks.NewROBlock(block)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, genesisROBlock))

// Create test block and state with genesis as parent
roBlock, postSt := createTestBlockAndState(t, 100, genesisRoot)

// Apply additional service setup if provided
if tt.setupService != nil {
tt.setupService(service, roBlock.Root())
}

// Create post block process config
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roBlock,
postState: postSt,
isValidPayload: true,
}

// Execute postBlockProcess
err = service.postBlockProcess(cfg)

// Check error expectation
if tt.expectError {
require.NotNil(t, err)
if tt.errorContains != "" {
require.ErrorContains(t, tt.errorContains, err)
}
} else {
require.NoError(t, err)
}

// Give a moment for deferred functions to execute
time.Sleep(10 * time.Millisecond)

// Check event expectation
notifier := service.cfg.StateNotifier.(*mock.MockStateNotifier)
events := notifier.ReceivedEvents()

if tt.expectEvent {
require.NotEqual(t, 0, len(events), "Expected event to be sent but none were received")

// Verify it's a BlockProcessed event
foundBlockProcessed := false
for _, evt := range events {
if evt.Type == statefeed.BlockProcessed {
foundBlockProcessed = true
data, ok := evt.Data.(*statefeed.BlockProcessedData)
require.Equal(t, true, ok, "Event data should be BlockProcessedData")
require.Equal(t, roBlock.Root(), data.BlockRoot, "Event should contain correct block root")
break
}
}
require.Equal(t, true, foundBlockProcessed, "Expected BlockProcessed event type")
} else {
// For no-event cases, verify no BlockProcessed events were sent
for _, evt := range events {
require.NotEqual(t, statefeed.BlockProcessed, evt.Type,
"Expected no BlockProcessed event but one was sent")
}
}
})
}
}

func setupLightClientTestRequirements(ctx context.Context, t *testing.T, s *Service, v int, options ...util.LightClientOption) (*util.TestLightClient, *postBlockProcessConfig) {
var l *util.TestLightClient
switch v {
@@ -219,6 +219,9 @@ func (s *Service) validateExecutionAndConsensus(
eg.Go(func() error {
var err error
postState, err = s.validateStateTransition(ctx, preState, block)
if errors.Is(err, ErrNotDescendantOfFinalized) {
return invalidBlock{error: err, root: block.Root()}
}
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}

@@ -248,7 +251,7 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block
err = s.isDataAvailable(ctx, block)
}
elapsed := time.Since(start)
if err != nil {
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}
return elapsed, err

@@ -585,17 +588,17 @@ func (s *Service) sendNewFinalizedEvent(ctx context.Context, postState state.Bea
func (s *Service) sendBlockAttestationsToSlasher(signed interfaces.ReadOnlySignedBeaconBlock, preState state.BeaconState) {
// Feed the indexed attestation to slasher if enabled. This action
// is done in the background to avoid adding more load to this critical code path.
ctx := context.TODO()
ctx := s.ctx
for _, att := range signed.Block().Body().Attestations() {
committees, err := helpers.AttestationCommitteesFromState(ctx, preState, att)
if err != nil {
log.WithError(err).Error("Could not get attestation committees")
return
continue
}
indexedAtt, err := attestation.ConvertToIndexed(ctx, att, committees...)
if err != nil {
log.WithError(err).Error("Could not convert to indexed attestation")
return
continue
}
s.cfg.SlasherAttestationsFeed.Send(&types.WrappedIndexedAtt{IndexedAtt: indexedAtt})
}
@@ -562,8 +562,9 @@ func TestNotifyIndex(t *testing.T) {
var root [32]byte
copy(root[:], "exampleRoot")

ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
// Test notifying a new index
bn.notifyIndex(root, 1, 1)
bn.notifyIndex(root, 1, ds)
if !bn.seenIndex[root][1] {
t.Errorf("Index was not marked as seen")
}

@@ -580,7 +581,7 @@ func TestNotifyIndex(t *testing.T) {
}

// Test notifying a new index again
bn.notifyIndex(root, 2, 1)
bn.notifyIndex(root, 2, ds)
if !bn.seenIndex[root][2] {
t.Errorf("Index was not marked as seen")
}

@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}

@@ -106,14 +106,14 @@ type mockCustodyManager struct {
custodyGroupCount uint64
}

func (dch *mockCustodyManager) EarliestAvailableSlot() (primitives.Slot, error) {
func (dch *mockCustodyManager) EarliestAvailableSlot(context.Context) (primitives.Slot, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()

return dch.earliestAvailableSlot, nil
}

func (dch *mockCustodyManager) CustodyGroupCount() (uint64, error) {
func (dch *mockCustodyManager) CustodyGroupCount(context.Context) (uint64, error) {
dch.mut.RLock()
defer dch.mut.RUnlock()
beacon-chain/cache/committee.go (vendored, 3 changes)

@@ -5,7 +5,6 @@ package cache
import (
"context"
"errors"
"math"
"sync"
"time"

@@ -272,7 +271,7 @@ func (c *CommitteeCache) checkInProgress(ctx context.Context, seed [32]byte) err
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
delay = min(delay, maxDelay)
}
return nil
}

@@ -52,7 +52,7 @@ func create(leaves [][32]byte, depth uint64) MerkleTreeNode {
if depth == 0 {
return &LeafNode{hash: leaves[0]}
}
split := math.Min(math.PowerOf2(depth-1), length)
split := min(math.PowerOf2(depth-1), length)
left := create(leaves[0:split], depth-1)
right := create(leaves[split:], depth-1)
return &InnerNode{left: left, right: right}
beacon-chain/cache/skip_slot_cache.go (vendored, 3 changes)

@@ -2,7 +2,6 @@ package cache

import (
"context"
"math"
"sync"
"time"

@@ -90,7 +89,7 @@ func (c *SkipSlotCache) Get(ctx context.Context, r [32]byte) (state.BeaconState,
// for the in progress boolean to flip to false.
time.Sleep(time.Duration(delay) * time.Nanosecond)
delay *= delayFactor
delay = math.Min(delay, maxDelay)
delay = min(delay, maxDelay)
}
span.SetAttributes(trace.BoolAttribute("inProgress", inProgress))
@@ -49,13 +49,22 @@ func ProcessSyncAggregate(ctx context.Context, s state.BeaconState, sync *ethpb.
if err != nil {
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}

if err := VerifySyncCommitteeSig(s, votedKeys, sync.SyncCommitteeSignature); err != nil {
return nil, 0, errors.Wrap(err, "could not verify sync committee signature")
}
return s, reward, nil
}

// ProcessSyncAggregateNoVerifySig processes the sync aggregate without verifying the sync committee signature.
// This is useful in scenarios such as block reward calculation, where we can assume the data in the block is valid.
func ProcessSyncAggregateNoVerifySig(ctx context.Context, s state.BeaconState, sync *ethpb.SyncAggregate) (state.BeaconState, uint64, error) {
s, _, reward, err := processSyncAggregate(ctx, s, sync)
if err != nil {
return nil, 0, errors.Wrap(err, "could not filter sync committee votes")
}
return s, reward, nil
}

// processSyncAggregate applies all the logic in the spec function `process_sync_aggregate` except
// verifying the BLS signatures. It returns the modified beacon state, the list of validators'
// public keys that voted (for future signature verification) and the proposer reward for including
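The intended use of the NoVerify variant, per its comment, is reward calculation on blocks that already passed full validation. A hedged sketch of how a caller might pick between the two paths — function signatures are as in the diff, but the package name and import paths are assumed from the repo layout:

package rewards

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
)

// syncAggregateReward is a hypothetical caller: it computes the proposer's
// sync-aggregate reward, skipping the BLS check when the block is trusted.
func syncAggregateReward(ctx context.Context, st state.BeaconState, agg *ethpb.SyncAggregate, trusted bool) (uint64, error) {
	if trusted {
		_, reward, err := altair.ProcessSyncAggregateNoVerifySig(ctx, st, agg)
		return reward, err
	}
	_, reward, err := altair.ProcessSyncAggregate(ctx, st, agg)
	return reward, err
}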
@@ -53,9 +53,19 @@ func TestProcessSyncCommittee_PerfectParticipation(t *testing.T) {
SyncCommitteeSignature: aggregatedSig,
}

// Verify that ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate have the same outcome.
beaconStateNoVerifySig := beaconState.Copy()
beaconStateNoVerifySig, rewardNoVerifySig, err := altair.ProcessSyncAggregateNoVerifySig(t.Context(), beaconStateNoVerifySig, syncAggregate)
require.NoError(t, err)
sszNoVerifySig, err := beaconStateNoVerifySig.MarshalSSZ()
require.NoError(t, err)
var reward uint64
beaconState, reward, err = altair.ProcessSyncAggregate(t.Context(), beaconState, syncAggregate)
require.NoError(t, err)
ssz, err := beaconState.MarshalSSZ()
require.NoError(t, err)
assert.DeepEqual(t, sszNoVerifySig, ssz, "States resulting from ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate are not equal")
assert.Equal(t, rewardNoVerifySig, reward, "Rewards resulting from ProcessSyncAggregateNoVerifySig and ProcessSyncAggregate are not equal")
assert.Equal(t, uint64(72192), reward)

// Use a non-sync committee index to compare profitability.

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v6/crypto/bls"
"github.com/OffchainLabs/prysm/v6/crypto/hash"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"

@@ -209,7 +208,7 @@ func IsSyncCommitteeAggregator(sig []byte) (bool, error) {
}

cfg := params.BeaconConfig()
modulo := math.Max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
modulo := max(1, cfg.SyncCommitteeSize/cfg.SyncCommitteeSubnetCount/cfg.TargetAggregatorsPerSyncSubcommittee)
hashedSig := hash.Hash(sig)
return bytesutil.FromBytes8(hashedSig[:8])%modulo == 0, nil
}
@@ -12,6 +12,46 @@ import (
"github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1/attestation"
)

// ConvertToAltair converts a Phase 0 beacon state to an Altair beacon state.
func ConvertToAltair(state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}
newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
if err != nil {
return nil, err
}
return newState, nil
}

// UpgradeToAltair upgrades the input state and returns the Altair-version state.
//
// Spec code:

@@ -64,39 +104,7 @@
// post.next_sync_committee = get_next_sync_committee(post)
// return post
func UpgradeToAltair(ctx context.Context, state state.BeaconState) (state.BeaconState, error) {
epoch := time.CurrentEpoch(state)

numValidators := state.NumValidators()
s := &ethpb.BeaconStateAltair{
GenesisTime: uint64(state.GenesisTime().Unix()),
GenesisValidatorsRoot: state.GenesisValidatorsRoot(),
Slot: state.Slot(),
Fork: &ethpb.Fork{
PreviousVersion: state.Fork().CurrentVersion,
CurrentVersion: params.BeaconConfig().AltairForkVersion,
Epoch: epoch,
},
LatestBlockHeader: state.LatestBlockHeader(),
BlockRoots: state.BlockRoots(),
StateRoots: state.StateRoots(),
HistoricalRoots: state.HistoricalRoots(),
Eth1Data: state.Eth1Data(),
Eth1DataVotes: state.Eth1DataVotes(),
Eth1DepositIndex: state.Eth1DepositIndex(),
Validators: state.Validators(),
Balances: state.Balances(),
RandaoMixes: state.RandaoMixes(),
Slashings: state.Slashings(),
PreviousEpochParticipation: make([]byte, numValidators),
CurrentEpochParticipation: make([]byte, numValidators),
JustificationBits: state.JustificationBits(),
PreviousJustifiedCheckpoint: state.PreviousJustifiedCheckpoint(),
CurrentJustifiedCheckpoint: state.CurrentJustifiedCheckpoint(),
FinalizedCheckpoint: state.FinalizedCheckpoint(),
InactivityScores: make([]uint64, numValidators),
}

newState, err := state_native.InitializeFromProtoUnsafeAltair(s)
newState, err := ConvertToAltair(state)
if err != nil {
return nil, err
}
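After this refactor, UpgradeToAltair is a thin wrapper: the field-by-field state copy lives in ConvertToAltair, while the fork-transition-only work (participation replay, sync committees) stays in the upgrade path. A hedged sketch of the delegation shape — names come from the diff, the package relationships are assumed, and the post-conversion steps are elided:

// upgrade sketches the split introduced above: conversion is reusable on its
// own; the upgrade adds the fork-transition bookkeeping afterwards.
func upgrade(st state.BeaconState) (state.BeaconState, error) {
	post, err := ConvertToAltair(st) // field-by-field copy into an Altair state
	if err != nil {
		return nil, err
	}
	// UpgradeToAltair then fills participation flags and sync committees
	// before returning post (elided here).
	return post, nil
}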
@@ -39,7 +39,6 @@ go_library(
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//encoding/ssz:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -42,6 +42,9 @@ func ProcessAttesterSlashings(
|
||||
slashings []ethpb.AttSlashing,
|
||||
exitInfo *validators.ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
if exitInfo == nil && len(slashings) > 0 {
|
||||
return nil, errors.New("exit info required to process attester slashings")
|
||||
}
|
||||
var err error
|
||||
for _, slashing := range slashings {
|
||||
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)
|
||||
@@ -52,6 +55,28 @@ func ProcessAttesterSlashings(
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessAttesterSlashingsNoVerify processes attester slashings without verifying them.
|
||||
// This is useful in scenarios such as block reward calculation, where we can assume the data
|
||||
// in the block is valid.
|
||||
func ProcessAttesterSlashingsNoVerify(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashings []ethpb.AttSlashing,
|
||||
exitInfo *validators.ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
if exitInfo == nil && len(slashings) > 0 {
|
||||
return nil, errors.New("exit info required to process attester slashings")
|
||||
}
|
||||
var err error
|
||||
for _, slashing := range slashings {
|
||||
beaconState, err = ProcessAttesterSlashingNoVerify(ctx, beaconState, slashing, exitInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return beaconState, nil
|
||||
}
|
||||
|
||||
// ProcessAttesterSlashing processes individual attester slashing.
|
||||
func ProcessAttesterSlashing(
|
||||
ctx context.Context,
|
||||
@@ -59,9 +84,36 @@ func ProcessAttesterSlashing(
|
||||
slashing ethpb.AttSlashing,
|
||||
exitInfo *validators.ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
if exitInfo == nil {
|
||||
return nil, errors.New("exit info is required to process attester slashing")
|
||||
}
|
||||
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
|
||||
return nil, errors.Wrap(err, "could not verify attester slashing")
|
||||
}
|
||||
return processAttesterSlashing(ctx, beaconState, slashing, exitInfo)
|
||||
}
|
||||
|
||||
// ProcessAttesterSlashingNoVerify processes individual attester slashing without verifying it.
|
||||
// This is useful in scenarios such as block reward calculation, where we can assume the data
|
||||
// in the block is valid.
|
||||
func ProcessAttesterSlashingNoVerify(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashing ethpb.AttSlashing,
|
||||
exitInfo *validators.ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
if exitInfo == nil {
|
||||
return nil, errors.New("exit info is required to process attester slashing")
|
||||
}
|
||||
return processAttesterSlashing(ctx, beaconState, slashing, exitInfo)
|
||||
}
|
||||
|
||||
func processAttesterSlashing(
|
||||
ctx context.Context,
|
||||
beaconState state.BeaconState,
|
||||
slashing ethpb.AttSlashing,
|
||||
exitInfo *validators.ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
slashableIndices := SlashableAttesterIndices(slashing)
|
||||
sort.SliceStable(slashableIndices, func(i, j int) bool {
|
||||
return slashableIndices[i] < slashableIndices[j]
|
||||
|
@@ -242,8 +242,18 @@ func TestProcessAttesterSlashings_AppliesCorrectStatus(t *testing.T) {
			currentSlot := 2 * params.BeaconConfig().SlotsPerEpoch
			require.NoError(t, tc.st.SetSlot(currentSlot))

			// Verify that ProcessAttesterSlashingsNoVerify and ProcessAttesterSlashings have the same outcome.
			stNoVerify := tc.st.Copy()
			newStateNoVerify, err := blocks.ProcessAttesterSlashingsNoVerify(t.Context(), stNoVerify, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(stNoVerify))
			require.NoError(t, err)
			sszNoVerify, err := newStateNoVerify.MarshalSSZ()
			require.NoError(t, err)
			newState, err := blocks.ProcessAttesterSlashings(t.Context(), tc.st, []ethpb.AttSlashing{tc.slashing}, v.ExitInformation(tc.st))
			require.NoError(t, err)
			ssz, err := newState.MarshalSSZ()
			require.NoError(t, err)
			assert.DeepEqual(t, sszNoVerify, ssz, "States resulting from ProcessAttesterSlashingsNoVerify and ProcessAttesterSlashings are not equal")

			newRegistry := newState.Validators()

			// Given the intersection of slashable indices is [1], only validator

@@ -11,7 +11,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/contracts/deposit"
	"github.com/OffchainLabs/prysm/v6/crypto/bls"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/math"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
)
@@ -34,7 +33,7 @@ func ActivateValidatorWithEffectiveBalance(beaconState state.BeaconState, deposi
	if err != nil {
		return nil, err
	}
	validator.EffectiveBalance = math.Min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
	validator.EffectiveBalance = min(balance-balance%params.BeaconConfig().EffectiveBalanceIncrement, params.BeaconConfig().MaxEffectiveBalance)
	if validator.EffectiveBalance ==
		params.BeaconConfig().MaxEffectiveBalance {
		validator.ActivationEligibilityEpoch = 0

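This hunk (and several later ones) swaps the project's `math.Min`/`math.Max` helpers for Go's built-in generic `min` and `max`, available since Go 1.21, which also lets the `math` import be dropped where it becomes unused. A tiny standalone illustration (the balance values are made up):

	package main

	import "fmt"

	func main() {
		var balance, ceiling uint64 = 33_500_000_000, 32_000_000_000
		// The built-in generic min and max (Go 1.21+) work on any ordered
		// type, so uint64-specific helper functions are no longer needed.
		fmt.Println(min(balance, ceiling)) // 32000000000
		fmt.Println(max(balance, ceiling)) // 33500000000
	}
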
@@ -6,3 +6,4 @@ var errNilSignedWithdrawalMessage = errors.New("nil SignedBLSToExecutionChange m
var errNilWithdrawalMessage = errors.New("nil BLSToExecutionChange message")
var errInvalidBLSPrefix = errors.New("withdrawal credential prefix is not a BLS prefix")
var errInvalidWithdrawalCredentials = errors.New("withdrawal credentials do not match")
var ErrInvalidSignature = errors.New("invalid signature")

@@ -55,6 +55,9 @@ func ProcessVoluntaryExits(
	if len(exits) == 0 {
		return beaconState, nil
	}
	if exitInfo == nil {
		return nil, errors.New("exit info required to process voluntary exits")
	}
	for idx, exit := range exits {
		if exit == nil || exit.Exit == nil {
			return nil, errors.New("nil voluntary exit in block body")

@@ -51,6 +51,9 @@ func ProcessProposerSlashings(
	slashings []*ethpb.ProposerSlashing,
	exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
	if exitInfo == nil && len(slashings) > 0 {
		return nil, errors.New("exit info required to process proposer slashings")
	}
	var err error
	for _, slashing := range slashings {
		beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)
@@ -61,6 +64,28 @@ func ProcessProposerSlashings(
	return beaconState, nil
}

// ProcessProposerSlashingsNoVerify processes proposer slashings without verifying them.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessProposerSlashingsNoVerify(
	ctx context.Context,
	beaconState state.BeaconState,
	slashings []*ethpb.ProposerSlashing,
	exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
	if exitInfo == nil && len(slashings) > 0 {
		return nil, errors.New("exit info required to process proposer slashings")
	}
	var err error
	for _, slashing := range slashings {
		beaconState, err = ProcessProposerSlashingNoVerify(ctx, beaconState, slashing, exitInfo)
		if err != nil {
			return nil, err
		}
	}
	return beaconState, nil
}

// ProcessProposerSlashing processes individual proposer slashing.
func ProcessProposerSlashing(
	ctx context.Context,
@@ -68,13 +93,40 @@ func ProcessProposerSlashing(
	slashing *ethpb.ProposerSlashing,
	exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
	var err error
	if slashing == nil {
		return nil, errors.New("nil proposer slashings in block body")
	}
	if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
	if err := VerifyProposerSlashing(beaconState, slashing); err != nil {
		return nil, errors.Wrap(err, "could not verify proposer slashing")
	}
	return processProposerSlashing(ctx, beaconState, slashing, exitInfo)
}

// ProcessProposerSlashingNoVerify processes individual proposer slashing without verifying it.
// This is useful in scenarios such as block reward calculation, where we can assume the data
// in the block is valid.
func ProcessProposerSlashingNoVerify(
	ctx context.Context,
	beaconState state.BeaconState,
	slashing *ethpb.ProposerSlashing,
	exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
	if slashing == nil {
		return nil, errors.New("nil proposer slashings in block body")
	}
	return processProposerSlashing(ctx, beaconState, slashing, exitInfo)
}

func processProposerSlashing(
	ctx context.Context,
	beaconState state.BeaconState,
	slashing *ethpb.ProposerSlashing,
	exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
	if exitInfo == nil {
		return nil, errors.New("exit info is required to process proposer slashing")
	}
	var err error
	beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
	if err != nil {
		return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

@@ -172,8 +172,17 @@ func TestProcessProposerSlashings_AppliesCorrectStatus(t *testing.T) {
	block := util.NewBeaconBlock()
	block.Block.Body.ProposerSlashings = slashings

	// Verify that ProcessProposerSlashingsNoVerify and ProcessProposerSlashings have the same outcome.
	beaconStateNoVerify := beaconState.Copy()
	newStateNoVerify, err := blocks.ProcessProposerSlashingsNoVerify(t.Context(), beaconStateNoVerify, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconStateNoVerify))
	require.NoError(t, err)
	sszNoVerify, err := newStateNoVerify.MarshalSSZ()
	require.NoError(t, err)
	newState, err := blocks.ProcessProposerSlashings(t.Context(), beaconState, block.Block.Body.ProposerSlashings, v.ExitInformation(beaconState))
	require.NoError(t, err)
	ssz, err := newState.MarshalSSZ()
	require.NoError(t, err)
	assert.DeepEqual(t, sszNoVerify, ssz, "States resulting from ProcessProposerSlashingsNoVerify and ProcessProposerSlashings are not equal")

	newStateVals := newState.Validators()
	if newStateVals[1].ExitEpoch != beaconState.Validators()[1].ExitEpoch {

@@ -114,9 +114,12 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState,
	}
	proposerPubKey := proposer.PublicKey
	sig := blk.Signature()
	return signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
	if err := signing.VerifyBlockSigningRoot(proposerPubKey, sig[:], domain, func() ([32]byte, error) {
		return blkRoot, nil
	})
	}); err != nil {
		return ErrInvalidSignature
	}
	return nil
}

// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state.

@@ -89,3 +89,36 @@ func TestVerifyBlockSignatureUsingCurrentFork(t *testing.T) {
	require.NoError(t, err)
	assert.NoError(t, blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot))
}

func TestVerifyBlockSignatureUsingCurrentFork_InvalidSignature(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	bCfg := params.BeaconConfig()
	bCfg.AltairForkEpoch = 100
	bCfg.ForkVersionSchedule[bytesutil.ToBytes4(bCfg.AltairForkVersion)] = 100
	params.OverrideBeaconConfig(bCfg)
	bState, keys := util.DeterministicGenesisState(t, 100)
	altairBlk := util.NewBeaconBlockAltair()
	altairBlk.Block.ProposerIndex = 0
	altairBlk.Block.Slot = params.BeaconConfig().SlotsPerEpoch * 100
	blkRoot, err := altairBlk.Block.HashTreeRoot()
	assert.NoError(t, err)

	// Sign with wrong key (proposer index 0, but using key 1)
	fData := &ethpb.Fork{
		Epoch:           100,
		CurrentVersion:  params.BeaconConfig().AltairForkVersion,
		PreviousVersion: params.BeaconConfig().GenesisForkVersion,
	}
	domain, err := signing.Domain(fData, 100, params.BeaconConfig().DomainBeaconProposer, bState.GenesisValidatorsRoot())
	assert.NoError(t, err)
	rt, err := signing.ComputeSigningRoot(altairBlk.Block, domain)
	assert.NoError(t, err)
	wrongSig := keys[1].Sign(rt[:]).Marshal()
	altairBlk.Signature = wrongSig

	wsb, err := consensusblocks.NewSignedBeaconBlock(altairBlk)
	require.NoError(t, err)

	err = blocks.VerifyBlockSignatureUsingCurrentFork(bState, wsb, blkRoot)
	require.ErrorIs(t, err, blocks.ErrInvalidSignature, "Expected ErrInvalidSignature for invalid signature")
}

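The rewrite above normalizes the raw verification failure into the exported sentinel `ErrInvalidSignature`, so callers can branch with `errors.Is` instead of matching error strings. A minimal sketch of that sentinel-error pattern (`verifySig` is a stand-in, not the real call chain):

	package main

	import (
		"errors"
		"fmt"
	)

	var ErrInvalidSignature = errors.New("invalid signature")

	// verifySig wraps a lower-level check and normalizes any failure to the
	// exported sentinel, hiding the crypto library's error details.
	func verifySig(ok bool) error {
		if !ok {
			return ErrInvalidSignature
		}
		return nil
	}

	func main() {
		err := verifySig(false)
		if errors.Is(err, ErrInvalidSignature) {
			// Callers branch on error identity, not on message text.
			fmt.Println("rejected: bad signature")
		}
	}
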
@@ -53,9 +53,15 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
	// 6110 validations are in VerifyOperationLengths
	bb := block.Body()
	// Electra extends the altair operations.
	exitInfo := v.ExitInformation(st)
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return nil, errors.Wrap(err, "could not update total active balance cache")
	var exitInfo *v.ExitInfo
	hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
	hasExits := len(bb.VoluntaryExits()) > 0
	if hasSlashings || hasExits {
		// ExitInformation is expensive to compute, only do it if we need it.
		exitInfo = v.ExitInformation(st)
		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
			return nil, errors.Wrap(err, "could not update total active balance cache")
		}
	}
	st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
	if err != nil {

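The change is a lazy-initialization guard: the expensive registry walk only happens when the block actually carries operations that consume its result. A reduced sketch of the shape, with `expensiveExitInfo` standing in for `validators.ExitInformation`:

	package main

	import "fmt"

	type ExitInfo struct{ TotalActiveBalance uint64 }

	// expensiveExitInfo stands in for validators.ExitInformation, which
	// walks the whole validator registry.
	func expensiveExitInfo() *ExitInfo {
		fmt.Println("computing exit info (expensive)")
		return &ExitInfo{TotalActiveBalance: 1}
	}

	func processOperations(slashings, exits int) {
		var exitInfo *ExitInfo
		if slashings > 0 || exits > 0 {
			// Only pay the cost when some operation will actually use it.
			exitInfo = expensiveExitInfo()
		}
		_ = exitInfo // passed down to the slashing/exit processors
	}

	func main() {
		processOperations(0, 0) // no output: computation skipped
		processOperations(1, 0) // computes once
	}
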
@@ -15,6 +15,129 @@ import (
	"github.com/pkg/errors"
)

// ConvertToElectra converts a Deneb beacon state to an Electra beacon state. It does not perform any fork logic.
func ConvertToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
	currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSyncCommittee, err := beaconState.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	inactivityScores, err := beaconState.InactivityScores()
	if err != nil {
		return nil, err
	}
	payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
	if err != nil {
		return nil, err
	}
	txRoot, err := payloadHeader.TransactionsRoot()
	if err != nil {
		return nil, err
	}
	wdRoot, err := payloadHeader.WithdrawalsRoot()
	if err != nil {
		return nil, err
	}
	wi, err := beaconState.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	vi, err := beaconState.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	summaries, err := beaconState.HistoricalSummaries()
	if err != nil {
		return nil, err
	}
	excessBlobGas, err := payloadHeader.ExcessBlobGas()
	if err != nil {
		return nil, err
	}
	blobGasUsed, err := payloadHeader.BlobGasUsed()
	if err != nil {
		return nil, err
	}

	s := &ethpb.BeaconStateElectra{
		GenesisTime:           uint64(beaconState.GenesisTime().Unix()),
		GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
		Slot:                  beaconState.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: beaconState.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().ElectraForkVersion,
			Epoch:           time.CurrentEpoch(beaconState),
		},
		LatestBlockHeader:           beaconState.LatestBlockHeader(),
		BlockRoots:                  beaconState.BlockRoots(),
		StateRoots:                  beaconState.StateRoots(),
		HistoricalRoots:             beaconState.HistoricalRoots(),
		Eth1Data:                    beaconState.Eth1Data(),
		Eth1DataVotes:               beaconState.Eth1DataVotes(),
		Eth1DepositIndex:            beaconState.Eth1DepositIndex(),
		Validators:                  beaconState.Validators(),
		Balances:                    beaconState.Balances(),
		RandaoMixes:                 beaconState.RandaoMixes(),
		Slashings:                   beaconState.Slashings(),
		PreviousEpochParticipation:  prevEpochParticipation,
		CurrentEpochParticipation:   currentEpochParticipation,
		JustificationBits:           beaconState.JustificationBits(),
		PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  beaconState.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         beaconState.FinalizedCheckpoint(),
		InactivityScores:            inactivityScores,
		CurrentSyncCommittee:        currentSyncCommittee,
		NextSyncCommittee:           nextSyncCommittee,
		LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash:       payloadHeader.ParentHash(),
			FeeRecipient:     payloadHeader.FeeRecipient(),
			StateRoot:        payloadHeader.StateRoot(),
			ReceiptsRoot:     payloadHeader.ReceiptsRoot(),
			LogsBloom:        payloadHeader.LogsBloom(),
			PrevRandao:       payloadHeader.PrevRandao(),
			BlockNumber:      payloadHeader.BlockNumber(),
			GasLimit:         payloadHeader.GasLimit(),
			GasUsed:          payloadHeader.GasUsed(),
			Timestamp:        payloadHeader.Timestamp(),
			ExtraData:        payloadHeader.ExtraData(),
			BaseFeePerGas:    payloadHeader.BaseFeePerGas(),
			BlockHash:        payloadHeader.BlockHash(),
			TransactionsRoot: txRoot,
			WithdrawalsRoot:  wdRoot,
			ExcessBlobGas:    excessBlobGas,
			BlobGasUsed:      blobGasUsed,
		},
		NextWithdrawalIndex:          wi,
		NextWithdrawalValidatorIndex: vi,
		HistoricalSummaries:          summaries,

		DepositRequestsStartIndex:  params.BeaconConfig().UnsetDepositRequestsStartIndex,
		DepositBalanceToConsume:    0,
		EarliestConsolidationEpoch: helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
		PendingDeposits:            make([]*ethpb.PendingDeposit, 0),
		PendingPartialWithdrawals:  make([]*ethpb.PendingPartialWithdrawal, 0),
		PendingConsolidations:      make([]*ethpb.PendingConsolidation, 0),
	}

	// need to cast the beaconState to use in helper functions
	post, err := state_native.InitializeFromProtoUnsafeElectra(s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
	}
	return post, nil
}

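A hedged sketch of how a caller might use the extracted function directly, assuming the surrounding package layout (for example a state-diff or test helper that needs only the structural conversion, while `UpgradeToElectra` below layers the fork-specific churn fields on top):

	// Hypothetical caller; denebState is assumed to be a Deneb-version
	// state.BeaconState in the same module.
	func toElectraLayout(denebState state.BeaconState) (state.BeaconState, error) {
		s, err := ConvertToElectra(denebState)
		if err != nil {
			return nil, errors.Wrap(err, "convert to electra")
		}
		// s now has Electra's layout but none of the fork-transition side
		// effects (churn limits, balance queueing) applied by UpgradeToElectra.
		return s, nil
	}
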
// UpgradeToElectra upgrades a generic beacon state to an Electra state.
//
// nolint:dupword
@@ -126,55 +249,7 @@ import (
//
// return post
func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error) {
	currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
	if err != nil {
		return nil, err
	}
	nextSyncCommittee, err := beaconState.NextSyncCommittee()
	if err != nil {
		return nil, err
	}
	prevEpochParticipation, err := beaconState.PreviousEpochParticipation()
	if err != nil {
		return nil, err
	}
	currentEpochParticipation, err := beaconState.CurrentEpochParticipation()
	if err != nil {
		return nil, err
	}
	inactivityScores, err := beaconState.InactivityScores()
	if err != nil {
		return nil, err
	}
	payloadHeader, err := beaconState.LatestExecutionPayloadHeader()
	if err != nil {
		return nil, err
	}
	txRoot, err := payloadHeader.TransactionsRoot()
	if err != nil {
		return nil, err
	}
	wdRoot, err := payloadHeader.WithdrawalsRoot()
	if err != nil {
		return nil, err
	}
	wi, err := beaconState.NextWithdrawalIndex()
	if err != nil {
		return nil, err
	}
	vi, err := beaconState.NextWithdrawalValidatorIndex()
	if err != nil {
		return nil, err
	}
	summaries, err := beaconState.HistoricalSummaries()
	if err != nil {
		return nil, err
	}
	excessBlobGas, err := payloadHeader.ExcessBlobGas()
	if err != nil {
		return nil, err
	}
	blobGasUsed, err := payloadHeader.BlobGasUsed()
	s, err := ConvertToElectra(beaconState)
	if err != nil {
		return nil, err
	}
@@ -206,97 +281,38 @@ func UpgradeToElectra(beaconState state.BeaconState) (state.BeaconState, error)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get total active balance")
	}

	s := &ethpb.BeaconStateElectra{
		GenesisTime:           uint64(beaconState.GenesisTime().Unix()),
		GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
		Slot:                  beaconState.Slot(),
		Fork: &ethpb.Fork{
			PreviousVersion: beaconState.Fork().CurrentVersion,
			CurrentVersion:  params.BeaconConfig().ElectraForkVersion,
			Epoch:           time.CurrentEpoch(beaconState),
		},
		LatestBlockHeader:           beaconState.LatestBlockHeader(),
		BlockRoots:                  beaconState.BlockRoots(),
		StateRoots:                  beaconState.StateRoots(),
		HistoricalRoots:             beaconState.HistoricalRoots(),
		Eth1Data:                    beaconState.Eth1Data(),
		Eth1DataVotes:               beaconState.Eth1DataVotes(),
		Eth1DepositIndex:            beaconState.Eth1DepositIndex(),
		Validators:                  beaconState.Validators(),
		Balances:                    beaconState.Balances(),
		RandaoMixes:                 beaconState.RandaoMixes(),
		Slashings:                   beaconState.Slashings(),
		PreviousEpochParticipation:  prevEpochParticipation,
		CurrentEpochParticipation:   currentEpochParticipation,
		JustificationBits:           beaconState.JustificationBits(),
		PreviousJustifiedCheckpoint: beaconState.PreviousJustifiedCheckpoint(),
		CurrentJustifiedCheckpoint:  beaconState.CurrentJustifiedCheckpoint(),
		FinalizedCheckpoint:         beaconState.FinalizedCheckpoint(),
		InactivityScores:            inactivityScores,
		CurrentSyncCommittee:        currentSyncCommittee,
		NextSyncCommittee:           nextSyncCommittee,
		LatestExecutionPayloadHeader: &enginev1.ExecutionPayloadHeaderDeneb{
			ParentHash:       payloadHeader.ParentHash(),
			FeeRecipient:     payloadHeader.FeeRecipient(),
			StateRoot:        payloadHeader.StateRoot(),
			ReceiptsRoot:     payloadHeader.ReceiptsRoot(),
			LogsBloom:        payloadHeader.LogsBloom(),
			PrevRandao:       payloadHeader.PrevRandao(),
			BlockNumber:      payloadHeader.BlockNumber(),
			GasLimit:         payloadHeader.GasLimit(),
			GasUsed:          payloadHeader.GasUsed(),
			Timestamp:        payloadHeader.Timestamp(),
			ExtraData:        payloadHeader.ExtraData(),
			BaseFeePerGas:    payloadHeader.BaseFeePerGas(),
			BlockHash:        payloadHeader.BlockHash(),
			TransactionsRoot: txRoot,
			WithdrawalsRoot:  wdRoot,
			ExcessBlobGas:    excessBlobGas,
			BlobGasUsed:      blobGasUsed,
		},
		NextWithdrawalIndex:          wi,
		NextWithdrawalValidatorIndex: vi,
		HistoricalSummaries:          summaries,

		DepositRequestsStartIndex:     params.BeaconConfig().UnsetDepositRequestsStartIndex,
		DepositBalanceToConsume:       0,
		ExitBalanceToConsume:          helpers.ActivationExitChurnLimit(primitives.Gwei(tab)),
		EarliestExitEpoch:             earliestExitEpoch,
		ConsolidationBalanceToConsume: helpers.ConsolidationChurnLimit(primitives.Gwei(tab)),
		EarliestConsolidationEpoch:    helpers.ActivationExitEpoch(slots.ToEpoch(beaconState.Slot())),
		PendingDeposits:               make([]*ethpb.PendingDeposit, 0),
		PendingPartialWithdrawals:     make([]*ethpb.PendingPartialWithdrawal, 0),
		PendingConsolidations:         make([]*ethpb.PendingConsolidation, 0),
	if err := s.SetExitBalanceToConsume(helpers.ActivationExitChurnLimit(primitives.Gwei(tab))); err != nil {
		return nil, errors.Wrap(err, "failed to set exit balance to consume")
	}
	if err := s.SetEarliestExitEpoch(earliestExitEpoch); err != nil {
		return nil, errors.Wrap(err, "failed to set earliest exit epoch")
	}
	if err := s.SetConsolidationBalanceToConsume(helpers.ConsolidationChurnLimit(primitives.Gwei(tab))); err != nil {
		return nil, errors.Wrap(err, "failed to set consolidation balance to consume")
	}

	// Sorting preActivationIndices based on custom criteria
	vals := s.Validators()
	sort.Slice(preActivationIndices, func(i, j int) bool {
		// Comparing based on ActivationEligibilityEpoch and then by index if the epochs are the same
		if s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch == s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch {
		if vals[preActivationIndices[i]].ActivationEligibilityEpoch == vals[preActivationIndices[j]].ActivationEligibilityEpoch {
			return preActivationIndices[i] < preActivationIndices[j]
		}
		return s.Validators[preActivationIndices[i]].ActivationEligibilityEpoch < s.Validators[preActivationIndices[j]].ActivationEligibilityEpoch
		return vals[preActivationIndices[i]].ActivationEligibilityEpoch < vals[preActivationIndices[j]].ActivationEligibilityEpoch
	})

	// need to cast the beaconState to use in helper functions
	post, err := state_native.InitializeFromProtoUnsafeElectra(s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to initialize post electra beaconState")
	}

	for _, index := range preActivationIndices {
		if err := QueueEntireBalanceAndResetValidator(post, index); err != nil {
		if err := QueueEntireBalanceAndResetValidator(s, index); err != nil {
			return nil, errors.Wrap(err, "failed to queue entire balance and reset validator")
		}
	}

	// Ensure early adopters of compounding credentials go through the activation churn
	for _, index := range compoundWithdrawalIndices {
		if err := QueueExcessActiveBalance(post, index); err != nil {
		if err := QueueExcessActiveBalance(s, index); err != nil {
			return nil, errors.Wrap(err, "failed to queue excess active balance")
		}
	}

	return post, nil
	return s, nil
}

@@ -13,6 +13,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
	enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/runtime/version"
	"github.com/OffchainLabs/prysm/v6/time/slots"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/pkg/errors"
@@ -91,6 +92,18 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
	ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
	defer span.End()
	currentEpoch := slots.ToEpoch(st.Slot())
	if len(wrs) == 0 {
		return st, nil
	}
	// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
	// updated within InitiateValidatorExit which is the only function that uses it.
	var exitInfo *validators.ExitInfo
	if st.Version() < version.Electra {
		exitInfo = validators.ExitInformation(st)
	} else {
		// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
		exitInfo = &validators.ExitInfo{}
	}
	for _, wr := range wrs {
		if wr == nil {
			return nil, errors.New("nil execution layer withdrawal request")
@@ -148,7 +161,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
	// Only exit validator if it has no pending withdrawals in the queue
	if pendingBalanceToWithdraw == 0 {
		var err error
		st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
		// exitInfo is updated within InitiateValidatorExit
		st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
		if err != nil {
			return nil, err
		}

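The comments in this hunk explain why hoisting `ExitInformation` out of the loop is safe: the callee mutates the shared `ExitInfo` pointer, so churn accounting stays current across iterations. A reduced sketch of that compute-once, mutate-via-pointer shape (the churn threshold of 4 is made up for illustration):

	package main

	import "fmt"

	type ExitInfo struct {
		HighestExitEpoch uint64
		Churn            uint64
	}

	// initiateExit mimics how InitiateValidatorExit keeps the shared ExitInfo
	// up to date: each call reads and mutates the same pointer, so the caller
	// never needs to recompute it inside the loop.
	func initiateExit(info *ExitInfo) {
		info.Churn++
		if info.Churn > 4 { // pretend only 4 exits fit per epoch
			info.HighestExitEpoch++
			info.Churn = 0
		}
	}

	func main() {
		info := &ExitInfo{HighestExitEpoch: 10} // computed once, before the loop
		for i := 0; i < 6; i++ {
			initiateExit(info)
		}
		fmt.Println(info.HighestExitEpoch, info.Churn) // 11 1
	}
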
@@ -96,12 +96,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
	}

	// Process validators eligible for ejection.
	for _, idx := range eligibleForEjection {
		// It is fine to do a quadratic loop here since this should
		// barely happen
		st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
		if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
			return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
	if len(eligibleForEjection) > 0 {
		// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
		// updated within InitiateValidatorExit which is the only function that uses it.
		exitInfo := validators.ExitInformation(st)
		for _, idx := range eligibleForEjection {
			// It is fine to do a quadratic loop here since this should
			// barely happen
			st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
			if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
				return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
			}
		}
	}

@@ -228,7 +233,7 @@ func ProcessSlashings(st state.BeaconState) error {
	// a callback is used here to apply the following actions to all validators
	// below equally.
	increment := params.BeaconConfig().EffectiveBalanceIncrement
	minSlashing := math.Min(totalSlashing*slashingMultiplier, totalBalance)
	minSlashing := min(totalSlashing*slashingMultiplier, totalBalance)

	// Modified in Electra:EIP7251
	var penaltyPerEffectiveBalanceIncrement uint64

@@ -5,7 +5,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/math"
)

// ProcessSlashingsPrecompute processes the slashed validators during epoch processing.
@@ -21,7 +20,7 @@ func ProcessSlashingsPrecompute(s state.BeaconState, pBal *Balance) error {
	totalSlashing += slashing
	}

	minSlashing := math.Min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
	minSlashing := min(totalSlashing*params.BeaconConfig().ProportionalSlashingMultiplier, pBal.ActiveCurrentEpoch)
	epochToWithdraw := currentEpoch + exitLength/2

	var hasSlashing bool

@@ -7,6 +7,7 @@ go_library(
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//cmd/prysmctl/testnet:__pkg__",
|
||||
"//consensus-types/hdiff:__subpackages__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
"//validator/client:__pkg__",
|
||||
],
|
||||
|
||||
@@ -15,6 +15,7 @@ go_library(
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//monitoring/tracing/trace:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v6/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
@@ -17,6 +18,25 @@ import (
|
||||
// UpgradeToFulu updates inputs a generic state to return the version Fulu state.
|
||||
// https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/fork.md#upgrading-the-state
|
||||
func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
s, err := ConvertToFulu(beaconState)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not convert to fulu")
|
||||
}
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pl := make([]primitives.ValidatorIndex, len(proposerLookahead))
|
||||
for i, v := range proposerLookahead {
|
||||
pl[i] = primitives.ValidatorIndex(v)
|
||||
}
|
||||
if err := s.SetProposerLookahead(pl); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to set proposer lookahead")
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func ConvertToFulu(beaconState state.BeaconState) (state.BeaconState, error) {
|
||||
currentSyncCommittee, err := beaconState.CurrentSyncCommittee()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -105,11 +125,6 @@ func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.Be
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
proposerLookahead, err := helpers.InitializeProposerLookahead(ctx, beaconState, slots.ToEpoch(beaconState.Slot()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := ðpb.BeaconStateFulu{
|
||||
GenesisTime: uint64(beaconState.GenesisTime().Unix()),
|
||||
GenesisValidatorsRoot: beaconState.GenesisValidatorsRoot(),
|
||||
@@ -171,14 +186,6 @@ func UpgradeToFulu(ctx context.Context, beaconState state.BeaconState) (state.Be
|
||||
PendingDeposits: pendingDeposits,
|
||||
PendingPartialWithdrawals: pendingPartialWithdrawals,
|
||||
PendingConsolidations: pendingConsolidations,
|
||||
ProposerLookahead: proposerLookahead,
|
||||
}
|
||||
|
||||
// Need to cast the beaconState to use in helper functions
|
||||
post, err := state_native.InitializeFromProtoUnsafeFulu(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to initialize post fulu beaconState")
|
||||
}
|
||||
|
||||
return post, nil
|
||||
return state_native.InitializeFromProtoUnsafeFulu(s)
|
||||
}
|
||||
|
@@ -10,6 +10,7 @@ go_library(
        "legacy.go",
        "metrics.go",
        "randao.go",
        "ranges.go",
        "rewards_penalties.go",
        "shuffle.go",
        "sync_committee.go",
@@ -56,6 +57,7 @@ go_test(
        "private_access_fuzz_noop_test.go",  # keep
        "private_access_test.go",
        "randao_test.go",
        "ranges_test.go",
        "rewards_penalties_test.go",
        "shuffle_test.go",
        "sync_committee_test.go",

@@ -399,7 +399,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
	ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
	defer span.End()

	// Verify if the epoch is valid for assignment based on the provided state.
	if err := VerifyAssignmentEpoch(epoch, state); err != nil {
		return nil, err
	}
@@ -407,12 +406,15 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
	if err != nil {
		return nil, err
	}
	vals := make(map[primitives.ValidatorIndex]struct{})

	// Deduplicate and make set for O(1) membership checks.
	vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
	for _, v := range validators {
		vals[v] = struct{}{}
	}
	assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
	// Compute committee assignments for each slot in the epoch.
	remaining := len(vals)

	assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
	for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
		committees, err := BeaconCommittees(ctx, state, slot)
		if err != nil {
@@ -420,7 +422,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
		}
		for j, committee := range committees {
			for _, vIndex := range committee {
				if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
				if _, ok := vals[vIndex]; !ok {
					continue
				}
				if _, ok := assignments[vIndex]; !ok {
@@ -429,6 +431,11 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
				assignments[vIndex].Committee = committee
				assignments[vIndex].AttesterSlot = slot
				assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
				delete(vals, vIndex)
				remaining--
				if remaining == 0 {
					return assignments, nil // early exit
				}
			}
		}
	}

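The optimization above combines three techniques: deduplicating the requested indices into a pre-sized set, deleting each index once it is assigned, and returning early once nothing remains. A distilled, self-contained version of the shape (not the Prysm code itself):

	package main

	import "fmt"

	// assignFirstSeen maps each requested id to the first slot where it
	// appears, mirroring the set + early-exit shape of CommitteeAssignments.
	func assignFirstSeen(requested []int, slots [][]int) map[int]int {
		want := make(map[int]struct{}, len(requested)) // dedupe + O(1) lookups
		for _, id := range requested {
			want[id] = struct{}{}
		}
		out := make(map[int]int, len(want))
		remaining := len(want)
		for slot, members := range slots {
			for _, id := range members {
				if _, ok := want[id]; !ok {
					continue
				}
				out[id] = slot
				delete(want, id) // never assign the same id twice
				remaining--
				if remaining == 0 {
					return out // early exit: every requested id found
				}
			}
		}
		return out
	}

	func main() {
		fmt.Println(assignFirstSeen([]int{2, 5, 5}, [][]int{{1, 2}, {5, 9}, {7}}))
		// map[2:0 5:1]
	}
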
beacon-chain/core/helpers/ranges.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package helpers

import (
	"fmt"
	"slices"
)

// SortedSliceFromMap takes a map with uint64 keys and returns a sorted slice of the keys.
func SortedSliceFromMap(toSort map[uint64]bool) []uint64 {
	slice := make([]uint64, 0, len(toSort))
	for key := range toSort {
		slice = append(slice, key)
	}

	slices.Sort(slice)
	return slice
}

// PrettySlice returns a pretty string representation of a sorted slice of uint64.
// `sortedSlice` must be sorted in ascending order.
// Example: [1,2,3,5,6,7,8,10] -> "1-3,5-8,10"
func PrettySlice(sortedSlice []uint64) string {
	if len(sortedSlice) == 0 {
		return ""
	}

	var result string
	start := sortedSlice[0]
	end := sortedSlice[0]

	for i := 1; i < len(sortedSlice); i++ {
		if sortedSlice[i] == end+1 {
			end = sortedSlice[i]
			continue
		}

		if start == end {
			result += fmt.Sprintf("%d,", start)
			start = sortedSlice[i]
			end = sortedSlice[i]
			continue
		}

		result += fmt.Sprintf("%d-%d,", start, end)
		start = sortedSlice[i]
		end = sortedSlice[i]
	}

	if start == end {
		result += fmt.Sprintf("%d", start)
		return result
	}

	result += fmt.Sprintf("%d-%d", start, end)
	return result
}

// SortedPrettySliceFromMap combines SortedSliceFromMap and PrettySlice to return a pretty string representation of the keys in a map.
func SortedPrettySliceFromMap(toSort map[uint64]bool) string {
	sorted := SortedSliceFromMap(toSort)
	return PrettySlice(sorted)
}

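PrettySlice builds its output with `+=`, which reallocates the string on each append. For the small index sets it formats that is fine; for very large inputs a `strings.Builder` variant (a sketch, not the committed code) writes each range exactly once:

	package helpers

	import (
		"fmt"
		"strings"
	)

	// prettySliceBuilder behaves like PrettySlice but accumulates into a
	// strings.Builder, avoiding repeated string reallocation.
	func prettySliceBuilder(sorted []uint64) string {
		if len(sorted) == 0 {
			return ""
		}
		var b strings.Builder
		start, end := sorted[0], sorted[0]
		flush := func() {
			if b.Len() > 0 {
				b.WriteByte(',')
			}
			if start == end {
				fmt.Fprintf(&b, "%d", start)
			} else {
				fmt.Fprintf(&b, "%d-%d", start, end)
			}
		}
		for _, v := range sorted[1:] {
			if v == end+1 { // extend the current run
				end = v
				continue
			}
			flush() // emit the finished run, then start a new one
			start, end = v, v
		}
		flush()
		return b.String()
	}
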
beacon-chain/core/helpers/ranges_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package helpers_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestSortedSliceFromMap(t *testing.T) {
	input := map[uint64]bool{5: true, 3: true, 8: true, 1: true}
	expected := []uint64{1, 3, 5, 8}

	actual := helpers.SortedSliceFromMap(input)
	require.Equal(t, len(expected), len(actual))

	for i := range expected {
		require.Equal(t, expected[i], actual[i])
	}
}

func TestPrettySlice(t *testing.T) {
	tests := []struct {
		name     string
		input    []uint64
		expected string
	}{
		{
			name:     "empty slice",
			input:    []uint64{},
			expected: "",
		},
		{
			name:     "only distinct elements",
			input:    []uint64{1, 3, 5, 7, 9},
			expected: "1,3,5,7,9",
		},
		{
			name:     "single range",
			input:    []uint64{1, 2, 3, 4, 5},
			expected: "1-5",
		},
		{
			name:     "multiple ranges and distinct elements",
			input:    []uint64{1, 2, 3, 5, 6, 7, 8, 10, 12, 13, 14},
			expected: "1-3,5-8,10,12-14",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			actual := helpers.PrettySlice(tt.input)
			require.Equal(t, tt.expected, actual)
		})
	}
}

func TestSortedPrettySliceFromMap(t *testing.T) {
	input := map[uint64]bool{5: true, 7: true, 8: true, 10: true}
	expected := "5,7-8,10"

	actual := helpers.SortedPrettySliceFromMap(input)
	require.Equal(t, expected, actual)
}

@@ -79,7 +79,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) {
	}

	// Spec defines `EffectiveBalanceIncrement` as min to avoid divisions by zero.
	total = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, total)
	total = max(params.BeaconConfig().EffectiveBalanceIncrement, total)
	if err := balanceCache.AddTotalEffectiveBalance(s, total); err != nil {
		return 0, err
	}

@@ -14,7 +14,6 @@ import (
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v6/math"
	v1alpha1 "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v6/time/slots"
)
@@ -95,7 +94,7 @@ func ComputeWeakSubjectivityPeriod(ctx context.Context, st state.ReadOnlyBeaconS
	if T*(200+3*D) < t*(200+12*D) {
		epochsForValidatorSetChurn := N * (t*(200+12*D) - T*(200+3*D)) / (600 * delta * (2*t + T))
		epochsForBalanceTopUps := N * (200 + 3*D) / (600 * Delta)
		wsp += math.Max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
		wsp += max(epochsForValidatorSetChurn, epochsForBalanceTopUps)
	} else {
		wsp += 3 * N * D * t / (200 * Delta * (T - t))
	}

@@ -43,6 +43,13 @@ func VerifyDataColumnSidecar(sidecar blocks.RODataColumn) error {
		return ErrNoKzgCommitments
	}

	// A sidecar with more commitments than the max blob count for this block is invalid.
	slot := sidecar.Slot()
	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
	if len(sidecar.KzgCommitments) > maxBlobsPerBlock {
		return ErrTooManyCommitments
	}

	// The column length must be equal to the number of commitments/proofs.
	if len(sidecar.Column) != len(sidecar.KzgCommitments) || len(sidecar.Column) != len(sidecar.KzgProofs) {
		return ErrMismatchLength
@@ -72,10 +79,30 @@ func VerifyDataColumnsSidecarKZGProofs(sidecars []blocks.RODataColumn) error {

	for _, sidecar := range sidecars {
		for i := range sidecar.Column {
			commitments = append(commitments, kzg.Bytes48(sidecar.KzgCommitments[i]))
			var (
				commitment kzg.Bytes48
				cell       kzg.Cell
				proof      kzg.Bytes48
			)

			commitmentBytes := sidecar.KzgCommitments[i]
			cellBytes := sidecar.Column[i]
			proofBytes := sidecar.KzgProofs[i]

			if len(commitmentBytes) != len(commitment) ||
				len(cellBytes) != len(cell) ||
				len(proofBytes) != len(proof) {
				return ErrMismatchLength
			}

			copy(commitment[:], commitmentBytes)
			copy(cell[:], cellBytes)
			copy(proof[:], proofBytes)

			commitments = append(commitments, commitment)
			indices = append(indices, sidecar.Index)
			cells = append(cells, kzg.Cell(sidecar.Column[i]))
			proofs = append(proofs, kzg.Bytes48(sidecar.KzgProofs[i]))
			cells = append(cells, cell)
			proofs = append(proofs, proof)
		}
	}

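The rewritten loop replaces direct conversions like `kzg.Bytes48(sidecar.KzgCommitments[i])` with an explicit length check followed by `copy`: converting a slice of the wrong length to a Go array type panics at runtime, which is unacceptable for untrusted network input. A minimal illustration, with an assumed 48-byte array type:

	package main

	import (
		"errors"
		"fmt"
	)

	type Bytes48 [48]byte

	// toBytes48 validates before copying; a direct Bytes48(b) conversion
	// would panic if len(b) != 48, e.g. on a malformed sidecar from a peer.
	func toBytes48(b []byte) (Bytes48, error) {
		var out Bytes48
		if len(b) != len(out) {
			return out, errors.New("mismatched length")
		}
		copy(out[:], b)
		return out, nil
	}

	func main() {
		_, err := toBytes48(make([]byte, 47))
		fmt.Println(err) // mismatched length, returned instead of panicking
	}
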
@@ -18,38 +18,46 @@ import (
)

func TestVerifyDataColumnSidecar(t *testing.T) {
	t.Run("index too large", func(t *testing.T) {
		roSidecar := createTestSidecar(t, 1_000_000, nil, nil, nil)
		err := peerdas.VerifyDataColumnSidecar(roSidecar)
		require.ErrorIs(t, err, peerdas.ErrIndexTooLarge)
	})
	testCases := []struct {
		name             string
		index            uint64
		blobCount        int
		commitmentCount  int
		proofCount       int
		maxBlobsPerBlock uint64
		expectedError    error
	}{
		{name: "index too large", index: 1_000_000, expectedError: peerdas.ErrIndexTooLarge},
		{name: "no commitments", expectedError: peerdas.ErrNoKzgCommitments},
		{name: "too many commitments", blobCount: 10, commitmentCount: 10, proofCount: 10, maxBlobsPerBlock: 2, expectedError: peerdas.ErrTooManyCommitments},
		{name: "commitments size mismatch", commitmentCount: 1, maxBlobsPerBlock: 1, expectedError: peerdas.ErrMismatchLength},
		{name: "proofs size mismatch", blobCount: 1, commitmentCount: 1, maxBlobsPerBlock: 1, expectedError: peerdas.ErrMismatchLength},
		{name: "nominal", blobCount: 1, commitmentCount: 1, proofCount: 1, maxBlobsPerBlock: 1, expectedError: nil},
	}

	t.Run("no commitments", func(t *testing.T) {
		roSidecar := createTestSidecar(t, 0, nil, nil, nil)
		err := peerdas.VerifyDataColumnSidecar(roSidecar)
		require.ErrorIs(t, err, peerdas.ErrNoKzgCommitments)
	})
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			params.SetupTestConfigCleanup(t)
			cfg := params.BeaconConfig()
			cfg.FuluForkEpoch = 0
			cfg.BlobSchedule = []params.BlobScheduleEntry{{Epoch: 0, MaxBlobsPerBlock: tc.maxBlobsPerBlock}}
			params.OverrideBeaconConfig(cfg)

	t.Run("KZG commitments size mismatch", func(t *testing.T) {
		kzgCommitments := make([][]byte, 1)
		roSidecar := createTestSidecar(t, 0, nil, kzgCommitments, nil)
		err := peerdas.VerifyDataColumnSidecar(roSidecar)
		require.ErrorIs(t, err, peerdas.ErrMismatchLength)
	})
			column := make([][]byte, tc.blobCount)
			kzgCommitments := make([][]byte, tc.commitmentCount)
			kzgProof := make([][]byte, tc.proofCount)

	t.Run("KZG proofs size mismatch", func(t *testing.T) {
		column, kzgCommitments := make([][]byte, 1), make([][]byte, 1)
		roSidecar := createTestSidecar(t, 0, column, kzgCommitments, nil)
		err := peerdas.VerifyDataColumnSidecar(roSidecar)
		require.ErrorIs(t, err, peerdas.ErrMismatchLength)
	})
			roSidecar := createTestSidecar(t, tc.index, column, kzgCommitments, kzgProof)
			err := peerdas.VerifyDataColumnSidecar(roSidecar)

	t.Run("nominal", func(t *testing.T) {
		column, kzgCommitments, kzgProofs := make([][]byte, 1), make([][]byte, 1), make([][]byte, 1)
		roSidecar := createTestSidecar(t, 0, column, kzgCommitments, kzgProofs)
		err := peerdas.VerifyDataColumnSidecar(roSidecar)
		require.NoError(t, err)
	})
			if tc.expectedError != nil {
				require.ErrorIs(t, err, tc.expectedError)
				return
			}

			require.NoError(t, err)
		})
	}
}

func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
@@ -60,6 +68,14 @@ func TestVerifyDataColumnSidecarKZGProofs(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	t.Run("size mismatch", func(t *testing.T) {
		sidecars := generateRandomSidecars(t, seed, blobCount)
		sidecars[0].Column[0] = sidecars[0].Column[0][:len(sidecars[0].Column[0])-1] // Remove one byte to create size mismatch

		err := peerdas.VerifyDataColumnsSidecarKZGProofs(sidecars)
		require.ErrorIs(t, err, peerdas.ErrMismatchLength)
	})

	t.Run("invalid proof", func(t *testing.T) {
		sidecars := generateRandomSidecars(t, seed, blobCount)
		sidecars[0].Column[0][0]++ // It is OK to overflow

@@ -1,6 +1,8 @@
package peerdas

import (
	"sort"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
@@ -28,7 +30,8 @@ func MinimumColumnCountToReconstruct() uint64 {

// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
// All input sidecars must be committed to the same block.
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
// `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicates.
// WARNING: This function sorts inplace `verifiedRoSidecars` by index.
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	// Check if there is at least one input sidecar.
	if len(verifiedRoSidecars) == 0 {
@@ -51,18 +54,17 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
		}
	}

	// Deduplicate sidecars.
	sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
	for _, inVerifiedRoSidecar := range verifiedRoSidecars {
		sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
	}

	// Check if there are enough sidecars to reconstruct the missing columns.
	sidecarCount := len(sidecarByIndex)
	sidecarCount := len(verifiedRoSidecars)
	if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
		return nil, ErrNotEnoughDataColumnSidecars
	}

	// Sort the input sidecars by index.
	sort.Slice(verifiedRoSidecars, func(i, j int) bool {
		return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
	})

	// Recover cells and compute proofs in parallel.
	var wg errgroup.Group
	cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
@@ -71,10 +73,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
		cellsIndices := make([]uint64, 0, sidecarCount)
		cells := make([]kzg.Cell, 0, sidecarCount)

		for columnIndex, sidecar := range sidecarByIndex {
		for _, sidecar := range verifiedRoSidecars {
			cell := sidecar.Column[blobIndex]
			cells = append(cells, kzg.Cell(cell))
			cellsIndices = append(cellsIndices, columnIndex)
			cellsIndices = append(cellsIndices, sidecar.Index)
		}

		// Recover the cells and proofs for the corresponding blob
@@ -255,7 +257,7 @@ func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([
		return nil, errors.Wrap(err, "compute cells")
	}

	kzgProofs := make([]kzg.Proof, 0, numberOfColumns*kzg.BytesPerProof)
	kzgProofs := make([]kzg.Proof, 0, numberOfColumns)
	for _, kzgProofBytes := range blobAndProof.KzgProofs {
		if len(kzgProofBytes) != kzg.BytesPerProof {
			return nil, errors.New("wrong KZG proof size - should never happen")

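The "recover cells and compute proofs in parallel" step relies on `golang.org/x/sync/errgroup`. A reduced sketch of that per-blob fan-out shape, with `recoverBlob` as a stand-in for the real KZG recovery call:

	package main

	import (
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	// recoverBlob stands in for the per-blob KZG cell/proof recovery.
	func recoverBlob(i int) (string, error) {
		return fmt.Sprintf("blob-%d", i), nil
	}

	func recoverAll(blobCount int) ([]string, error) {
		results := make([]string, blobCount)
		var g errgroup.Group
		for i := 0; i < blobCount; i++ {
			i := i // capture per iteration (needed before Go 1.22)
			g.Go(func() error {
				r, err := recoverBlob(i)
				if err != nil {
					return err // the first error is surfaced by Wait
				}
				results[i] = r // each goroutine writes a distinct index: no race
				return nil
			})
		}
		if err := g.Wait(); err != nil {
			return nil, err
		}
		return results, nil
	}

	func main() {
		out, _ := recoverAll(3)
		fmt.Println(out) // [blob-0 blob-1 blob-2]
	}
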
@@ -125,11 +125,12 @@ func TestReconstructDataColumnSidecars(t *testing.T) {
}

func TestReconstructBlobs(t *testing.T) {
	// Start the trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
	params.SetupTestConfigCleanup(t)
	params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2

	require.NoError(t, kzg.Start())
	var emptyBlock blocks.ROBlock
	fs := util.SlotAtEpoch(t, params.BeaconConfig().FuluForkEpoch)

	t.Run("no index", func(t *testing.T) {
		actual, err := peerdas.ReconstructBlobs(emptyBlock, nil, nil)
@@ -190,10 +191,10 @@ func TestReconstructBlobs(t *testing.T) {
	})

	t.Run("not committed to the same block", func(t *testing.T) {
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}))
		roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}))
		_, _, verifiedRoSidecars := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{1}), util.WithSlot(fs))
		roBlock, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3, util.WithParentRoot([fieldparams.RootLength]byte{2}), util.WithSlot(fs))

		_, err = peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
		_, err := peerdas.ReconstructBlobs(roBlock, verifiedRoSidecars, []int{0})
		require.ErrorContains(t, peerdas.ErrRootMismatch.Error(), err)
	})

@@ -440,6 +441,7 @@ func TestComputeCellsAndProofsFromStructured(t *testing.T) {
	for i := range blobCount {
		require.Equal(t, len(expectedCellsAndProofs[i].Cells), len(actualCellsAndProofs[i].Cells))
		require.Equal(t, len(expectedCellsAndProofs[i].Proofs), len(actualCellsAndProofs[i].Proofs))
		require.Equal(t, len(expectedCellsAndProofs[i].Proofs), cap(actualCellsAndProofs[i].Proofs))

		// Compare cells
		for j, expectedCell := range expectedCellsAndProofs[i].Cells {

@@ -16,61 +16,60 @@ func TestDataColumnsAlignWithBlock(t *testing.T) {
	err := kzg.Start()
	require.NoError(t, err)

	params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
	fs := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
	require.NoError(t, err)
	fuluMax := params.BeaconConfig().MaxBlobsPerBlock(fs)
	t.Run("pre fulu", func(t *testing.T) {
		block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, 0, 0)
		block, _ := util.GenerateTestElectraBlockWithSidecar(t, [fieldparams.RootLength]byte{}, fs, 0)
		err := peerdas.DataColumnsAlignWithBlock(block, nil)
		require.NoError(t, err)
	})

	t.Run("too many commitmnets", func(t *testing.T) {
		params.SetupTestConfigCleanup(t)
		config := params.BeaconConfig()
		config.BlobSchedule = []params.BlobScheduleEntry{{}}
		params.OverrideBeaconConfig(config)

		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 3)
	t.Run("too many commitments", func(t *testing.T) {
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, fuluMax+1, util.WithSlot(fs))
		err := peerdas.DataColumnsAlignWithBlock(block, nil)
		require.ErrorIs(t, err, peerdas.ErrTooManyCommitments)
	})

	t.Run("root mismatch", func(t *testing.T) {
		_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0)
		_, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 0, util.WithSlot(fs))
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrRootMismatch)
	})

	t.Run("column size mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		sidecars[0].Column = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})

	t.Run("KZG commitments size mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		sidecars[0].KzgCommitments = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})

	t.Run("KZG proofs mismatch", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		sidecars[0].KzgProofs = [][]byte{}
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.ErrorIs(t, err, peerdas.ErrBlockColumnSizeMismatch)
	})

	t.Run("commitment mismatch", func(t *testing.T) {
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2)
		block, _, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		_, alteredSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		alteredSidecars[1].KzgCommitments[0][0]++ // Overflow is OK
		err := peerdas.DataColumnsAlignWithBlock(block, alteredSidecars)
		require.ErrorIs(t, err, peerdas.ErrCommitmentMismatch)
	})

	t.Run("nominal", func(t *testing.T) {
		block, sidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, 2, util.WithSlot(fs))
		err := peerdas.DataColumnsAlignWithBlock(block, sidecars)
		require.NoError(t, err)
	})

@@ -10,6 +10,7 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
@@ -378,9 +379,16 @@ func ProcessBlockForStateRoot(
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	var err error

	exitInfo := v.ExitInformation(st)
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return nil, errors.Wrap(err, "could not update total active balance cache")
	hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
	// exitInfo is only needed for voluntary exits pre Electra.
	hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
	exitInfo := &validators.ExitInfo{}
	if hasSlashings || hasExits {
		// ExitInformation is expensive to compute, only do it if we need it.
		exitInfo = v.ExitInformation(st)
		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
			return nil, errors.Wrap(err, "could not update total active balance cache")
		}
	}
	st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
	if err != nil {
@@ -407,10 +415,15 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
// This calls phase 0 block operations.
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
	var err error

	exitInfo := v.ExitInformation(st)
	if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
		return nil, errors.Wrap(err, "could not update total active balance cache")
	hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
	hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
	var exitInfo *v.ExitInfo
	if hasSlashings || hasExits {
		// ExitInformation is expensive to compute, only do it if we need it.
		exitInfo = v.ExitInformation(st)
		if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
			return nil, errors.Wrap(err, "could not update total active balance cache")
		}
	}
	st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
	if err != nil {

||||
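Both hunks above apply the same pattern: defer an expensive full-state scan until a block operation actually needs its result. A minimal, self-contained Go sketch of that guard (the ExitInfo struct and computeExitInfo helper here are hypothetical stand-ins, not Prysm's definitions):

package main

import "fmt"

// ExitInfo is a stand-in for the expensive-to-compute exit summary.
type ExitInfo struct{ TotalActiveBalance uint64 }

// computeExitInfo represents the costly scan over all validators.
func computeExitInfo() *ExitInfo {
	fmt.Println("expensive scan ran")
	return &ExitInfo{TotalActiveBalance: 32_000_000_000}
}

// processBlock only pays for the scan when slashings or exits must be processed.
func processBlock(numSlashings, numExits int) *ExitInfo {
	var exitInfo *ExitInfo
	if numSlashings > 0 || numExits > 0 {
		// Expensive: only compute when an operation will consume it.
		exitInfo = computeExitInfo()
	}
	return exitInfo
}

func main() {
	processBlock(0, 0) // scan skipped
	processBlock(1, 0) // scan runs
}

Blocks without slashings or exits are the common case, so skipping the scan there avoids the cost on nearly every block.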
@@ -13,7 +13,6 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/math"
mathutil "github.com/OffchainLabs/prysm/v6/math"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
@@ -60,7 +59,7 @@ func ExitInformation(s state.BeaconState) *ExitInfo {
_ = err

// Apply minimum balance as per spec
exitInfo.TotalActiveBalance = mathutil.Max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
exitInfo.TotalActiveBalance = max(params.BeaconConfig().EffectiveBalanceIncrement, totalActiveBalance)
return exitInfo
}

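The swap from mathutil.Max to the bare max works because Go 1.21 added generic min/max builtins for ordered types, which makes single-type helper functions like this redundant. A quick illustration:

package main

import "fmt"

func main() {
	// Since Go 1.21, max and min are language builtins and work on any
	// ordered type, so a uint64-specific helper is no longer needed.
	const increment uint64 = 1_000_000_000
	var totalActiveBalance uint64 = 0
	fmt.Println(max(increment, totalActiveBalance)) // 1000000000
}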
@@ -98,7 +97,9 @@ func InitiateValidatorExit(
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
return s, ErrValidatorAlreadyExited
}

if exitInfo == nil {
return nil, errors.New("exit info is required to process validator exit")
}
// Compute exit queue epoch.
if s.Version() < version.Electra {
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
@@ -177,6 +178,9 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
// if exit_queue_churn >= get_validator_churn_limit(state):
// exit_queue_epoch += Epoch(1)
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
if exitInfo == nil {
return errors.New("exit info is required to process validator exit")
}
if exitableEpoch > exitInfo.HighestExitEpoch {
exitInfo.HighestExitEpoch = exitableEpoch
exitInfo.Churn = 0
@@ -235,7 +239,9 @@ func SlashValidator(
exitInfo *ExitInfo,
) (state.BeaconState, error) {
var err error

if exitInfo == nil {
return nil, errors.New("exit info is required to slash validator")
}
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)

@@ -18,13 +18,16 @@ import (
)

func Test_commitmentsToCheck(t *testing.T) {
params.SetupTestConfigCleanup(t)
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
fulu := primitives.Slot(params.BeaconConfig().FuluForkEpoch) * params.BeaconConfig().SlotsPerEpoch
windowSlots, err := slots.EpochEnd(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
commits := [][]byte{
bytesutil.PadTo([]byte("a"), 48),
bytesutil.PadTo([]byte("b"), 48),
bytesutil.PadTo([]byte("c"), 48),
bytesutil.PadTo([]byte("d"), 48),
windowSlots = windowSlots + primitives.Slot(params.BeaconConfig().FuluForkEpoch)
maxBlobs := params.LastNetworkScheduleEntry().MaxBlobsPerBlock
commits := make([][]byte, maxBlobs+1)
for i := 0; i < len(commits); i++ {
commits[i] = bytesutil.PadTo([]byte{byte(i)}, 48)
}
cases := []struct {
name string
@@ -47,41 +50,40 @@ func Test_commitmentsToCheck(t *testing.T) {
{
name: "commitments within da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Body.BlobKzgCommitments = commits
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
d.Block.Slot = fulu + 100
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
commits: commits,
slot: 100,
commits: commits[:maxBlobs],
slot: fulu + 100,
},
{
name: "commitments outside da",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
d.Block.Body.BlobKzgCommitments = commits[:maxBlobs]
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
require.NoError(t, err)
return rb
},
slot: windowSlots + 1,
slot: fulu + windowSlots + 1,
},
{
name: "excessive commitments",
block: func(t *testing.T) blocks.ROBlock {
d := util.NewBeaconBlockDeneb()
d.Block.Slot = 100
d := util.NewBeaconBlockFulu()
d.Block.Slot = fulu + 100
// block is from slot 0, "current slot" is window size +1 (so outside the window)
d.Block.Body.BlobKzgCommitments = commits
// Double the number of commitments, assert that this is over the limit
d.Block.Body.BlobKzgCommitments = append(commits, d.Block.Body.BlobKzgCommitments...)
sb, err := blocks.NewSignedBeaconBlock(d)
require.NoError(t, err)
rb, err := blocks.NewROBlock(sb)
@@ -115,67 +117,69 @@ func Test_commitmentsToCheck(t *testing.T) {
func TestLazilyPersistent_Missing(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, scs: blobSidecars}
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[2]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[2]))
err := as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All but one persisted, return missing idx
require.NoError(t, as.Persist(1, blobSidecars[0]))
err = as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err = as.IsDataAvailable(ctx, ds, blk)
require.ErrorIs(t, err, errMissingSidecar)

// All persisted, return nil
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))

require.NoError(t, as.IsDataAvailable(ctx, 1, blk))
require.NoError(t, as.IsDataAvailable(ctx, ds, blk))
}

func TestLazilyPersistent_Mismatch(t *testing.T) {
ctx := t.Context()
store := filesystem.NewEphemeralBlobStorage(t)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)

blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 3)
blk, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 3)

mbv := &mockBlobBatchVerifier{t: t, err: errors.New("kzg check should not run")}
blobSidecars[0].KzgCommitment = bytesutil.PadTo([]byte("nope"), 48)
as := NewLazilyPersistentStore(store, mbv)

// Only one commitment persisted, should return error with other indices
require.NoError(t, as.Persist(1, blobSidecars[0]))
err := as.IsDataAvailable(ctx, 1, blk)
require.NoError(t, as.Persist(ds, blobSidecars[0]))
err := as.IsDataAvailable(ctx, ds, blk)
require.NotNil(t, err)
require.ErrorIs(t, err, errCommitmentMismatch)
}

func TestLazyPersistOnceCommitted(t *testing.T) {
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 6)
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, blobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 6)

as := NewLazilyPersistentStore(filesystem.NewEphemeralBlobStorage(t), &mockBlobBatchVerifier{})
// stashes as expected
require.NoError(t, as.Persist(1, blobSidecars...))
require.NoError(t, as.Persist(ds, blobSidecars...))
// ignores duplicates
require.ErrorIs(t, as.Persist(1, blobSidecars...), ErrDuplicateSidecar)
require.ErrorIs(t, as.Persist(ds, blobSidecars...), ErrDuplicateSidecar)

// ignores index out of bound
blobSidecars[0].Index = 6
require.ErrorIs(t, as.Persist(1, blobSidecars[0]), errIndexOutOfBounds)

_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 4)
require.ErrorIs(t, as.Persist(ds, blobSidecars[0]), errIndexOutOfBounds)
_, moreBlobSidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, 4)

// ignores sidecars before the retention period
slotOOB, err := slots.EpochStart(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
require.NoError(t, err)
require.NoError(t, as.Persist(32+slotOOB, moreBlobSidecars[0]))
slotOOB := util.SlotAtEpoch(t, params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
slotOOB += ds + 32
require.NoError(t, as.Persist(slotOOB, moreBlobSidecars[0]))

// doesn't ignore new sidecars with a different block root
require.NoError(t, as.Persist(1, moreBlobSidecars...))
require.NoError(t, as.Persist(ds, moreBlobSidecars...))
}

type mockBlobBatchVerifier struct {

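The recurring change in the tests above is replacing hard-coded slot 1 with the first slot of a fork epoch via util.SlotAtEpoch, so blob-era fixtures actually land inside the Deneb window. A stand-alone sketch of the slot/epoch arithmetic involved, assuming mainnet's 32 slots per epoch (the constant and denebForkEpoch value are for illustration only):

package main

import "fmt"

const slotsPerEpoch = 32 // mainnet value; stand-in for params.BeaconConfig().SlotsPerEpoch

// slotAtEpoch mirrors what the tests use util.SlotAtEpoch for:
// anchoring test data at a fork's first slot instead of hard-coding slot 1.
func slotAtEpoch(epoch uint64) uint64 {
	return epoch * slotsPerEpoch
}

func main() {
	denebForkEpoch := uint64(269568) // mainnet Deneb epoch, for illustration
	fmt.Println(slotAtEpoch(denebForkEpoch)) // 8626176
}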
@@ -39,7 +39,7 @@ func filterTestCaseSetup(slot primitives.Slot, nBlobs int, onDisk []int, numExpe
entry := &blobCacheEntry{}
if len(onDisk) > 0 {
od := map[[32]byte][]int{blk.Root(): onDisk}
sumz := filesystem.NewMockBlobStorageSummarizer(t, od)
sumz := filesystem.NewMockBlobStorageSummarizer(t, slots.ToEpoch(slot), od)
sum := sumz.Summary(blk.Root())
entry.setDiskSummary(sum)
}

@@ -122,18 +122,18 @@ type BlobStorage struct {
func (bs *BlobStorage) WarmCache() {
start := time.Now()
if bs.layoutName == LayoutNameFlat {
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
log.Info("Blob filesystem cache warm-up started. This may take a few minutes")
} else {
log.Info("Blob filesystem cache warm-up started.")
log.Info("Blob filesystem cache warm-up started")
}

if err := warmCache(bs.layout, bs.cache); err != nil {
log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
log.WithError(err).Error("Error encountered while warming up blob filesystem cache")
}
if err := bs.migrateLayouts(); err != nil {
log.WithError(err).Error("Error encountered while migrating blob storage.")
log.WithError(err).Error("Error encountered while migrating blob storage")
}
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
}

// If any blob storage directories are found for layouts besides the configured layout, migrate them.

@@ -21,7 +21,8 @@ import (
)

func TestBlobStorage_SaveBlobData(t *testing.T) {
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, params.BeaconConfig().MaxBlobsPerBlock(1))
ds := util.SlotAtEpoch(t, params.BeaconConfig().DenebForkEpoch)
_, sidecars := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, ds, params.BeaconConfig().MaxBlobsPerBlock(ds))
testSidecars := verification.FakeVerifySliceForTest(t, sidecars)

t.Run("no error for duplicate", func(t *testing.T) {
@@ -127,21 +128,22 @@ func TestBlobStorage_SaveBlobData(t *testing.T) {
}

func TestBlobIndicesBounds(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
fs := afero.NewMemMapFs()
root := [32]byte{}

okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0)) - 1
writeFakeSSZ(t, fs, root, 0, okIdx)
okIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es)) - 1
writeFakeSSZ(t, fs, root, es, okIdx)
bs := NewWarmedEphemeralBlobStorageUsingFs(t, fs, WithLayout(LayoutNameByEpoch))
indices := bs.Summary(root).mask
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
expected := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
expected[okIdx] = true
for i := range expected {
require.Equal(t, expected[i], indices[i])
}

oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(0))
writeFakeSSZ(t, fs, root, 0, oobIdx)
oobIdx := uint64(params.BeaconConfig().MaxBlobsPerBlock(es))
writeFakeSSZ(t, fs, root, es, oobIdx)
// This now fails at cache warmup time.
require.ErrorIs(t, warmCache(bs.layout, bs.cache), errIndexOutOfBounds)
}

@@ -6,14 +6,17 @@ import (
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
)

func TestSlotByRoot_Summary(t *testing.T) {
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
ee := params.BeaconConfig().ElectraForkEpoch
es := util.SlotAtEpoch(t, ee)
noneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
allSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
lastSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
oneSet := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
firstSet[0] = true
lastSet[len(lastSet)-1] = true
oneSet[1] = true
@@ -53,7 +56,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
for _, c := range cases {
if c.expected != nil {
key := bytesutil.ToBytes32([]byte(c.name))
sc.cache[key] = BlobStorageSummary{epoch: 0, mask: c.expected}
sc.cache[key] = BlobStorageSummary{epoch: ee, mask: c.expected}
}
}
for _, c := range cases {
@@ -73,6 +76,7 @@ func TestSlotByRoot_Summary(t *testing.T) {
}

func TestAllAvailable(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
idxUpTo := func(u int) []int {
r := make([]int, u)
for i := range r {
@@ -125,13 +129,13 @@ func TestAllAvailable(t *testing.T) {
},
{
name: "out of bound is safe",
count: params.BeaconConfig().MaxBlobsPerBlock(0) + 1,
count: params.BeaconConfig().MaxBlobsPerBlock(es) + 1,
aa: false,
},
{
name: "max present",
count: params.BeaconConfig().MaxBlobsPerBlock(0),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(0)),
count: params.BeaconConfig().MaxBlobsPerBlock(es),
idxSet: idxUpTo(params.BeaconConfig().MaxBlobsPerBlock(es)),
aa: true,
},
{
@@ -143,7 +147,7 @@ func TestAllAvailable(t *testing.T) {
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(0))
mask := make([]bool, params.BeaconConfig().MaxBlobsPerBlock(es))
for _, idx := range c.idxSet {
mask[idx] = true
}

@@ -200,6 +200,7 @@ func (dcs *DataColumnStorage) WarmCache() {
fileMetadata, err := extractFileMetadata(path)
if err != nil {
log.WithError(err).Error("Error encountered while extracting file metadata")
return nil
}

// Open the data column filesystem file.
@@ -988,8 +989,8 @@ func filePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string
// extractFileMetadata extracts the metadata from a file path.
// If the path is not a leaf, it returns nil.
func extractFileMetadata(path string) (*fileMetadata, error) {
// Is this Windows friendly?
parts := strings.Split(path, "/")
// Use filepath.Separator to handle both Windows (\) and Unix (/) path separators
parts := strings.Split(path, string(filepath.Separator))
if len(parts) != 3 {
return nil, errors.Errorf("unexpected file %s", path)
}
@@ -1032,5 +1033,5 @@ func extractFileMetadata(path string) (*fileMetadata, error) {

// period computes the period of a given epoch.
func period(epoch primitives.Epoch) uint64 {
return uint64(epoch / params.BeaconConfig().MinEpochsForBlobsSidecarsRequest)
return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
}

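The separator fix above matters because filesystem walkers report paths using the OS-native separator, so splitting on a literal "/" silently fails on Windows. A minimal sketch of the portable split (splitStoragePath is a simplified stand-in, not Prysm's extractFileMetadata):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitStoragePath splits a walked path on the OS separator, so the same
// code yields three components on both Unix ("/") and Windows ("\").
func splitStoragePath(path string) []string {
	return strings.Split(path, string(filepath.Separator))
}

func main() {
	// filepath.Join produces the OS-native separator, matching what a
	// filesystem walk would report.
	p := filepath.Join("12", "1234", "0x8bb2.sszs")
	fmt.Println(splitStoragePath(p)) // [12 1234 0x8bb2.sszs] on any OS
}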
@@ -35,8 +35,9 @@ func (s DataColumnStorageSummary) HasIndex(index uint64) bool {

// HasAtLeastOneIndex returns true if at least one of the DataColumnSidecars at the given indices is available in the filesystem.
func (s DataColumnStorageSummary) HasAtLeastOneIndex(indices []uint64) bool {
size := uint64(len(s.mask))
for _, index := range indices {
if s.mask[index] {
if index < size && s.mask[index] {
return true
}
}

@@ -25,11 +25,11 @@ func TestHasIndex(t *testing.T) {
func TestHasAtLeastOneIndex(t *testing.T) {
summary := NewDataColumnStorageSummary(0, [fieldparams.NumberOfColumns]bool{false, true})

hasAtLeastOneIndex := summary.HasAtLeastOneIndex([]uint64{3, 1, 2})
require.Equal(t, true, hasAtLeastOneIndex)
actual := summary.HasAtLeastOneIndex([]uint64{3, 1, fieldparams.NumberOfColumns, 2})
require.Equal(t, true, actual)

hasAtLeastOneIndex = summary.HasAtLeastOneIndex([]uint64{3, 4, 2})
require.Equal(t, false, hasAtLeastOneIndex)
actual = summary.HasAtLeastOneIndex([]uint64{3, 4, fieldparams.NumberOfColumns, 2})
require.Equal(t, false, actual)
}

func TestCount(t *testing.T) {

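The hunk above hardens a mask lookup: without the size guard, a caller passing an index at or beyond the mask length triggers an index-out-of-range panic. A self-contained sketch of the bounds-checked pattern:

package main

import "fmt"

// hasAtLeastOne mirrors the hardened lookup: indices outside the mask are
// skipped rather than panicking with an out-of-range access.
func hasAtLeastOne(mask []bool, indices []uint64) bool {
	size := uint64(len(mask))
	for _, idx := range indices {
		if idx < size && mask[idx] {
			return true
		}
	}
	return false
}

func main() {
	mask := []bool{false, true}
	fmt.Println(hasAtLeastOne(mask, []uint64{3, 1, 128})) // true, no panic on 128
	fmt.Println(hasAtLeastOne(mask, []uint64{3, 128}))    // false
}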
@@ -3,6 +3,7 @@ package filesystem
import (
"encoding/binary"
"os"
"path/filepath"
"testing"

fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
@@ -725,3 +726,37 @@ func TestPrune(t *testing.T) {
require.Equal(t, true, compareSlices([]string{"0x0de28a18cae63cbc6f0b20dc1afb0b1df38da40824a5f09f92d485ade04de97f.sszs"}, dirs))
})
}

func TestExtractFileMetadata(t *testing.T) {
t.Run("Unix", func(t *testing.T) {
// Test with Unix-style path separators (/)
path := "12/1234/0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
metadata, err := extractFileMetadata(path)
if filepath.Separator == '/' {
// On Unix systems, this should succeed
require.NoError(t, err)
require.Equal(t, uint64(12), metadata.period)
require.Equal(t, primitives.Epoch(1234), metadata.epoch)
return
}

// On Windows systems, this should fail because it uses the wrong separator
require.NotNil(t, err)
})

t.Run("Windows", func(t *testing.T) {
// Test with Windows-style path separators (\)
path := "12\\1234\\0x8bb2f09de48c102635622dc27e6de03ae2b22639df7c33edbc8222b2ec423746.sszs"
metadata, err := extractFileMetadata(path)
if filepath.Separator == '\\' {
// On Windows systems, this should succeed
require.NoError(t, err)
require.Equal(t, uint64(12), metadata.period)
require.Equal(t, primitives.Epoch(1234), metadata.epoch)
return
}

// On Unix systems, this should fail because it uses the wrong separator
require.NotNil(t, err)
})
}

@@ -11,6 +11,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -60,12 +61,13 @@ func TestRootFromDir(t *testing.T) {
}

func TestSlotFromFile(t *testing.T) {
es := util.SlotAtEpoch(t, params.BeaconConfig().ElectraForkEpoch)
cases := []struct {
slot primitives.Slot
}{
{slot: 0},
{slot: 2},
{slot: 1123581321},
{slot: es + 0},
{slot: es + 2},
{slot: es + 1123581321},
{slot: math.MaxUint64},
}
for _, c := range cases {
@@ -243,39 +245,40 @@ func TestSlotFromBlob(t *testing.T) {
}

func TestIterationComplete(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
targets := []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
path: "by-epoch/0/1234/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
path: "by-epoch/%d/%d/0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/0.ssz",
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
path: "by-epoch/1/5330/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
path: "by-epoch/%d/%d/0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86/1.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/0.ssz",
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", -1+math.MaxUint64/32, 1),
slotOffset: 16,
path: "by-epoch/4096/16777216/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
path: "by-epoch/%d/%d/0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c/1.ssz",
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", -1+math.MaxUint64/32, 0),
slotOffset: 16,
path: "by-epoch/4096/16777217/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
path: "by-epoch/%d/%d/0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba/0.ssz",
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
path: "by-epoch/2/11235/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
path: "by-epoch/%d/%d/0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d/1.ssz",
},
}
fs := afero.NewMemMapFs()
@@ -299,6 +302,7 @@ func TestIterationComplete(t *testing.T) {
require.Equal(t, true, ok)
require.Equal(t, tar.ident.epoch, entry.epoch)
require.Equal(t, true, entry.HasIndex(tar.ident.index))
require.Equal(t, tar.path, byEpoch.sszPath(tar.ident))
path := fmt.Sprintf(tar.path, periodForEpoch(tar.ident.epoch), tar.ident.epoch)
require.Equal(t, path, byEpoch.sszPath(tar.ident))
}
}

@@ -4,10 +4,10 @@ import (
"os"
"testing"

"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/testing/require"
"github.com/OffchainLabs/prysm/v6/testing/util"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/spf13/afero"
)

@@ -18,9 +18,7 @@ func ezIdent(t *testing.T, rootStr string, epoch primitives.Epoch, index uint64)
}

func setupTestBlobFile(t *testing.T, ident blobIdent, offset primitives.Slot, fs afero.Fs, l fsLayout) {
slot, err := slots.EpochStart(ident.epoch)
require.NoError(t, err)
slot += offset
slot := util.SlotAtEpoch(t, ident.epoch) + offset
_, sc := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, slot, 1)
scb, err := sc[0].MarshalSSZ()
require.NoError(t, err)
@@ -53,6 +51,7 @@ func testAssertFsMigrated(t *testing.T, fs afero.Fs, ident blobIdent, before, af
}

func TestMigrations(t *testing.T) {
de := params.BeaconConfig().DenebForkEpoch
cases := []struct {
name string
forwardLayout string
@@ -65,18 +64,18 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
},
},
@@ -87,33 +86,33 @@ func TestMigrations(t *testing.T) {
forwardLayout: LayoutNameByEpoch,
targets: []migrationTestTarget{
{
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", 1234, 0),
ident: ezIdent(t, "0x0125e54c64c925018c9296965a5b622d9f5ab626c10917860dcfb6aa09a0a00b", de+1234, 0),
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 0),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 0),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", 5330, 1),
ident: ezIdent(t, "0x0127dba6fd30fdbb47e73e861d5c6e602b38ac3ddc945bb6a2fc4e10761e9a86", de+5330, 1),
slotOffset: 31,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 0),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", 16777216, 1),
ident: ezIdent(t, "0x0232521756a0b965eab2c2245d7ad85feaeaf5f427cd14d1a7531f9d555b415c", de+16777216, 1),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", 16777217, 0),
ident: ezIdent(t, "0x42eabe3d2c125410cd226de6f2825fb7575ab896c3f52e43de1fa29e4c809aba", de+16777217, 0),
slotOffset: 16,
migrated: true,
},
{
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", 11235, 1),
ident: ezIdent(t, "0x666cea5034e22bd3b849cb33914cad59afd88ee08e4d5bc0e997411c945fbc1d", de+11235, 1),
migrated: true,
},
},

@@ -88,11 +88,11 @@ func NewEphemeralBlobStorageWithMocker(t testing.TB) (*BlobMocker, *BlobStorage)
return &BlobMocker{fs: fs, bs: bs}, bs
}

func NewMockBlobStorageSummarizer(t *testing.T, set map[[32]byte][]int) BlobStorageSummarizer {
func NewMockBlobStorageSummarizer(t *testing.T, epoch primitives.Epoch, set map[[32]byte][]int) BlobStorageSummarizer {
c := newBlobStorageCache()
for k, v := range set {
for i := range v {
if err := c.ensure(blobIdent{root: k, epoch: 0, index: uint64(v[i])}); err != nil {
if err := c.ensure(blobIdent{root: k, epoch: epoch, index: uint64(v[i])}); err != nil {
t.Fatal(err)
}
}
@@ -126,7 +126,7 @@ func NewWarmedEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts

func NewEphemeralDataColumnStorageUsingFs(t testing.TB, fs afero.Fs, opts ...DataColumnStorageOption) *DataColumnStorage {
opts = append(opts,
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForBlobsSidecarsRequest),
WithDataColumnRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
WithDataColumnFs(fs),
)

@@ -142,6 +142,7 @@ func testRoots(n int) [][32]byte {
}

func TestLayoutPruneBefore(t *testing.T) {
electra := params.BeaconConfig().ElectraForkEpoch
roots := testRoots(10)
cases := []struct {
name string
@@ -153,27 +154,27 @@ func TestLayoutPruneBefore(t *testing.T) {
}{
{
name: "none pruned",
pruneBefore: 1,
pruneBefore: electra + 1,
pruned: []testIdent{},
remain: []testIdent{
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 1, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 0}},
},
},
{
name: "expected pruned before epoch",
pruneBefore: 3,
pruneBefore: electra + 3,
pruned: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: 2, index: 3}},
{offset: 0, blobIdent: blobIdent{root: roots[0], epoch: electra + 1, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[1], epoch: electra + 1, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[2], epoch: electra + 2, index: 0}},
{offset: 31, blobIdent: blobIdent{root: roots[3], epoch: electra + 2, index: 3}},
},
remain: []testIdent{
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: 4, index: 5}},
{offset: 0, blobIdent: blobIdent{root: roots[4], epoch: electra + 3, index: 2}}, // boundary
{offset: 31, blobIdent: blobIdent{root: roots[5], epoch: electra + 3, index: 0}}, // boundary
{offset: 0, blobIdent: blobIdent{root: roots[6], epoch: electra + 4, index: 1}},
{offset: 31, blobIdent: blobIdent{root: roots[7], epoch: electra + 4, index: 5}},
},
sum: pruneSummary{blobsPruned: 4},
},

@@ -132,6 +132,6 @@ func recoverStateSummary(ctx context.Context, tx *bolt.Tx, root []byte) error {
if err != nil {
return err
}
summaryBucket := tx.Bucket(stateBucket)
summaryBucket := tx.Bucket(stateSummaryBucket)
return summaryBucket.Put(root, summaryEnc)
}

@@ -137,3 +137,32 @@ func TestStore_FinalizedCheckpoint_StateMustExist(t *testing.T) {

require.ErrorContains(t, errMissingStateForCheckpoint.Error(), db.SaveFinalizedCheckpoint(ctx, cp))
}

// Regression test: verify that saving a checkpoint triggers recovery which writes
// the state summary into the correct stateSummaryBucket so that HasStateSummary/StateSummary see it.
func TestRecoverStateSummary_WritesToStateSummaryBucket(t *testing.T) {
db := setupDB(t)
ctx := t.Context()

// Create a block without saving a state or summary, so recovery is needed.
blk := util.HydrateSignedBeaconBlock(&ethpb.SignedBeaconBlock{})
root, err := blk.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(ctx, wsb))

// Precondition: summary not present yet.
require.Equal(t, false, db.HasStateSummary(ctx, root))

// Saving justified checkpoint should trigger recovery path calling recoverStateSummary.
cp := &ethpb.Checkpoint{Epoch: 2, Root: root[:]}
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))

// Postcondition: summary is visible via the public summary APIs (which read stateSummaryBucket).
require.Equal(t, true, db.HasStateSummary(ctx, root))
summary, err := db.StateSummary(ctx, root)
require.NoError(t, err)
require.NotNil(t, summary)
assert.DeepEqual(t, &ethpb.StateSummary{Slot: blk.Block.Slot, Root: root[:]}, summary)
}

@@ -954,7 +954,9 @@ func (s *Store) CleanUpDirtyStates(ctx context.Context, slotsPerArchivedPoint pr
deletedRoots := make([][32]byte, 0)

oRoot, err := s.OriginCheckpointBlockRoot(ctx)
if err != nil {
if err != nil && !errors.Is(err, ErrNotFoundOriginBlockRoot) {
// If the node did not use checkpoint sync, there will be no origin block root.
// Use zero hash which will never match any actual state root
return err
}

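The hunk above makes a lookup tolerant of a specific sentinel error: a missing origin root is expected for nodes that never checkpoint-synced, so only other errors should abort cleanup. A generic sketch of the errors.Is pattern (errNotFound and lookupOrigin are hypothetical stand-ins):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("origin block root not found")

// lookupOrigin simulates a store without a saved origin root.
func lookupOrigin() ([32]byte, error) { return [32]byte{}, errNotFound }

// cleanUp tolerates the sentinel "not found" error and proceeds with the
// zero root, which never matches a real state root; any other error aborts.
func cleanUp() error {
	oRoot, err := lookupOrigin()
	if err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	_ = oRoot // zero-valued root is safe to compare against
	return nil
}

func main() { fmt.Println(cleanUp()) } // <nil>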
@@ -10,6 +10,7 @@ import (

"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
"github.com/OffchainLabs/prysm/v6/config/params"
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
@@ -1283,3 +1284,50 @@ func BenchmarkState_CheckStateSaveTime_10(b *testing.B) { checkStateSaveTime(b,

func BenchmarkState_CheckStateReadTime_1(b *testing.B) { checkStateReadTime(b, 1) }
func BenchmarkState_CheckStateReadTime_10(b *testing.B) { checkStateReadTime(b, 10) }

func TestStore_CleanUpDirtyStates_NoOriginRoot(t *testing.T) {
// This test verifies that CleanUpDirtyStates does not fail when the origin block root is not set,
// which can happen when starting from genesis or in certain fork scenarios like Fulu.
db := setupDB(t)
genesisState, err := util.NewBeaconState()
require.NoError(t, err)
genesisRoot := [fieldparams.RootLength]byte{'a'}
require.NoError(t, db.SaveGenesisBlockRoot(t.Context(), genesisRoot))
require.NoError(t, db.SaveState(t.Context(), genesisState, genesisRoot))
// Note: We intentionally do NOT call SaveOriginCheckpointBlockRoot here
// to simulate the scenario where origin block root is not set
slotsPerArchivedPoint := primitives.Slot(128)
bRoots := make([][fieldparams.RootLength]byte, 0)
prevRoot := genesisRoot
for i := primitives.Slot(1); i <= slotsPerArchivedPoint; i++ { // skip slot 0
b := util.NewBeaconBlock()
b.Block.Slot = i
b.Block.ParentRoot = prevRoot[:]
r, err := b.Block.HashTreeRoot()
require.NoError(t, err)
wsb, err := blocks.NewSignedBeaconBlock(b)
require.NoError(t, err)
require.NoError(t, db.SaveBlock(t.Context(), wsb))
bRoots = append(bRoots, r)
prevRoot = r
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(i))
require.NoError(t, db.SaveState(t.Context(), st, r))
}
require.NoError(t, db.SaveFinalizedCheckpoint(t.Context(), &ethpb.Checkpoint{
Root: bRoots[len(bRoots)-1][:],
Epoch: primitives.Epoch(slotsPerArchivedPoint / params.BeaconConfig().SlotsPerEpoch),
}))
// This should not fail even though origin block root is not set
err = db.CleanUpDirtyStates(t.Context(), slotsPerArchivedPoint)
require.NoError(t, err)
// Verify that cleanup still works correctly
for i, root := range bRoots {
if primitives.Slot(i) >= slotsPerArchivedPoint.SubSlot(slotsPerArchivedPoint.Div(3)) {
require.Equal(t, true, db.HasState(t.Context(), root))
} else {
require.Equal(t, false, db.HasState(t.Context(), root))
}
}
}

@@ -4,17 +4,26 @@ import (
"bytes"
"context"
"encoding/binary"
"time"

"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/pkg/errors"
bolt "go.etcd.io/bbolt"
)

var errTimeOut = errors.New("operation timed out")

// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
// less than or equal to the specified epoch.
func (s *Store) PruneAttestationsAtEpoch(
_ context.Context, maxEpoch primitives.Epoch,
ctx context.Context, maxEpoch primitives.Epoch,
) (numPruned uint, err error) {
// In some cases, pruning may take a very long time and consume significant memory in the
// open Update transaction. Therefore, we impose a 1 minute timeout on this operation.
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()

// We can prune everything less than the current epoch - history length.
encodedEndPruneEpoch := make([]byte, 8)
binary.BigEndian.PutUint64(encodedEndPruneEpoch, uint64(maxEpoch))
@@ -48,13 +57,18 @@ func (s *Store) PruneAttestationsAtEpoch(
return
}

if err = s.db.Update(func(tx *bolt.Tx) error {
err = s.db.Update(func(tx *bolt.Tx) error {
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
c := signingRootsBkt.Cursor()

// We begin a pruning iteration starting from the first item in the bucket.
for k, v := c.First(); k != nil; k, v = c.Next() {
if ctx.Err() != nil {
// Exit the routine if the context has expired.
return errTimeOut
}

// We check the epoch from the current key in the database.
// If we have hit an epoch that is greater than the end epoch of the pruning process,
// we then completely exit the process as we are done.
@@ -67,18 +81,27 @@ func (s *Store) PruneAttestationsAtEpoch(
// so it is possible we have a few adjacent objects that have the same slot, such as
// (target_epoch = 3 ++ _) => encode(attestation)
if err := signingRootsBkt.Delete(k); err != nil {
return err
return errors.Wrap(err, "delete attestation signing root")
}
if err := attRecordsBkt.Delete(v); err != nil {
return err
return errors.Wrap(err, "delete attestation record")
}
slasherAttestationsPrunedTotal.Inc()
numPruned++
}
return nil
}); err != nil {
})

if errors.Is(err, errTimeOut) {
log.Warning("Aborting pruning routine")
return
}

if err != nil {
log.WithError(err).Error("Failed to prune attestations")
return
}

return
}

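The key idea in the hunk above is checking a deadline-bound context on every iteration of a long-running delete loop, so a slow prune aborts cleanly instead of holding a large write transaction open. A dependency-free sketch of the same shape (pruneWithDeadline and errTimedOut are hypothetical; no bbolt involved):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errTimedOut = errors.New("operation timed out")

// pruneWithDeadline bounds the whole operation with a one-minute context and
// checks it on every iteration, returning early once the deadline passes.
func pruneWithDeadline(ctx context.Context, keys []int) (int, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	pruned := 0
	for range keys {
		if ctx.Err() != nil {
			return pruned, errTimedOut
		}
		pruned++ // the per-key delete would happen here
	}
	return pruned, nil
}

func main() {
	n, err := pruneWithDeadline(context.Background(), make([]int, 1000))
	fmt.Println(n, err) // 1000 <nil>
}

Checking ctx.Err() inside the loop rather than relying on the transaction to time out keeps the abort deterministic and lets the caller distinguish "timed out" from a real storage failure.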
@@ -142,10 +142,9 @@ var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
defer span.End()
start := time.Now()
defer func() {
defer func(start time.Time) {
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
}()
}(time.Now())

d := time.Now().Add(time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second)
ctx, cancel := context.WithDeadline(ctx, d)
@@ -183,7 +182,10 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
return nil, errors.New("unknown execution data type")
}
if result.ValidationError != "" {
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
log.WithField("status", result.Status.String()).
WithField("parentRoot", fmt.Sprintf("%#x", parentBlockRoot)).
WithError(errors.New(result.ValidationError)).
Error("Got a validation error in newPayload")
}
switch result.Status {
case pb.PayloadStatus_INVALID_BLOCK_HASH:
@@ -195,7 +197,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
case pb.PayloadStatus_VALID:
return result.LatestValidHash, nil
default:
return nil, ErrUnknownPayloadStatus
return nil, errors.Wrapf(ErrUnknownPayloadStatus, "unknown payload status: %s", result.Status.String())
}
}

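The first hunk above folds the separate start variable into a defer argument. This works because arguments to a deferred call are evaluated when the defer statement executes, not when the function returns. A minimal sketch (observe is a stand-in for the Prometheus histogram call):

package main

import (
	"fmt"
	"time"
)

// observe stands in for a latency histogram's Observe method.
func observe(ms float64) { fmt.Printf("latency: %.0fms\n", ms) }

// doWork captures the start time as a defer argument, which is evaluated
// immediately at the defer statement, replacing a separate start variable.
func doWork() {
	defer func(start time.Time) {
		observe(float64(time.Since(start).Milliseconds()))
	}(time.Now())

	time.Sleep(10 * time.Millisecond) // the measured work
}

func main() { doWork() }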
@@ -928,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
require.NoError(t, err)
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
require.DeepEqual(t, []uint8(nil), resp)
})
t.Run(BlockByNumberMethod, func(t *testing.T) {

@@ -254,6 +254,7 @@ func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaco
return updatesByPeriod, nil
}

// SetLastFinalityUpdate should be used only for testing.
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -263,9 +264,11 @@ func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdat

func (s *Store) setLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
if broadcast && IsFinalityUpdateValidForBroadcast(update, s.lastFinalityUpdate) {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
go func() {
if err := s.p2p.BroadcastLightClientFinalityUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client finality update")
}
}()
}

s.lastFinalityUpdate = update
@@ -283,6 +286,7 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
return s.lastFinalityUpdate
}

// SetLastOptimisticUpdate should be used only for testing.
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -292,9 +296,11 @@ func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticU

func (s *Store) setLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
if broadcast {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
go func() {
if err := s.p2p.BroadcastLightClientOptimisticUpdate(context.Background(), update); err != nil {
log.WithError(err).Error("Could not broadcast light client optimistic update")
}
}()
}

s.lastOptimisticUpdate = update

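Both setters above move the network send into a goroutine so the mutex-guarded state update returns without waiting on the p2p layer. A self-contained sketch of the pattern under stated assumptions (broadcast and setUpdate are hypothetical; a WaitGroup is used here only so main can observe the send, where the tests below instead sleep briefly):

package main

import (
	"fmt"
	"sync"
	"time"
)

// broadcast simulates a slow network send.
func broadcast(msg string) error {
	time.Sleep(5 * time.Millisecond)
	fmt.Println("sent:", msg)
	return nil
}

// setUpdate mirrors the change: the send runs in a goroutine so the
// lock-holding setter returns immediately instead of blocking on the network.
func setUpdate(mu *sync.Mutex, wg *sync.WaitGroup, msg string) {
	mu.Lock()
	defer mu.Unlock()
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := broadcast(msg); err != nil {
			fmt.Println("could not broadcast:", err)
		}
	}()
	// the state update itself proceeds immediately
}

func main() {
	var mu sync.Mutex
	var wg sync.WaitGroup
	setUpdate(&mu, &wg, "finality update")
	wg.Wait()
}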
@@ -3,6 +3,7 @@ package light_client
import (
"context"
"testing"
"time"

"github.com/OffchainLabs/prysm/v6/async/event"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
@@ -74,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))

timeForGoroutinesToFinish := 20 * time.Microsecond
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
@@ -85,6 +87,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test

@@ -99,6 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

@@ -113,6 +117,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test

@@ -127,6 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")

// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
@@ -140,6 +146,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test

@@ -154,6 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")

// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -167,6 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {

lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}

@@ -58,7 +58,6 @@ go_library(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//encoding/bytesutil:go_default_library",
"//genesis:go_default_library",
"//monitoring/prometheus:go_default_library",
"//monitoring/tracing:go_default_library",

@@ -2,11 +2,13 @@ package node

import (
"context"
"os"

"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/kv"
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/slasherkv"
"github.com/OffchainLabs/prysm/v6/cmd"
"github.com/OffchainLabs/prysm/v6/genesis"
"github.com/pkg/errors"
"github.com/urfave/cli/v2"
)
|
||||
return kv.NewKVStore(ctx, db.DatabasePath())
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearGenesis(dir string) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
gfile, err := genesis.FindStateFile(dir)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := os.Remove(gfile.FilePath()); err != nil {
|
||||
return errors.Wrapf(err, "genesis state file not removed: %s", gfile.FilePath())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dbClearer) clearBlobs(bs *filesystem.BlobStorage) error {
|
||||
if !c.shouldProceed() {
|
||||
return nil
|
||||
|
||||
@@ -60,7 +60,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/container/slice"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v6/genesis"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/prometheus"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime"
|
||||
@@ -178,6 +177,9 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
|
||||
}
|
||||
beacon.db = kvdb
|
||||
|
||||
if err := dbClearer.clearGenesis(dataDir); err != nil {
|
||||
return nil, errors.Wrap(err, "could not clear genesis state")
|
||||
}
|
||||
providers := append(beacon.GenesisProviders, kv.NewLegacyGenesisProvider(kvdb))
|
||||
if err := genesis.Initialize(ctx, dataDir, providers...); err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize genesis state")
|
||||
@@ -598,22 +600,7 @@ func (b *BeaconNode) startStateGen(ctx context.Context, bfs coverage.AvailableBl
|
||||
return err
|
||||
}
|
||||
|
||||
r := bytesutil.ToBytes32(cp.Root)
|
||||
// Consider edge case where finalized root are zeros instead of genesis root hash.
|
||||
if r == params.BeaconConfig().ZeroHash {
|
||||
genesisBlock, err := b.db.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if genesisBlock != nil && !genesisBlock.IsNil() {
|
||||
r, err = genesisBlock.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.finalizedStateAtStartUp, err = sg.StateByRoot(ctx, r)
|
||||
b.finalizedStateAtStartUp, err = sg.StateByRoot(ctx, [32]byte(cp.Root))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package blstoexec
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
@@ -87,7 +86,7 @@ func (p *Pool) PendingBLSToExecChanges() ([]*ethpb.SignedBLSToExecutionChange, e
|
||||
func (p *Pool) BLSToExecChangesForInclusion(st state.ReadOnlyBeaconState) ([]*ethpb.SignedBLSToExecutionChange, error) {
|
||||
p.lock.RLock()
|
||||
defer p.lock.RUnlock()
|
||||
length := int(math.Min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
|
||||
length := int(min(float64(params.BeaconConfig().MaxBlsToExecutionChanges), float64(p.pending.Len())))
|
||||
result := make([]*ethpb.SignedBLSToExecutionChange, 0, length)
|
||||
node := p.pending.Last()
|
||||
for node != nil && len(result) < length {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package voluntaryexits
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
|
||||
@@ -63,7 +62,7 @@ func (p *Pool) PendingExits() ([]*ethpb.SignedVoluntaryExit, error) {
|
||||
// return more than the block enforced MaxVoluntaryExits.
|
||||
func (p *Pool) ExitsForInclusion(state state.ReadOnlyBeaconState, slot types.Slot) ([]*ethpb.SignedVoluntaryExit, error) {
|
||||
p.lock.RLock()
|
||||
length := int(math.Min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
|
||||
length := int(min(float64(params.BeaconConfig().MaxVoluntaryExits), float64(p.pending.Len())))
|
||||
result := make([]*ethpb.SignedVoluntaryExit, 0, length)
|
||||
node := p.pending.First()
|
||||
for node != nil && len(result) < length {
|
||||
|
||||
@@ -139,6 +139,7 @@ go_test(
        "sender_test.go",
        "service_test.go",
        "subnets_test.go",
        "topics_test.go",
        "utils_test.go",
    ],
    embed = [":go_default_library"],
@@ -162,6 +163,7 @@ go_test(
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//consensus-types/wrapper:go_default_library",
@@ -5,14 +5,18 @@ import (
	"context"
	"fmt"
	"reflect"
	"slices"
	"sync"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/crypto/hash"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
	"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
@@ -274,6 +278,20 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
		return errors.New("attempted to broadcast nil light client optimistic update")
	}

	// add delay to ensure block has time to propagate
	slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
	if err != nil {
		err := errors.Wrap(err, "could not compute slot start time")
		tracing.AnnotateError(span, err)
		return err
	}
	timeSinceSlotStart := time.Since(slotStart)
	expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
	if timeSinceSlotStart < expectedDelay {
		waitDuration := expectedDelay - timeSinceSlotStart
		<-time.After(waitDuration)
	}

	digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
	if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
		log.WithError(err).Debug("Failed to broadcast light client optimistic update")
@@ -294,6 +312,20 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
		return errors.New("attempted to broadcast nil light client finality update")
	}

	// add delay to ensure block has time to propagate
	slotStart, err := slots.StartTime(s.genesisTime, update.SignatureSlot())
	if err != nil {
		err := errors.Wrap(err, "could not compute slot start time")
		tracing.AnnotateError(span, err)
		return err
	}
	timeSinceSlotStart := time.Since(slotStart)
	expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
	if timeSinceSlotStart < expectedDelay {
		waitDuration := expectedDelay - timeSinceSlotStart
		<-time.After(waitDuration)
	}

	forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
	if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
		log.WithError(err).Debug("Failed to broadcast light client finality update")
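Both light client broadcast paths above now wait until a fraction of the slot has elapsed before publishing, so the corresponding block has had time to propagate. A rough standalone sketch of the wait logic, with illustrative slot-timing values (12-second slots, 60 basis points):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	slotDuration := 12 * time.Second
	genesis := time.Now().Add(-100 * slotDuration) // pretend genesis was 100 slots ago
	signatureSlot := 100

	// Start time of the slot the update's signature belongs to.
	slotStart := genesis.Add(time.Duration(signatureSlot) * slotDuration)

	// Delay target: 60 basis points of the slot, i.e. 12s * 60/10000 = 72ms.
	expectedDelay := slotDuration * 60 / 10000

	// Only sleep for whatever part of the delay has not already elapsed.
	if since := time.Since(slotStart); since < expectedDelay {
		<-time.After(expectedDelay - since)
	}
	fmt.Println("safe to broadcast")
}
```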
@@ -306,86 +338,150 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
	return nil
}

// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
// broadcasted to the current fork and to the input column subnet.
func (s *Service) BroadcastDataColumnSidecar(
	dataColumnSubnet uint64,
	dataColumnSidecar blocks.VerifiedRODataColumn,
) error {
	// Add tracing to the function.
	ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
	defer span.End()
// BroadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
// cancelled (whichever comes first).
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
	// Increase the number of broadcast attempts.
	dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))

	// Retrieve the current fork digest.
	forkDigest, err := s.currentForkDigest()
	if err != nil {
		err := errors.Wrap(err, "current fork digest")
		tracing.AnnotateError(span, err)
		return err
		return errors.Wrap(err, "current fork digest")
	}

	// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
	go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
	go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)

	return nil
}

func (s *Service) internalBroadcastDataColumnSidecar(
	ctx context.Context,
	columnSubnet uint64,
	dataColumnSidecar blocks.VerifiedRODataColumn,
	forkDigest [fieldparams.VersionLength]byte,
) {
	// Add tracing to the function.
	_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
	defer span.End()

	// Increase the number of broadcast attempts.
	dataColumnSidecarBroadcastAttempts.Inc()

	// Define a one-slot length context timeout.
	secondsPerSlot := params.BeaconConfig().SecondsPerSlot
	oneSlot := time.Duration(secondsPerSlot) * time.Second
	ctx, cancel := context.WithTimeout(ctx, oneSlot)
	defer cancel()

	// Build the topic corresponding to this column subnet and this fork digest.
	topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)

	// Compute the wrapped subnet index.
	wrappedSubIdx := columnSubnet + dataColumnSubnetVal

	// Find peers if needed.
	if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
		log.WithError(err).Error("Failed to find peers for data column subnet")
		tracing.AnnotateError(span, err)
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
	type rootAndIndex struct {
		root  [fieldparams.RootLength]byte
		index uint64
	}

	// Broadcast the data column sidecar to the network.
	if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
		log.WithError(err).Error("Failed to broadcast data column sidecar")
		tracing.AnnotateError(span, err)
	var (
		wg      sync.WaitGroup
		timings sync.Map
	)

	logLevel := logrus.GetLevel()

	slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
	for _, sidecar := range sidecars {
		slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()

		wg.Go(func() {
			// Add tracing to the function.
			ctx, span := trace.StartSpan(s.ctx, "p2p.broadcastDataColumnSidecars")
			defer span.End()

			// Compute the subnet for this data column sidecar.
			subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)

			// Build the topic corresponding to this column subnet and this fork digest.
			topic := dataColumnSubnetToTopic(subnet, forkDigest)

			// Compute the wrapped subnet index.
			wrappedSubIdx := subnet + dataColumnSubnetVal

			// Find peers if needed.
			if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
				tracing.AnnotateError(span, err)
				log.WithError(err).Error("Cannot find peers if needed")
				return
			}

			// Broadcast the data column sidecar to the network.
			if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
				tracing.AnnotateError(span, err)
				log.WithError(err).Error("Cannot broadcast data column sidecar")
				return
			}

			// Increase the number of successful broadcasts.
			dataColumnSidecarBroadcasts.Inc()

			// Record the timing for log purposes.
			if logLevel >= logrus.DebugLevel {
				root := sidecar.BlockRoot()
				timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
			}
		})
	}

	// Wait for all broadcasts to finish.
	wg.Wait()

	// The rest of this function is only for debug logging purposes.
	if logLevel < logrus.DebugLevel {
		return
	}

	header := dataColumnSidecar.SignedBlockHeader.GetHeader()
	slot := header.GetSlot()

	slotStartTime, err := slots.StartTime(s.genesisTime, slot)
	if err != nil {
		log.WithError(err).Error("Failed to convert slot to time")
	type logInfo struct {
		durationMin time.Duration
		durationMax time.Duration
		indices     []uint64
	}

	log.WithFields(logrus.Fields{
		"slot":               slot,
		"timeSinceSlotStart": time.Since(slotStartTime),
		"root":               fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
		"columnSubnet":       columnSubnet,
		"blobCount":          len(dataColumnSidecar.Column),
	}).Debug("Broadcasted data column sidecar")
	logInfoPerRoot := make(map[[fieldparams.RootLength]byte]*logInfo, 1)

	// Increase the number of successful broadcasts.
	dataColumnSidecarBroadcasts.Inc()
	timings.Range(func(key any, value any) bool {
		rootAndIndex, ok := key.(rootAndIndex)
		if !ok {
			log.Error("Could not cast key to rootAndIndex")
			return true
		}

		broadcastTime, ok := value.(time.Time)
		if !ok {
			log.Error("Could not cast value to time.Time")
			return true
		}

		slot, ok := slotPerRoot[rootAndIndex.root]
		if !ok {
			log.WithField("root", fmt.Sprintf("%#x", rootAndIndex.root)).Error("Could not find slot for root")
			return true
		}

		duration, err := slots.SinceSlotStart(slot, s.genesisTime, broadcastTime)
		if err != nil {
			log.WithError(err).Error("Could not compute duration since slot start")
			return true
		}

		info, ok := logInfoPerRoot[rootAndIndex.root]
		if !ok {
			logInfoPerRoot[rootAndIndex.root] = &logInfo{durationMin: duration, durationMax: duration, indices: []uint64{rootAndIndex.index}}
			return true
		}

		info.durationMin = min(info.durationMin, duration)
		info.durationMax = max(info.durationMax, duration)
		info.indices = append(info.indices, rootAndIndex.index)

		return true
	})

	for root, info := range logInfoPerRoot {
		slices.Sort(info.indices)

		log.WithFields(logrus.Fields{
			"root":                  fmt.Sprintf("%#x", root),
			"slot":                  slotPerRoot[root],
			"count":                 len(info.indices),
			"indices":               helpers.PrettySlice(info.indices),
			"timeSinceSlotStartMin": info.durationMin,
			"timeSinceSlotStartMax": info.durationMax,
		}).Debug("Broadcasted data column sidecars")
	}
}

func (s *Service) findPeersIfNeeded(
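The new `broadcastDataColumnSidecars` fans each sidecar out on its own goroutine via `sync.WaitGroup.Go` (added in Go 1.25) and records per-item timings in a `sync.Map` for the debug summary. A stripped-down sketch of that fan-out pattern, with placeholder work standing in for the real gossip publish:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	items := []string{"col-0", "col-1", "col-2"}

	var (
		wg      sync.WaitGroup
		timings sync.Map // written concurrently by the workers below
	)

	for _, item := range items {
		// Go 1.25: wg.Go runs the closure on a new goroutine and handles
		// the Add/Done bookkeeping itself. Since Go 1.22 the loop variable
		// is per-iteration, so capturing item directly is safe.
		wg.Go(func() {
			time.Sleep(10 * time.Millisecond) // stand-in for a broadcast
			timings.Store(item, time.Now())
		})
	}
	wg.Wait() // every broadcast has finished (or failed) past this point

	timings.Range(func(k, v any) bool {
		fmt.Println(k, v.(time.Time).Format(time.RFC3339Nano))
		return true
	})
}
```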
@@ -15,11 +15,12 @@ import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
	p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
	fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
@@ -60,7 +61,6 @@ func TestService_Broadcast(t *testing.T) {
	topic := "/eth2/%x/testing"
	// Set a test gossip mapping for testpb.TestSimpleMessage.
	GossipTypeMapping[reflect.TypeOf(msg)] = topic
	p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
	digest, err := p.currentForkDigest()
	require.NoError(t, err)
	topic = fmt.Sprintf(topic, digest)
@@ -530,6 +530,11 @@ func TestService_BroadcastBlob(t *testing.T) {
}

func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig().Copy()
	config.SyncMessageDueBPS = 60 // ~72 milliseconds
	params.OverrideBeaconConfig(config)

	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
@@ -540,7 +545,7 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisTime:           time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
@@ -567,12 +572,19 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
	wg.Add(1)
	go func(tt *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
		ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
		defer cancel()

		incomingMessage, err := sub.Next(ctx)
		require.NoError(t, err)

		slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
		require.NoError(t, err)
		expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
		if time.Now().Before(slotStartTime.Add(expectedDelay)) {
			tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
		}

		result := &ethpb.LightClientOptimisticUpdateAltair{}
		require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
		if !proto.Equal(result, msg.Proto()) {
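`SyncMessageDueBPS` appears to be expressed in basis points of the slot duration, which is why the test comment pairs a value of 60 with "~72 milliseconds" on 12-second slots. A quick standalone check of that arithmetic (the real conversion goes through `slots.ComponentDuration`; the math below is just the same computation done by hand):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	slot := 12 * time.Second
	const bps = 60 // basis points: 1 bps = 1/10000

	// 12s * 60 / 10000 = 72ms
	due := slot * bps / 10_000
	fmt.Println(due) // 72ms
}
```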
@@ -594,6 +606,11 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
}

func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
	params.SetupTestConfigCleanup(t)
	config := params.BeaconConfig().Copy()
	config.SyncMessageDueBPS = 60 // ~72 milliseconds
	params.OverrideBeaconConfig(config)

	p1 := p2ptest.NewTestP2P(t)
	p2 := p2ptest.NewTestP2P(t)
	p1.Connect(p2)
@@ -604,7 +621,7 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
		cfg:                   &Config{},
		genesisTime:           time.Now(),
		genesisTime:           time.Now().Add(-33 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second), // the signature slot of the mock update is 33
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
@@ -631,12 +648,19 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
	wg.Add(1)
	go func(tt *testing.T) {
		defer wg.Done()
		ctx, cancel := context.WithTimeout(t.Context(), 1*time.Second)
		ctx, cancel := context.WithTimeout(t.Context(), 150*time.Millisecond)
		defer cancel()

		incomingMessage, err := sub.Next(ctx)
		require.NoError(t, err)

		slotStartTime, err := slots.StartTime(p.genesisTime, msg.SignatureSlot())
		require.NoError(t, err)
		expectedDelay := slots.ComponentDuration(primitives.BP(params.BeaconConfig().SyncMessageDueBPS))
		if time.Now().Before(slotStartTime.Add(expectedDelay)) {
			tt.Errorf("Message received too early, now %v, expected at least %v", time.Now(), slotStartTime.Add(expectedDelay))
		}

		result := &ethpb.LightClientFinalityUpdateAltair{}
		require.NoError(t, p.Encoding().DecodeGossip(incomingMessage.Data, result))
		if !proto.Equal(result, msg.Proto()) {
@@ -664,6 +688,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
		topicFormat = DataColumnSubnetTopicFormat
	)

	ctx := t.Context()

	// Load the KZG trusted setup.
	err := kzg.Start()
	require.NoError(t, err)
@@ -686,7 +712,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
	_, pkey, ipAddr := createHost(t, port)

	service := &Service{
		ctx:                   t.Context(),
		ctx:                   ctx,
		host:                  p1.BHost,
		pubsub:                p1.PubSub(),
		joinedTopics:          map[string]*pubsub.Topic{},
@@ -695,7 +721,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
		genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
		subnetsLock:           make(map[uint64]*sync.RWMutex),
		subnetsLockLock:       sync.Mutex{},
		peers:                 peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
		peers:                 peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
		custodyInfo:           &custodyInfo{},
	}

@@ -722,7 +748,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
	time.Sleep(50 * time.Millisecond)

	// Broadcast to peers and wait.
	err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
	err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
	require.NoError(t, err)

	// Receive the message.
@@ -1,6 +1,8 @@
package p2p

import (
	"context"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/config/params"
	"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
@@ -13,27 +15,25 @@ import (
var _ CustodyManager = (*Service)(nil)

// EarliestAvailableSlot returns the earliest available slot.
func (s *Service) EarliestAvailableSlot() (primitives.Slot, error) {
	s.custodyInfoLock.RLock()
	defer s.custodyInfoLock.RUnlock()

	if s.custodyInfo == nil {
		return 0, errors.New("no custody info available")
// It blocks until the custody info is set or the context is done.
func (s *Service) EarliestAvailableSlot(ctx context.Context) (primitives.Slot, error) {
	custodyInfo, err := s.waitForCustodyInfo(ctx)
	if err != nil {
		return 0, errors.Wrap(err, "wait for custody info")
	}

	return s.custodyInfo.earliestAvailableSlot, nil
	return custodyInfo.earliestAvailableSlot, nil
}

// CustodyGroupCount returns the custody group count.
func (s *Service) CustodyGroupCount() (uint64, error) {
	s.custodyInfoLock.Lock()
	defer s.custodyInfoLock.Unlock()

	if s.custodyInfo == nil {
		return 0, errors.New("no custody info available")
// It blocks until the custody info is set or the context is done.
func (s *Service) CustodyGroupCount(ctx context.Context) (uint64, error) {
	custodyInfo, err := s.waitForCustodyInfo(ctx)
	if err != nil {
		return 0, errors.Wrap(err, "wait for custody info")
	}

	return s.custodyInfo.groupCount, nil
	return custodyInfo.groupCount, nil
}

// UpdateCustodyInfo updates the stored custody group count to the incoming one
@@ -77,6 +77,9 @@ func (s *Service) UpdateCustodyInfo(earliestAvailableSlot primitives.Slot, custo
		earliestAvailableSlot: earliestAvailableSlot,
		groupCount:            custodyGroupCount,
	}

	close(s.custodyInfoSet)

	return earliestAvailableSlot, custodyGroupCount, nil
}

@@ -145,6 +148,33 @@ func (s *Service) CustodyGroupCountFromPeer(pid peer.ID) uint64 {
	return custodyCount
}

func (s *Service) waitForCustodyInfo(ctx context.Context) (custodyInfo, error) {
	select {
	case <-s.custodyInfoSet:
		info, ok := s.copyCustodyInfo()
		if !ok {
			return custodyInfo{}, errors.New("custody info was set but is nil")
		}

		return info, nil
	case <-ctx.Done():
		return custodyInfo{}, ctx.Err()
	}
}

// copyCustodyInfo returns a copy of the current custody info in a thread-safe manner.
// If no custody info is set, it returns false as the second return value.
func (s *Service) copyCustodyInfo() (custodyInfo, bool) {
	s.custodyInfoLock.RLock()
	defer s.custodyInfoLock.RUnlock()

	if s.custodyInfo == nil {
		return custodyInfo{}, false
	}

	return *s.custodyInfo, true
}

// custodyGroupCountFromPeerENR retrieves the custody count from the peer's ENR.
// If the ENR is not available, it defaults to the minimum number of custody groups
// an honest node custodies and serves samples from.
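The custody accessors now block on a closed-channel signal instead of returning an error when the info is missing: `UpdateCustodyInfo` closes `custodyInfoSet` once, and every waiter's `select` unblocks, with the caller's context bounding the wait. A minimal sketch of that one-shot signal pattern (names and types here are illustrative, not prysm's):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type store struct {
	mu    sync.RWMutex
	ready chan struct{} // closed exactly once when the value is first set
	value int
}

func (s *store) set(v int) {
	s.mu.Lock()
	s.value = v
	s.mu.Unlock()
	close(s.ready) // wakes current and future waiters; closing twice would panic
}

func (s *store) get(ctx context.Context) (int, error) {
	select {
	case <-s.ready: // receiving from a closed channel never blocks
		s.mu.RLock()
		defer s.mu.RUnlock()
		return s.value, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func main() {
	s := &store{ready: make(chan struct{})}
	go func() { time.Sleep(10 * time.Millisecond); s.set(42) }()

	v, err := s.get(context.Background())
	fmt.Println(v, err) // 42 <nil>
}
```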
@@ -20,58 +20,37 @@ import (
)

func TestEarliestAvailableSlot(t *testing.T) {
	t.Run("No custody info available", func(t *testing.T) {
		service := &Service{
			custodyInfo: nil,
		}
	const expected primitives.Slot = 100

		_, err := service.EarliestAvailableSlot()
	service := &Service{
		custodyInfoSet: make(chan struct{}),
		custodyInfo: &custodyInfo{
			earliestAvailableSlot: expected,
		},
	}

		require.NotNil(t, err)
	})
	close(service.custodyInfoSet)
	slot, err := service.EarliestAvailableSlot(t.Context())

	t.Run("Valid custody info", func(t *testing.T) {
		const expected primitives.Slot = 100

		service := &Service{
			custodyInfo: &custodyInfo{
				earliestAvailableSlot: expected,
			},
		}

		slot, err := service.EarliestAvailableSlot()

		require.NoError(t, err)
		require.Equal(t, expected, slot)
	})
	require.NoError(t, err)
	require.Equal(t, expected, slot)
}

func TestCustodyGroupCount(t *testing.T) {
	t.Run("No custody info available", func(t *testing.T) {
		service := &Service{
			custodyInfo: nil,
		}
	const expected uint64 = 5

		_, err := service.CustodyGroupCount()
	service := &Service{
		custodyInfoSet: make(chan struct{}),
		custodyInfo: &custodyInfo{
			groupCount: expected,
		},
	}

		require.NotNil(t, err)
		require.Equal(t, true, strings.Contains(err.Error(), "no custody info available"))
	})
	close(service.custodyInfoSet)
	count, err := service.CustodyGroupCount(t.Context())

	t.Run("Valid custody info", func(t *testing.T) {
		const expected uint64 = 5

		service := &Service{
			custodyInfo: &custodyInfo{
				groupCount: expected,
			},
		}

		count, err := service.CustodyGroupCount()

		require.NoError(t, err)
		require.Equal(t, expected, count)
	})
	require.NoError(t, err)
	require.Equal(t, expected, count)
}

func TestUpdateCustodyInfo(t *testing.T) {
@@ -163,7 +142,8 @@ func TestUpdateCustodyInfo(t *testing.T) {
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			service := &Service{
				custodyInfo: tc.initialCustodyInfo,
				custodyInfoSet: make(chan struct{}),
				custodyInfo:    tc.initialCustodyInfo,
			}

			slot, groupCount, err := service.UpdateCustodyInfo(tc.inputSlot, tc.inputGroupCount)
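The rewritten tests pass `t.Context()` (added to the standard library in Go 1.24) to the now-blocking accessors, so a hung accessor gets cancelled at test cleanup instead of deadlocking the run. A hedged sketch of that pattern, with a stand-in accessor rather than the real `Service`:

```go
package example

import (
	"context"
	"testing"
	"time"
)

// blockingGet is a stand-in for an accessor like EarliestAvailableSlot:
// it waits for the ready signal or the context, whichever comes first.
func blockingGet(ctx context.Context, ready <-chan struct{}) (int, error) {
	select {
	case <-ready:
		return 100, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}

func TestBlockingGet(t *testing.T) {
	ready := make(chan struct{})
	close(ready) // simulate custody info already being set

	// t.Context() (Go 1.24) is cancelled just before test cleanup,
	// bounding the blocking call; a tighter deadline is also an option.
	ctx, cancel := context.WithTimeout(t.Context(), time.Second)
	defer cancel()

	got, err := blockingGet(ctx, ready)
	if err != nil || got != 100 {
		t.Fatalf("got %d, err %v", got, err)
	}
}
```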
@@ -79,7 +79,7 @@ func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
	rawListener, err := listenerCreator()
	if err != nil {
		return nil, errors.Wrap(err, "could not create new listener")
		return nil, errors.Wrap(err, "create new listener")
	}
	return &listenerWrapper{
		listener: rawListener,
@@ -253,7 +253,7 @@ func (s *Service) RefreshPersistentSubnets() {
		return
	}

	custodyGroupCount, err = s.CustodyGroupCount()
	custodyGroupCount, err = s.CustodyGroupCount(s.ctx)
	if err != nil {
		log.WithError(err).Error("Could not retrieve custody group count")
		return
@@ -536,7 +536,7 @@ func (s *Service) createListener(
		int(s.cfg.QUICPort),
	)
	if err != nil {
		return nil, errors.Wrap(err, "could not create local node")
		return nil, errors.Wrap(err, "create local node")
	}

	bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
@@ -604,7 +604,7 @@ func (s *Service) createLocalNode(
	localNode = initializeSyncCommSubnets(localNode)

	if params.FuluEnabled() {
		custodyGroupCount, err := s.CustodyGroupCount()
		custodyGroupCount, err := s.CustodyGroupCount(s.ctx)
		if err != nil {
			return nil, errors.Wrap(err, "could not retrieve custody group count")
		}
@@ -652,7 +652,7 @@ func (s *Service) startDiscoveryV5(
	}
	wrappedListener, err := newListener(createListener)
	if err != nil {
		return nil, errors.Wrap(err, "could not create listener")
		return nil, errors.Wrap(err, "create listener")
	}
	record := wrappedListener.Self()
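The wrap messages in these hunks drop the "could not" prefix because pkg/errors already joins messages with ": ", so terse verb phrases read better once several wraps stack. A small illustration (using github.com/pkg/errors, which the surrounding code already imports):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	base := errors.New("connection refused")

	// Prefixed style: each layer repeats boilerplate.
	verbose := errors.Wrap(errors.Wrap(base, "could not create new listener"), "could not start discovery")
	fmt.Println(verbose) // could not start discovery: could not create new listener: connection refused

	// Terse style: the chain itself supplies the narrative.
	terse := errors.Wrap(errors.Wrap(base, "create new listener"), "start discovery")
	fmt.Println(terse) // start discovery: create new listener: connection refused
}
```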
Some files were not shown because too many files have changed in this diff.