Compare commits


108 Commits

Author SHA1 Message Date
aarshkshah1992
46b300c6ca increase crawl timeout 2026-01-07 19:11:45 +05:30
aarshkshah1992
04d45b6194 improve logging 2026-01-07 15:19:51 +05:30
aarshkshah1992
3e7cd8c2f1 protect one peer per topic 2026-01-06 21:31:05 +05:30
aarshkshah1992
ea1962bf17 better dialing policy 2026-01-06 20:54:18 +05:30
aarshkshah1992
6072e9c335 unit tests and better logging 2026-01-06 20:17:52 +05:30
aarshkshah1992
7d92cc5c32 do not prune sole provider peers 2026-01-06 19:26:20 +05:30
aarshkshah1992
4b8973289a better conn management 2026-01-06 18:40:54 +05:30
aarshkshah1992
51c64e75c0 add logging for topics 2026-01-06 18:22:40 +05:30
aarshkshah1992
d1ceff6621 fix CI 2026-01-06 12:37:33 +05:30
aarshkshah1992
e63c1bebfe resolve conflicts 2026-01-06 12:31:42 +05:30
Aarsh Shah
25becdcd33 Merge branch 'develop' into feat/gossipsub-control-pane-peer-crawler-peer-controller 2026-01-06 12:02:23 +05:30
aarshkshah1992
a2459aa365 address review 2026-01-05 14:59:41 +05:30
aarshkshah1992
80641dc3ae Add Status to P2P Service interface 2026-01-05 14:40:08 +05:30
aarshkshah1992
59bb4a8301 wait for p2p service to start 2026-01-05 14:35:37 +05:30
aarshkshah1992
08f117f04f better dial logic 2025-12-15 18:27:45 +04:00
aarshkshah1992
0b6365781f address review 2025-12-15 16:10:58 +04:00
aarshkshah1992
0c994445ea failed to ping node 2025-12-15 16:02:42 +04:00
aarshkshah1992
77c32203af removed seen check that is not needed 2025-12-15 15:59:07 +04:00
aarshkshah1992
743e6bab07 add a test for TestSubscriptionController_GetCurrentActiveTopicsWithMinPeerCount 2025-12-15 15:02:33 +04:00
aarshkshah1992
451d2a8bc5 per topic family dial policy 2025-12-15 14:46:39 +04:00
aarshkshah1992
6508bdfa9a fix bug in subscription 2025-12-15 12:25:12 +04:00
aarshkshah1992
d2bf512f36 remove pointer 2025-12-15 11:16:26 +04:00
aarshkshah1992
e62fe66b0a add metrics 2025-12-15 10:54:18 +04:00
aarshkshah1992
08fff3dec4 changes for fork watching 2025-12-15 09:53:03 +04:00
aarshkshah1992
75a3d45470 revert service dep 2025-12-12 18:38:59 +04:00
aarshkshah1992
bbd2d4da0f fix build 2025-12-12 16:28:36 +04:00
aarshkshah1992
378468c1ec fix flaky test 2025-12-12 16:06:57 +04:00
aarshkshah1992
25d375dbe0 change to control plane 2025-12-12 13:08:47 +04:00
aarshkshah1992
2d5f3112c8 merge develop 2025-12-12 13:03:29 +04:00
aarshkshah1992
1d49a2a88d change gossipsub to gossip 2025-12-12 12:57:29 +04:00
aarshkshah1992
b119290584 change gossipsub to gossip 2025-12-12 12:51:21 +04:00
aarshkshah1992
9b0cffcdea NSE changes 2025-12-12 12:39:04 +04:00
aarshkshah1992
979466a3d7 fix naming to subscription controller 2025-12-12 11:39:21 +04:00
aarshkshah1992
08ba8dd487 change interface names 2025-12-12 11:21:47 +04:00
aarshkshah1992
a235a581e1 remove service dep 2025-12-12 11:14:29 +04:00
aarshkshah1992
45b88de7f2 use beacon-chain config 2025-12-12 09:57:39 +04:00
aarshkshah1992
f69e017f6a add go-docs 2025-12-11 18:46:46 +04:00
aarshkshah1992
19e5684875 changes as per review 2025-12-11 18:33:43 +04:00
aarshkshah1992
24fc76b7fb changes as per review 2025-12-11 18:14:47 +04:00
aarshkshah1992
0a4aad543b address review 2025-12-11 17:11:15 +04:00
aarshkshah1992
ab8584d138 changes as per review 2025-12-11 16:28:16 +04:00
aarshkshah1992
9b6cd96012 changes as per review 2025-12-11 15:56:08 +04:00
aarshkshah1992
bba1358637 change data structures 2025-12-05 08:32:39 +04:00
aarshkshah1992
e6af417c62 add a go doc for the PeersForTopic function 2025-12-04 17:35:20 +04:00
aarshkshah1992
3509622323 fix comment 2025-12-04 17:20:08 +04:00
aarshkshah1992
80d7bd6084 address review 2025-12-04 17:01:27 +04:00
aarshkshah1992
8d3c3fd40b remove context cancellation comment 2025-12-04 16:58:10 +04:00
aarshkshah1992
1caf7074a7 fix error wrapping 2025-12-04 16:56:53 +04:00
Aarsh Shah
92bd155e3f Apply suggestions from code review
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-12-04 16:50:24 +04:00
aarshkshah1992
8360b0f882 fix CI 2025-12-03 17:22:34 +04:00
aarshkshah1992
bc0d60138c run bazel gazelle 2025-12-03 17:12:48 +04:00
aarshkshah1992
32f377a665 fix crawl timing 2025-12-03 16:59:37 +04:00
aarshkshah1992
2ab792b0fe godocs 2025-12-03 16:50:13 +04:00
Aarsh Shah
2ae8de05dd Apply suggestions from code review
Co-authored-by: Manu NALEPA <enalepa@offchainlabs.com>
2025-12-03 16:27:07 +04:00
aarshkshah1992
cff96129b5 fix unsafe cast 2025-12-03 16:22:41 +04:00
aarshkshah1992
8abd1db7c1 change maxConcurrentDials to uint 2025-12-03 16:08:55 +04:00
aarshkshah1992
a4d726184f rename to p2p service 2025-12-03 16:02:19 +04:00
aarshkshah1992
85acf242f6 remove circular dependency 2025-12-03 16:00:29 +04:00
aarshkshah1992
ba6d1d0c6b update godoc 2025-12-03 13:34:41 +04:00
aarshkshah1992
69d3453f97 fix sync 2025-12-03 13:32:31 +04:00
aarshkshah1992
a69cf6f5d4 refactor update peers 2025-12-03 13:29:20 +04:00
aarshkshah1992
0a46c2d16d remove topic type 2025-12-03 13:12:01 +04:00
aarshkshah1992
3c1b8859bc rename functions 2025-12-03 12:40:25 +04:00
aarshkshah1992
9f645ae0a4 refactor update peer 2025-12-03 12:25:38 +04:00
aarshkshah1992
5eeeb9ed15 wrap error and remove nodeId 2025-12-03 11:06:24 +04:00
aarshkshah1992
2061fc8a2f design doc 2025-11-25 15:49:37 +04:00
aarshkshah1992
eb93350583 Merge remote-tracking branch 'origin/feat/gossipsub-control-pane-peer-crawler-peer-controller' into feat/gossipsub-control-pane-peer-crawler-peer-controller 2025-11-24 17:50:19 +04:00
aarshkshah1992
affdab7776 fix changelog 2025-11-24 17:49:47 +04:00
Aarsh Shah
eb556adab5 Merge branch 'develop' into feat/gossipsub-control-pane-peer-crawler-peer-controller 2025-11-24 17:47:11 +04:00
aarshkshah1992
09d886c676 fix test 2025-11-24 15:18:27 +04:00
aarshkshah1992
11c5c6fb8b fix dynamic families 2025-11-24 15:06:35 +04:00
aarshkshah1992
10804bbb56 fix conflicts 2025-11-24 14:58:09 +04:00
aarshkshah1992
e6f3b636ac fix lint 2025-11-24 14:54:23 +04:00
aarshkshah1992
e95676dd91 fix conflicts 2025-11-24 14:41:58 +04:00
aarshkshah1992
86b65e0912 re-run test 2025-11-21 17:10:35 +04:00
aarshkshah1992
e9dac06037 finish tests 2025-11-21 16:38:00 +04:00
aarshkshah1992
893cf60921 fix tests 2025-11-21 15:37:15 +04:00
aarshkshah1992
41c9f160a2 fix bazel 2025-11-21 14:08:02 +04:00
aarshkshah1992
707abe6112 fix mock peer manager 2025-11-21 14:06:36 +04:00
aarshkshah1992
76975a134d fix build 2025-11-21 14:00:40 +04:00
aarshkshah1992
672de432a2 finish all changes 2025-11-21 12:08:17 +04:00
aarshkshah1992
4a6d88d9fb finish all changes 2025-11-21 12:05:57 +04:00
aarshkshah1992
1ff836e549 peer controller first draft 2025-11-17 19:39:18 +04:00
aarshkshah1992
08f038fe80 fix lint 2025-11-13 18:57:18 +04:00
aarshkshah1992
63279bcadf revert new line change 2025-11-13 17:06:47 +04:00
aarshkshah1992
61628efd44 run CI 2025-11-13 17:04:41 +04:00
aarshkshah1992
9b07f13cd3 tests for the crawler 2025-11-13 16:58:49 +04:00
aarshkshah1992
1397a79b4c changelog 2025-11-12 17:09:33 +04:00
aarshkshah1992
929115639d draft 2025-11-12 16:38:07 +04:00
aarshkshah1992
14dca40786 draft 2025-11-12 16:36:53 +04:00
aarshkshah1992
fa7596bceb first draft 2025-11-12 12:54:41 +04:00
aarshkshah1992
5161f087fc fix compilation 2025-11-06 19:38:59 +04:00
aarshkshah1992
71050ab076 fix bazel 2025-11-06 19:10:19 +04:00
aarshkshah1992
614367ddcf fix lint 2025-11-05 22:26:55 +04:00
aarshkshah1992
3f7371445b fix test in sync 2025-11-05 22:23:26 +04:00
aarshkshah1992
a15a1ade17 fix test 2025-11-05 21:31:04 +04:00
aarshkshah1992
798376b1d7 fix bazel 2025-11-05 21:21:51 +04:00
aarshkshah1992
93271050bf more tests 2025-11-05 21:17:03 +04:00
aarshkshah1992
8dfbabc691 fork watcher test works 2025-11-05 14:45:35 +04:00
aarshkshah1992
af2522e5f0 fix schedule 2025-11-05 09:49:27 +04:00
aarshkshah1992
452d42bd10 fix test in sync 2025-11-05 09:21:34 +04:00
aarshkshah1992
3e985377ce fix test 2025-11-05 09:05:57 +04:00
aarshkshah1992
ab2e836d3f fix test 2025-11-04 21:11:45 +04:00
Aarsh Shah
14158bea9c Merge branch 'develop' into feat/use-topic-abstraction-for-gossipsub-and-refactor-fork-watcher 2025-11-04 12:02:25 -05:00
aarshkshah1992
e14590636f bazel gazelle 2025-11-04 20:08:41 +04:00
aarshkshah1992
ce3660d2e7 finish draft 2025-11-04 20:08:11 +04:00
aarshkshah1992
7853cb9db0 changelog fragment 2025-11-03 19:14:46 +04:00
aarshkshah1992
8cfeda1473 WIP gossipsub controller 2025-11-03 19:02:41 +04:00
524 changed files with 9797 additions and 20855 deletions

.gitignore vendored
View File

@@ -38,6 +38,7 @@ metaData
# execution API authentication
jwt.hex
execution/
# manual testing
tmp

View File

@@ -1,4 +1,5 @@
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")
@@ -54,6 +55,15 @@ alias(
visibility = ["//visibility:public"],
)
gometalinter(
name = "gometalinter",
config = "//:.gometalinter.json",
paths = [
"./...",
],
prefix = prefix,
)
goimports(
name = "goimports",
display_diffs = True,

View File

@@ -4,25 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.
## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07
Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).
### Added
- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)
### Changed
- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also now broadcasts immediately, matching the gRPC behavior. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations only differing by the aggregator index are in the pending queue, only process one of them. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)
### Fixed
- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)
## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18
Release highlights:

View File

@@ -72,7 +72,7 @@ Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should li
To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://pkg.go.dev/go/build#hdr-Build_Constraints)).
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in extra build tag, eg:
```bash
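# Hedged sketch (not taken from this diff): the package path and the `develop`
# tag below are illustrative placeholders for whichever package and build tag
# you are testing against.
go test ./beacon-chain/sync/... -tags develop
```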

View File

@@ -9,7 +9,7 @@ This README details how to setup Prysm for interop testing for usage with other
## Installation & Setup
1. Install [Bazel](https://bazel.build/install) **(Recommended)**
1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
2. `git clone https://github.com/OffchainLabs/prysm && cd prysm`
3. `bazel build //cmd/...`

View File

@@ -15,7 +15,7 @@
## 📖 Overview
This is the core repository for Prysm, a [Golang](https://go.dev/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details of the latest releases and upcoming breaking changes.
@@ -23,7 +23,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details
## 🚀 Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://prysm.offchainlabs.com/docs/)**.
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.
💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.
@@ -51,7 +51,7 @@ Prysm maintains two permanent branches:
### 🛠 Contribution Guide
Want to get involved? Check out our **[Contribution Guide](https://prysm.offchainlabs.com/docs/contribute/contribution-guidelines/)** to learn more!
Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!
---

View File

@@ -273,16 +273,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)
consensus_spec_version = "v1.7.0-alpha.1"
consensus_spec_version = "v1.6.0"
load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")
consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
"minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
"mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
"general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
"minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
"mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
},
version = consensus_spec_version,
)
@@ -298,7 +298,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)
@@ -423,6 +423,10 @@ load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")
e2e_deps()
load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")
gometalinter_dependencies()
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies(go_sdk = "go_sdk")

View File

@@ -503,77 +503,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
return s.Signature
}
// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------
type ExecutionPayloadBid struct {
ParentBlockHash string `json:"parent_block_hash"`
ParentBlockRoot string `json:"parent_block_root"`
BlockHash string `json:"block_hash"`
PrevRandao string `json:"prev_randao"`
FeeRecipient string `json:"fee_recipient"`
GasLimit string `json:"gas_limit"`
BuilderIndex string `json:"builder_index"`
Slot string `json:"slot"`
Value string `json:"value"`
ExecutionPayment string `json:"execution_payment"`
BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}
type SignedExecutionPayloadBid struct {
Message *ExecutionPayloadBid `json:"message"`
Signature string `json:"signature"`
}
type PayloadAttestationData struct {
BeaconBlockRoot string `json:"beacon_block_root"`
Slot string `json:"slot"`
PayloadPresent bool `json:"payload_present"`
BlobDataAvailable bool `json:"blob_data_available"`
}
type PayloadAttestation struct {
AggregationBits string `json:"aggregation_bits"`
Data *PayloadAttestationData `json:"data"`
Signature string `json:"signature"`
}
type BeaconBlockBodyGloas struct {
RandaoReveal string `json:"randao_reveal"`
Eth1Data *Eth1Data `json:"eth1_data"`
Graffiti string `json:"graffiti"`
ProposerSlashings []*ProposerSlashing `json:"proposer_slashings"`
AttesterSlashings []*AttesterSlashingElectra `json:"attester_slashings"`
Attestations []*AttestationElectra `json:"attestations"`
Deposits []*Deposit `json:"deposits"`
VoluntaryExits []*SignedVoluntaryExit `json:"voluntary_exits"`
SyncAggregate *SyncAggregate `json:"sync_aggregate"`
BLSToExecutionChanges []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
SignedExecutionPayloadBid *SignedExecutionPayloadBid `json:"signed_execution_payload_bid"`
PayloadAttestations []*PayloadAttestation `json:"payload_attestations"`
}
type BeaconBlockGloas struct {
Slot string `json:"slot"`
ProposerIndex string `json:"proposer_index"`
ParentRoot string `json:"parent_root"`
StateRoot string `json:"state_root"`
Body *BeaconBlockBodyGloas `json:"body"`
}
type SignedBeaconBlockGloas struct {
Message *BeaconBlockGloas `json:"message"`
Signature string `json:"signature"`
}
var _ SignedMessageJsoner = &SignedBeaconBlockGloas{}
func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
return json.Marshal(s.Message)
}
func (s *SignedBeaconBlockGloas) SigString() string {
return s.Signature
}

View File

@@ -268,8 +268,6 @@ func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock)
return SignedBlindedBeaconBlockFuluFromConsensus(pbStruct)
case *eth.SignedBeaconBlockFulu:
return SignedBeaconBlockFuluFromConsensus(pbStruct)
case *eth.SignedBeaconBlockGloas:
return SignedBeaconBlockGloasFromConsensus(pbStruct)
default:
return nil, ErrUnsupportedConversion
}
@@ -2887,379 +2885,3 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBe
Signature: hexutil.Encode(b.Signature),
}, nil
}
// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------
func SignedBeaconBlockGloasFromConsensus(b *eth.SignedBeaconBlockGloas) (*SignedBeaconBlockGloas, error) {
block, err := BeaconBlockGloasFromConsensus(b.Block)
if err != nil {
return nil, err
}
return &SignedBeaconBlockGloas{
Message: block,
Signature: hexutil.Encode(b.Signature),
}, nil
}
func BeaconBlockGloasFromConsensus(b *eth.BeaconBlockGloas) (*BeaconBlockGloas, error) {
payloadAttestations := make([]*PayloadAttestation, len(b.Body.PayloadAttestations))
for i, pa := range b.Body.PayloadAttestations {
payloadAttestations[i] = PayloadAttestationFromConsensus(pa)
}
return &BeaconBlockGloas{
Slot: fmt.Sprintf("%d", b.Slot),
ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
ParentRoot: hexutil.Encode(b.ParentRoot),
StateRoot: hexutil.Encode(b.StateRoot),
Body: &BeaconBlockBodyGloas{
RandaoReveal: hexutil.Encode(b.Body.RandaoReveal),
Eth1Data: Eth1DataFromConsensus(b.Body.Eth1Data),
Graffiti: hexutil.Encode(b.Body.Graffiti),
ProposerSlashings: ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
AttesterSlashings: AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
Attestations: AttsElectraFromConsensus(b.Body.Attestations),
Deposits: DepositsFromConsensus(b.Body.Deposits),
VoluntaryExits: SignedExitsFromConsensus(b.Body.VoluntaryExits),
SyncAggregate: SyncAggregateFromConsensus(b.Body.SyncAggregate),
BLSToExecutionChanges: SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
SignedExecutionPayloadBid: SignedExecutionPayloadBidFromConsensus(b.Body.SignedExecutionPayloadBid),
PayloadAttestations: payloadAttestations,
},
}, nil
}
func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *SignedExecutionPayloadBid {
return &SignedExecutionPayloadBid{
Message: ExecutionPayloadBidFromConsensus(b.Message),
Signature: hexutil.Encode(b.Signature),
}
}
func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
return &ExecutionPayloadBid{
ParentBlockHash: hexutil.Encode(b.ParentBlockHash),
ParentBlockRoot: hexutil.Encode(b.ParentBlockRoot),
BlockHash: hexutil.Encode(b.BlockHash),
PrevRandao: hexutil.Encode(b.PrevRandao),
FeeRecipient: hexutil.Encode(b.FeeRecipient),
GasLimit: fmt.Sprintf("%d", b.GasLimit),
BuilderIndex: fmt.Sprintf("%d", b.BuilderIndex),
Slot: fmt.Sprintf("%d", b.Slot),
Value: fmt.Sprintf("%d", b.Value),
ExecutionPayment: fmt.Sprintf("%d", b.ExecutionPayment),
BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
}
}
func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttestation {
return &PayloadAttestation{
AggregationBits: hexutil.Encode(pa.AggregationBits),
Data: PayloadAttestationDataFromConsensus(pa.Data),
Signature: hexutil.Encode(pa.Signature),
}
}
func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
return &PayloadAttestationData{
BeaconBlockRoot: hexutil.Encode(d.BeaconBlockRoot),
Slot: fmt.Sprintf("%d", d.Slot),
PayloadPresent: d.PayloadPresent,
BlobDataAvailable: d.BlobDataAvailable,
}
}
func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
if b == nil {
return nil, errNilValue
}
sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
block, err := b.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
return &eth.SignedBeaconBlockGloas{
Block: block,
Signature: sig,
}, nil
}
func (b *BeaconBlockGloas) ToConsensus() (*eth.BeaconBlockGloas, error) {
if b == nil {
return nil, errNilValue
}
if b.Body == nil {
return nil, server.NewDecodeError(errNilValue, "Body")
}
if b.Body.Eth1Data == nil {
return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
}
if b.Body.SyncAggregate == nil {
return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
}
if b.Body.SignedExecutionPayloadBid == nil {
return nil, server.NewDecodeError(errNilValue, "Body.SignedExecutionPayloadBid")
}
slot, err := strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ProposerIndex")
}
parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ParentRoot")
}
stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "StateRoot")
}
body, err := b.Body.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Body")
}
return &eth.BeaconBlockGloas{
Slot: primitives.Slot(slot),
ProposerIndex: primitives.ValidatorIndex(proposerIndex),
ParentRoot: parentRoot,
StateRoot: stateRoot,
Body: body,
}, nil
}
func (b *BeaconBlockBodyGloas) ToConsensus() (*eth.BeaconBlockBodyGloas, error) {
if b == nil {
return nil, errNilValue
}
randaoReveal, err := bytesutil.DecodeHexWithLength(b.RandaoReveal, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "RandaoReveal")
}
depositRoot, err := bytesutil.DecodeHexWithLength(b.Eth1Data.DepositRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Eth1Data.DepositRoot")
}
depositCount, err := strconv.ParseUint(b.Eth1Data.DepositCount, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Eth1Data.DepositCount")
}
blockHash, err := bytesutil.DecodeHexWithLength(b.Eth1Data.BlockHash, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Eth1Data.BlockHash")
}
graffiti, err := bytesutil.DecodeHexWithLength(b.Graffiti, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "Graffiti")
}
proposerSlashings, err := ProposerSlashingsToConsensus(b.ProposerSlashings)
if err != nil {
return nil, server.NewDecodeError(err, "ProposerSlashings")
}
attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.AttesterSlashings)
if err != nil {
return nil, server.NewDecodeError(err, "AttesterSlashings")
}
atts, err := AttsElectraToConsensus(b.Attestations)
if err != nil {
return nil, server.NewDecodeError(err, "Attestations")
}
deposits, err := DepositsToConsensus(b.Deposits)
if err != nil {
return nil, server.NewDecodeError(err, "Deposits")
}
exits, err := SignedExitsToConsensus(b.VoluntaryExits)
if err != nil {
return nil, server.NewDecodeError(err, "VoluntaryExits")
}
syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
if err != nil {
return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeBits")
}
syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeSignature")
}
blsChanges, err := SignedBLSChangesToConsensus(b.BLSToExecutionChanges)
if err != nil {
return nil, server.NewDecodeError(err, "BLSToExecutionChanges")
}
signedBid, err := b.SignedExecutionPayloadBid.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "SignedExecutionPayloadBid")
}
payloadAttestations, err := PayloadAttestationsToConsensus(b.PayloadAttestations)
if err != nil {
return nil, server.NewDecodeError(err, "PayloadAttestations")
}
return &eth.BeaconBlockBodyGloas{
RandaoReveal: randaoReveal,
Eth1Data: &eth.Eth1Data{
DepositRoot: depositRoot,
DepositCount: depositCount,
BlockHash: blockHash,
},
Graffiti: graffiti,
ProposerSlashings: proposerSlashings,
AttesterSlashings: attesterSlashings,
Attestations: atts,
Deposits: deposits,
VoluntaryExits: exits,
SyncAggregate: &eth.SyncAggregate{
SyncCommitteeBits: syncCommitteeBits,
SyncCommitteeSignature: syncCommitteeSig,
},
BlsToExecutionChanges: blsChanges,
SignedExecutionPayloadBid: signedBid,
PayloadAttestations: payloadAttestations,
}, nil
}
func (b *SignedExecutionPayloadBid) ToConsensus() (*eth.SignedExecutionPayloadBid, error) {
if b == nil {
return nil, errNilValue
}
sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
message, err := b.Message.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Message")
}
return &eth.SignedExecutionPayloadBid{
Message: message,
Signature: sig,
}, nil
}
func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
if b == nil {
return nil, errNilValue
}
parentBlockHash, err := bytesutil.DecodeHexWithLength(b.ParentBlockHash, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ParentBlockHash")
}
parentBlockRoot, err := bytesutil.DecodeHexWithLength(b.ParentBlockRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "ParentBlockRoot")
}
blockHash, err := bytesutil.DecodeHexWithLength(b.BlockHash, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlockHash")
}
prevRandao, err := bytesutil.DecodeHexWithLength(b.PrevRandao, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "PrevRandao")
}
feeRecipient, err := bytesutil.DecodeHexWithLength(b.FeeRecipient, fieldparams.FeeRecipientLength)
if err != nil {
return nil, server.NewDecodeError(err, "FeeRecipient")
}
gasLimit, err := strconv.ParseUint(b.GasLimit, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "GasLimit")
}
builderIndex, err := strconv.ParseUint(b.BuilderIndex, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "BuilderIndex")
}
slot, err := strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
value, err := strconv.ParseUint(b.Value, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Value")
}
executionPayment, err := strconv.ParseUint(b.ExecutionPayment, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "ExecutionPayment")
}
blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
}
return &eth.ExecutionPayloadBid{
ParentBlockHash: parentBlockHash,
ParentBlockRoot: parentBlockRoot,
BlockHash: blockHash,
PrevRandao: prevRandao,
FeeRecipient: feeRecipient,
GasLimit: gasLimit,
BuilderIndex: primitives.BuilderIndex(builderIndex),
Slot: primitives.Slot(slot),
Value: primitives.Gwei(value),
ExecutionPayment: primitives.Gwei(executionPayment),
BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
}, nil
}
func PayloadAttestationsToConsensus(pa []*PayloadAttestation) ([]*eth.PayloadAttestation, error) {
if pa == nil {
return nil, errNilValue
}
result := make([]*eth.PayloadAttestation, len(pa))
for i, p := range pa {
converted, err := p.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
}
result[i] = converted
}
return result, nil
}
func (p *PayloadAttestation) ToConsensus() (*eth.PayloadAttestation, error) {
if p == nil {
return nil, errNilValue
}
aggregationBits, err := hexutil.Decode(p.AggregationBits)
if err != nil {
return nil, server.NewDecodeError(err, "AggregationBits")
}
data, err := p.Data.ToConsensus()
if err != nil {
return nil, server.NewDecodeError(err, "Data")
}
sig, err := bytesutil.DecodeHexWithLength(p.Signature, fieldparams.BLSSignatureLength)
if err != nil {
return nil, server.NewDecodeError(err, "Signature")
}
return &eth.PayloadAttestation{
AggregationBits: aggregationBits,
Data: data,
Signature: sig,
}, nil
}
func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, error) {
if d == nil {
return nil, errNilValue
}
beaconBlockRoot, err := bytesutil.DecodeHexWithLength(d.BeaconBlockRoot, fieldparams.RootLength)
if err != nil {
return nil, server.NewDecodeError(err, "BeaconBlockRoot")
}
slot, err := strconv.ParseUint(d.Slot, 10, 64)
if err != nil {
return nil, server.NewDecodeError(err, "Slot")
}
return &eth.PayloadAttestationData{
BeaconBlockRoot: beaconBlockRoot,
Slot: primitives.Slot(slot),
PayloadPresent: d.PayloadPresent,
BlobDataAvailable: d.BlobDataAvailable,
}, nil
}

View File

@@ -27,7 +27,6 @@ go_library(
"receive_blob.go",
"receive_block.go",
"receive_data_column.go",
"receive_proof.go",
"service.go",
"setup_forkchoice.go",
"tracked_proposer.go",
@@ -50,7 +49,6 @@ go_library(
"//beacon-chain/core/electra:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
@@ -76,6 +74,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
@@ -175,6 +174,7 @@ go_test(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",

View File

@@ -13,7 +13,7 @@ go_library(
deps = [
"//consensus-types/blocks:go_default_library",
"@com_github_crate_crypto_go_kzg_4844//:go_default_library",
"@com_github_ethereum_c_kzg_4844_v2//bindings/go:go_default_library",
"@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
"@com_github_pkg_errors//:go_default_library",

View File

@@ -221,19 +221,6 @@ var (
Buckets: []float64{1, 2, 4, 8, 16, 32},
},
)
commitmentCount = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "commitment_count_max_21",
Help: "The number of blob KZG commitments per block.",
Buckets: []float64{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21},
},
)
maxBlobsPerBlock = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "max_blobs_per_block",
Help: "The maximum number of blobs allowed in a block.",
},
)
)
// reportSlotMetrics reports slot related metrics.

View File

@@ -5,7 +5,6 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
@@ -227,14 +226,6 @@ func WithDataColumnStorage(b *filesystem.DataColumnStorage) Option {
}
}
// WithProofStorage sets the proof storage backend for the blockchain service.
func WithProofStorage(p *filesystem.ProofStorage) Option {
return func(s *Service) error {
s.proofStorage = p
return nil
}
}
// WithSyncChecker sets the sync checker for the blockchain service.
func WithSyncChecker(checker Checker) Option {
return func(s *Service) error {
@@ -275,10 +266,3 @@ func WithStartWaitingDataColumnSidecars(c chan bool) Option {
return nil
}
}
func WithOperationNotifier(operationNotifier operation.Notifier) Option {
return func(s *Service) error {
s.cfg.OperationNotifier = operationNotifier
return nil
}
}

View File

@@ -113,7 +113,6 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
s.updateCachesPostBlockProcessing(cfg)
}()
}
return nil
}
@@ -662,17 +661,10 @@ func (s *Service) isDataAvailable(
return errors.New("invalid nil beacon block")
}
root, blockVersion := roBlock.Root(), roBlock.Version()
root := roBlock.Root()
blockVersion := block.Version()
if blockVersion >= version.Fulu {
if err := s.areExecutionProofsAvailable(ctx, roBlock); err != nil {
return fmt.Errorf("are execution proofs available: %w", err)
}
if err := s.areDataColumnsAvailable(ctx, root, block); err != nil {
return fmt.Errorf("are data columns available: %w", err)
}
return nil
return s.areDataColumnsAvailable(ctx, root, block)
}
if blockVersion >= version.Deneb {
@@ -682,77 +674,6 @@ func (s *Service) isDataAvailable(
return nil
}
// areExecutionProofsAvailable blocks until we have enough execution proofs to import the block,
// or an error or context cancellation occurs.
// This check is only performed for lightweight verifier nodes that need zkVM proofs
// to validate block execution (nodes without execution layer + proof generation capability).
// A nil result means that the data availability check is successful.
func (s *Service) areExecutionProofsAvailable(ctx context.Context, roBlock consensusblocks.ROBlock) error {
// Return early if zkVM features are disabled (no need to check for execution proofs),
// or if the generation proof is enabled (we will generate proofs ourselves).
if !features.Get().EnableZkvm {
return nil
}
root, slot := roBlock.Root(), roBlock.Block().Slot()
requiredProofCount := params.BeaconConfig().MinProofsRequired
log := log.WithFields(logrus.Fields{
"root": fmt.Sprintf("%#x", root),
"slot": slot,
"requiredProofCount": requiredProofCount,
})
// Subscribe to newly execution proofs stored in the database.
subscription, identChan := s.proofStorage.Subscribe()
defer subscription.Unsubscribe()
// Return early if we already have enough proofs.
if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Already have enough execution proofs")
return nil
}
// Log for DA checks that cross over into the next slot; helpful for debugging.
nextSlot, err := slots.StartTime(s.genesisTime, roBlock.Block().Slot()+1)
if err != nil {
return fmt.Errorf("start time: %w", err)
}
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
actualCount := uint64(s.proofStorage.Summary(root).Count())
if actualCount >= requiredProofCount {
return
}
log.WithField("proofsRetrieved", actualCount).Warning("Execution proofs still missing at slot end")
})
defer timer.Stop()
}
// Some proofs are missing; wait for them.
for {
select {
case <-ctx.Done():
return ctx.Err()
case proofIdent := <-identChan:
// Skip if the proof is for a different block.
if proofIdent.BlockRoot != root {
continue
}
// Return if we have enough proofs.
if actualProofCount := uint64(s.proofStorage.Summary(root).Count()); actualProofCount >= requiredProofCount {
log.WithField("actualProofCount", actualProofCount).Debug("Got enough execution proofs")
return nil
}
}
}
}
// areDataColumnsAvailable blocks until all data columns committed to in the block are available,
// or an error or context cancellation occurs. A nil result means that the data availability check is successful.
func (s *Service) areDataColumnsAvailable(
@@ -889,7 +810,14 @@ func (s *Service) areDataColumnsAvailable(
}
case <-ctx.Done():
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, helpers.SortedPrettySliceFromMap(missing))
var missingIndices any = "all"
missingIndicesCount := len(missing)
if missingIndicesCount < fieldparams.NumberOfColumns {
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}
return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)
}
}
}

View File

@@ -94,7 +94,6 @@ func (s *Service) spawnProcessAttestationsRoutine() {
for {
select {
case <-s.ctx.Done():
ticker.Done()
return
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {

View File

@@ -16,7 +16,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/slasher/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -60,12 +59,6 @@ type DataColumnReceiver interface {
ReceiveDataColumns([]blocks.VerifiedRODataColumn) error
}
// ProofReceiver interface defines the methods of chain service for receiving new
// execution proofs
type ProofReceiver interface {
ReceiveProof(blocks.VerifiedROSignedExecutionProof) error
}
// SlashingReceiver interface defines the methods of chain service for receiving validated slashing over the wire.
type SlashingReceiver interface {
ReceiveAttesterSlashing(ctx context.Context, slashing ethpb.AttSlashing)
@@ -265,55 +258,32 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, blo
}
func (s *Service) reportPostBlockProcessing(
signedBlock interfaces.SignedBeaconBlock,
block interfaces.SignedBeaconBlock,
blockRoot [32]byte,
receivedTime time.Time,
daWaitedTime time.Duration,
) {
block := signedBlock.Block()
if block == nil {
log.WithField("blockRoot", blockRoot).Error("Nil block")
return
}
// Reports on block and fork choice metrics.
cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
reportSlotMetrics(block.Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
// Log block sync status.
cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
if err := logBlockSyncStatus(block, blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
log.WithError(err).Error("Unable to log block sync status")
}
// Log payload data
if err := logPayload(block); err != nil {
if err := logPayload(block.Block()); err != nil {
log.WithError(err).Error("Unable to log debug block payload data")
}
// Log state transition data.
if err := logStateTransitionData(block); err != nil {
if err := logStateTransitionData(block.Block()); err != nil {
log.WithError(err).Error("Unable to log state transition data")
}
timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))
body := block.Body()
if body == nil {
log.WithField("blockRoot", blockRoot).Error("Nil block body")
return
}
commitments, err := body.BlobKzgCommitments()
if err != nil {
log.WithError(err).Error("Unable to get blob KZG commitments")
}
commitmentCount.Observe(float64(len(commitments)))
maxBlobsPerBlock.Set(float64(params.BeaconConfig().MaxBlobsPerBlock(block.Slot())))
}
func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {

View File

@@ -17,7 +17,6 @@ import (
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"
@@ -131,10 +130,12 @@ func TestService_ReceiveBlock(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
// Hacky sleep, should use a better way to be able to resolve the race
// between event being sent out and processed.
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
{
@@ -221,10 +222,10 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
})
wg.Wait()
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
// Verify fork choice has processed the block. (Genesis block and the new block)
assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
}
@@ -264,10 +265,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
},
check: func(t *testing.T, s *Service) {
notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) >= 1
}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
time.Sleep(100 * time.Millisecond)
if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
t.Errorf("Received %d state notifications, expected at least 1", recvd)
}
},
},
}
@@ -511,9 +512,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
s.cfg.StateNotifier = notifier
s.executePostFinalizationTasks(s.ctx, headState)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) == 1
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -552,9 +552,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
s.cfg.StateNotifier = notifier
s.executePostFinalizationTasks(s.ctx, headState)
require.Eventually(t, func() bool {
return len(notifier.ReceivedEvents()) == 1
}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
require.Equal(t, 1, len(notifier.ReceivedEvents()))
e := notifier.ReceivedEvents()[0]
assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)
@@ -597,13 +596,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {
s.executePostFinalizationTasks(s.ctx, l.AttestedState)
// Wait for the light client bootstrap to be saved (runs in goroutine)
var b interfaces.LightClientBootstrap
require.Eventually(t, func() bool {
var err error
b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
return err == nil && b != nil
}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")
// wait for the goroutine to finish processing
time.Sleep(1 * time.Second)
// Check that the light client bootstrap is saved
b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
require.NoError(t, err)
require.NotNil(t, b)
btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
require.NoError(t, err)

View File

@@ -1,15 +0,0 @@
package blockchain
import (
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/pkg/errors"
)
// ReceiveProof saves an execution proof to storage.
func (s *Service) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
if err := s.proofStorage.Save([]blocks.VerifiedROSignedExecutionProof{proof}); err != nil {
return errors.Wrap(err, "save proof")
}
return nil
}

View File

@@ -12,9 +12,9 @@ import (
"github.com/OffchainLabs/prysm/v7/async/event"
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/operation"
statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
@@ -30,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -65,7 +66,6 @@ type Service struct {
blockBeingSynced *currentlySyncingBlock
blobStorage *filesystem.BlobStorage
dataColumnStorage *filesystem.DataColumnStorage
proofStorage *filesystem.ProofStorage
slasherEnabled bool
lcStore *lightClient.Store
startWaitingDataColumnSidecars chan bool // for testing purposes only
@@ -88,7 +88,6 @@ type config struct {
P2P p2p.Accessor
MaxRoutines int
StateNotifier statefeed.Notifier
OperationNotifier operation.Notifier
ForkChoiceStore f.ForkChoicer
AttService *attestations.Service
StateGen *stategen.State
@@ -292,6 +291,19 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
return errors.Wrap(err, "failed to initialize blockchain service")
}
if !params.FuluEnabled() {
return nil
}
earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
if err != nil {
return errors.Wrap(err, "could not get and save custody group count")
}
if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
return errors.Wrap(err, "update custody info")
}
return nil
}
@@ -456,6 +468,73 @@ func (s *Service) removeStartupState() {
s.cfg.FinalizedStateAtStartUp = nil
}
// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
isSupernode := flags.Get().Supernode
isSemiSupernode := flags.Get().SemiSupernode
cfg := params.BeaconConfig()
custodyRequirement := cfg.CustodyRequirement
// Check if the node was previously subscribed to all data subnets, and if so,
// store the new status accordingly.
wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
if err != nil {
return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
}
// Compute the target custody group count based on current flag configuration.
targetCustodyGroupCount := custodyRequirement
// Supernode: custody all groups (either currently set or previously enabled)
if isSupernode {
targetCustodyGroupCount = cfg.NumberOfCustodyGroups
}
// Semi-supernode: custody minimum needed for reconstruction, or custody requirement if higher
if isSemiSupernode {
semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
if err != nil {
return 0, 0, errors.Wrap(err, "minimum custody group count")
}
targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
}
// Safely compute the fulu fork slot.
fuluForkSlot, err := fuluForkSlot()
if err != nil {
return 0, 0, errors.Wrap(err, "fulu fork slot")
}
// If slot is before the fulu fork slot, then use the earliest stored slot as the reference slot.
if slot < fuluForkSlot {
slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
if err != nil {
return 0, 0, errors.Wrap(err, "earliest slot")
}
}
earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
if err != nil {
return 0, 0, errors.Wrap(err, "update custody info")
}
if isSupernode {
log.WithFields(logrus.Fields{
"current": actualCustodyGroupCount,
"target": cfg.NumberOfCustodyGroups,
}).Info("Supernode mode enabled. Will custody all data columns going forward.")
}
if wasSupernode && !isSupernode {
log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
}
return earliestAvailableSlot, actualCustodyGroupCount, nil
}
func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
currentTime := prysmTime.Now()
if currentTime.After(genesisTime) {
@@ -472,3 +551,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
}
go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}
func fuluForkSlot() (primitives.Slot, error) {
cfg := params.BeaconConfig()
fuluForkEpoch := cfg.FuluForkEpoch
if fuluForkEpoch == cfg.FarFutureEpoch {
return cfg.FarFutureSlot, nil
}
forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
if err != nil {
return 0, errors.Wrap(err, "epoch start")
}
return forkFuluSlot, nil
}

View File

@@ -23,9 +23,11 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -594,3 +596,218 @@ func TestNotifyIndex(t *testing.T) {
t.Errorf("Notifier channel did not receive the index")
}
}
func TestUpdateCustodyInfoInDB(t *testing.T) {
const (
fuluForkEpoch = 10
custodyRequirement = uint64(4)
earliestStoredSlot = primitives.Slot(12)
numberOfCustodyGroups = uint64(64)
)
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig()
cfg.FuluForkEpoch = fuluForkEpoch
cfg.CustodyRequirement = custodyRequirement
cfg.NumberOfCustodyGroups = numberOfCustodyGroups
params.OverrideBeaconConfig(cfg)
ctx := t.Context()
pbBlock := util.NewBeaconBlock()
pbBlock.Block.Slot = 12
signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
require.NoError(t, err)
roBlock, err := blocks.NewROBlock(signedBeaconBlock)
require.NoError(t, err)
t.Run("CGC increases before fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// After Fulu
// ----------
actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("CGC increases after fulu", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Before Fulu
// -----------
actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
require.NoError(t, err)
require.Equal(t, earliestStoredSlot, actualEas)
require.Equal(t, custodyRequirement, actualCgc)
// After Fulu
// ----------
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
})
t.Run("Supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.Supernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc)
// Try to downgrade by removing flag
gFlags.Supernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should still be supernode
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
})
t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Try to downgrade by removing flag
gFlags.SemiSupernode = false
flags.Init(gFlags)
defer flags.Init(resetFlags)
// UpdateCustodyInfo should prevent downgrade - custody count should remain at 64
actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
require.Equal(t, semiSupernodeCustody, actualCgc) // Still 64 due to downgrade prevention by UpdateCustodyInfo
})
t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Start with semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies 64 groups
// Upgrade to full supernode
gFlags.SemiSupernode = false
gFlags.Supernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Should upgrade to full supernode
upgradeSlot := slot + 2
actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
require.NoError(t, err)
require.Equal(t, upgradeSlot, actualEas) // Earliest slot updates when upgrading
require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to 128
})
t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
service, requirements := minimalTestService(t)
err = requirements.db.SaveBlock(ctx, roBlock)
require.NoError(t, err)
// Enable semi-supernode
resetFlags := flags.Get()
gFlags := new(flags.GlobalFlags)
gFlags.SemiSupernode = true
flags.Init(gFlags)
defer flags.Init(resetFlags)
// Ideally we would mock a custody requirement above 64 (simulating many validators)
// and verify that the higher validator-derived requirement wins over the semi-supernode minimum.
// minimalTestService only yields a custody requirement of 4, so the high-requirement case
// needs a different setup with actual validators; here we only exercise the low-requirement
// path, which should fall back to the semi-supernode minimum (64).
slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
require.NoError(t, err)
require.Equal(t, slot, actualEas)
semiSupernodeCustody := numberOfCustodyGroups / 2 // 64
// With low validator requirements (4), should use semi-supernode minimum (64)
require.Equal(t, semiSupernodeCustody, actualCgc)
})
}
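
For readers following the assertions above: the tests pin down a selection rule for the custody group count (CGC) in which flag-based minimums can only raise the count (semi-supernode to half of the 128 groups, supernode to all 128), and the value already persisted in the DB acts as a floor, so dropping a flag never shrinks custody. The snippet below is a minimal sketch of that rule under hypothetical names (custodyGroupCount, storedCgc, totalGroups); it illustrates the behaviour under test and is not the updateCustodyInfoInDB implementation.

// custodyGroupCount sketches the selection rule asserted by the tests above.
// All names here are hypothetical; the real logic lives in updateCustodyInfoInDB.
func custodyGroupCount(requirement, storedCgc, totalGroups uint64, supernode, semiSupernode bool) uint64 {
	cgc := requirement
	if semiSupernode && totalGroups/2 > cgc {
		cgc = totalGroups / 2 // semi-supernode custodies at least half of the groups (64 of 128)
	}
	if supernode {
		cgc = totalGroups // supernode custodies every group (128)
	}
	if storedCgc > cgc {
		cgc = storedCgc // downgrade prevention: never fall below the persisted value
	}
	return cgc
}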

View File

@@ -75,7 +75,6 @@ type ChainService struct {
SyncingRoot [32]byte
Blobs []blocks.VerifiedROBlob
DataColumns []blocks.VerifiedRODataColumn
Proofs []blocks.VerifiedROSignedExecutionProof
TargetRoot [32]byte
MockHeadSlot *primitives.Slot
}
@@ -758,12 +757,6 @@ func (c *ChainService) ReceiveDataColumns(dcs []blocks.VerifiedRODataColumn) err
return nil
}
// ReceiveProof implements the same method in chain service
func (c *ChainService) ReceiveProof(proof blocks.VerifiedROSignedExecutionProof) error {
c.Proofs = append(c.Proofs, proof)
return nil
}
// DependentRootForEpoch mocks the same method in the chain service
func (c *ChainService) DependentRootForEpoch(_ [32]byte, _ primitives.Epoch) ([32]byte, error) {
return c.TargetRoot, nil

View File

@@ -3,6 +3,7 @@ package altair
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -23,7 +24,7 @@ func ProcessPreGenesisDeposits(
if err != nil {
return nil, errors.Wrap(err, "could not process deposit")
}
beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
if err != nil {
return nil, err
}
@@ -36,7 +37,7 @@ func ProcessDeposits(
beaconState state.BeaconState,
deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, err
}
@@ -81,7 +82,7 @@ func ProcessDeposits(
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
@@ -121,7 +122,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
if !ok {
if !allSignaturesVerified {
valid, err := helpers.IsValidDepositSignature(data)
valid, err := blocks.IsValidDepositSignature(data)
if err != nil {
return nil, err
}

View File

@@ -5,6 +5,7 @@ go_library(
srcs = [
"attestation.go",
"attester_slashing.go",
"deposit.go",
"error.go",
"eth1_data.go",
"exit.go",
@@ -20,7 +21,6 @@ go_library(
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/gloas:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
@@ -33,6 +33,8 @@ go_library(
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -59,6 +61,7 @@ go_test(
"attester_slashing_test.go",
"block_operations_fuzz_test.go",
"block_regression_test.go",
"deposit_test.go",
"eth1_data_test.go",
"exit_test.go",
"exports_test.go",
@@ -87,6 +90,7 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",

View File

@@ -3,7 +3,6 @@ package blocks
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -319,7 +318,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
fuzzer.Fuzz(deposit)
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
require.NoError(t, err)
err = helpers.VerifyDeposit(s, deposit)
err = VerifyDeposit(s, deposit)
_ = err
fuzz.FreeMemory(i)
}

View File

@@ -1,4 +1,4 @@
package helpers
package blocks
import (
"context"

View File

@@ -1,9 +1,9 @@
package helpers_test
package blocks_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
deposit.Proof = proof
require.NoError(t, err)
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, true, verified)
}
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
deposit.Proof = proof
require.NoError(t, err)
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
require.NoError(t, err)
require.Equal(t, false, verified)
}
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
})
require.NoError(t, err)
want := "deposit root did not verify"
err = helpers.VerifyDeposit(beaconState, deposit)
err = blocks.VerifyDeposit(beaconState, deposit)
require.ErrorContains(t, want, err)
}
@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
require.NoError(t, err)
sig := sk.Sign(sr[:])
depositData.Signature = sig.Marshal()
valid, err := helpers.IsValidDepositSignature(depositData)
valid, err := blocks.IsValidDepositSignature(depositData)
require.NoError(t, err)
require.Equal(t, true, valid)
}
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
sig2 := sk2.Sign(sr2[:])
pendingDeposit2.Signature = sig2.Marshal()
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
require.NoError(t, err)
require.Equal(t, true, verified)
}
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
WithdrawalCredentials: make([]byte, 32),
Signature: make([]byte, 96),
}
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
require.NoError(t, err)
require.Equal(t, false, verified)
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
@@ -12,7 +11,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
@@ -128,16 +126,7 @@ func processProposerSlashing(
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
var err error
// [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within 2 epoch window
if beaconState.Version() >= version.Gloas {
err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
if err != nil {
return nil, err
}
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)

View File

@@ -12,13 +12,16 @@ go_library(
"log.go",
"registry_updates.go",
"transition.go",
"transition_no_verify_sig.go",
"upgrade.go",
"validator.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/blocks:go_default_library",
"//beacon-chain/core/epoch:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
@@ -41,6 +44,8 @@ go_library(
"//proto/prysm/v1alpha1/attestation:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
@@ -57,14 +62,17 @@ go_test(
"error_test.go",
"export_test.go",
"registry_updates_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
"upgrade_test.go",
"validator_test.go",
"withdrawals_test.go",
],
data = glob(["testdata/**"]),
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -77,12 +85,16 @@ go_test(
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/fuzz:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -3,14 +3,19 @@ package electra
import (
"bytes"
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common/math"
"github.com/pkg/errors"
)
@@ -90,6 +95,217 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
return nil
}
// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
// state: BeaconState,
// consolidation_request: ConsolidationRequest
// ) -> None:
// if is_valid_switch_to_compounding_request(state, consolidation_request):
// validator_pubkeys = [v.pubkey for v in state.validators]
// request_source_pubkey = consolidation_request.source_pubkey
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// switch_to_compounding_validator(state, source_index)
// return
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
// return
// # If the pending consolidations queue is full, consolidation requests are ignored
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
// return
// # If there is too little available consolidation churn limit, consolidation requests are ignored
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
// return
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// # Verify pubkeys exists
// request_source_pubkey = consolidation_request.source_pubkey
// request_target_pubkey = consolidation_request.target_pubkey
// if request_source_pubkey not in validator_pubkeys:
// return
// if request_target_pubkey not in validator_pubkeys:
// return
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
// source_validator = state.validators[source_index]
// target_validator = state.validators[target_index]
//
// # Verify source withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
// is_correct_source_address = (
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
// )
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// if not is_active_validator(source_validator, current_epoch):
// return
// if not is_active_validator(target_validator, current_epoch):
// return
// # Verify exits for source and target have not been initiated
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Verify the source has been active long enough
// if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
// return
//
// # Verify the source has no pending withdrawals in the queue
// if get_pending_balance_to_withdraw(state, source_index) > 0:
// return
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance
// )
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=source_index,
// target_index=target_index
// ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
if len(reqs) == 0 || st == nil {
return nil
}
curEpoch := slots.ToEpoch(st.Slot())
ffe := params.BeaconConfig().FarFutureEpoch
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
for _, cr := range reqs {
if cr == nil {
return errors.New("nil consolidation request")
}
if ctx.Err() != nil {
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
}
if IsValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("Failed to find source validator index")
continue
}
if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}
sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
if sourcePubkey == targetPubkey {
continue
}
if npc, err := st.NumPendingConsolidations(); err != nil {
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
} else if npc >= pcLimit {
continue
}
activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
continue
}
srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
if !ok {
continue
}
tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
if !ok {
continue
}
srcV, err := st.ValidatorAtIndex(srcIdx)
if err != nil {
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
}
roSrcV, err := state_native.NewValidator(srcV)
if err != nil {
return err
}
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
if err != nil {
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
}
// Verify source withdrawal credentials
if !roSrcV.HasExecutionWithdrawalCredentials() {
continue
}
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
continue
}
// Target validator must have their withdrawal credentials set appropriately.
if !tgtV.HasCompoundingWithdrawalCredentials() {
continue
}
// Both validators must be active.
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
continue
}
// Neither validator is exiting.
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
continue
}
e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
if overflow {
log.Error("Overflow when adding activation epoch and shard committee period")
continue
}
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
continue
}
// Initiate the exit of the source validator.
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
}
if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
}
return nil
}
// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:

View File

@@ -8,6 +8,8 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
@@ -201,6 +203,275 @@ func TestProcessPendingConsolidations(t *testing.T) {
}
}
func TestProcessConsolidationRequests(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
reqs []*enginev1.ConsolidationRequest
validate func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{nil},
validate: func(t *testing.T, st state.BeaconState) {
require.DeepEqual(t, st, st)
},
wantErr: true,
},
{
name: "one valid request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
}
// Validator scenario setup. See comments in reqs section.
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[13].ExitEpoch = 10
st.Validators[16].ExitEpoch = 10
st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
{
Index: 17,
Amount: 100,
},
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
// Source doesn't have withdrawal credentials.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
// Source withdrawal credentials don't match the consolidation address.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
SourcePubkey: []byte("val_5"),
TargetPubkey: []byte("val_6"),
},
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
SourcePubkey: []byte("val_7"),
TargetPubkey: []byte("val_8"),
},
// Source is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
SourcePubkey: []byte("val_9"),
TargetPubkey: []byte("val_10"),
},
// Target is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
SourcePubkey: []byte("val_11"),
TargetPubkey: []byte("val_12"),
},
// Source is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
SourcePubkey: []byte("val_13"),
TargetPubkey: []byte("val_14"),
},
// Target is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
SourcePubkey: []byte("val_15"),
TargetPubkey: []byte("val_16"),
},
// Source doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("INVALID"),
TargetPubkey: []byte("val_0"),
},
// Target doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("INVALID"),
},
// Source == target
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("val_0"),
},
// Has pending partial withdrawal
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_17"),
TargetPubkey: []byte("val_1"),
},
// Valid consolidation request. This should be last to ensure invalid requests do
// not end the processing early.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(1), numPC)
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)
// Verify the source validator is exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
},
},
{
name: "pending consolidations limit reached",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify no pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// Verify the source validator is not exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
},
{
name: "pending consolidations limit reached during processing",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// The first consolidation was appended.
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)
// Verify the second source validator is not exiting.
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
},
{
name: "pending consolidations limit reached and compounded consolidation after",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
// To allow compounding consolidation requests.
st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_3"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// Verify that the switch-to-compounding request was still processed despite the full queue
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessConsolidationRequests() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr {
require.NoError(t, err)
}
if tt.validate != nil {
tt.validate(t, tt.state)
}
})
}
}
func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
t.Run("nil source pubkey", func(t *testing.T) {

View File

@@ -3,6 +3,7 @@ package electra
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -12,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v7/contracts/deposit"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/time/slots"
@@ -35,7 +37,7 @@ func ProcessDeposits(
defer span.End()
// Attempt to verify all deposit signatures at once, if this fails then fall back to processing
// individual deposits with signature verification enabled.
allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
if err != nil {
return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
}
@@ -80,7 +82,7 @@ func ProcessDeposits(
// signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
if deposit == nil || deposit.Data == nil {
return nil, err
}
@@ -375,7 +377,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
return nil
}
allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
if err != nil {
return errors.Wrap(err, "batch signature verification failed")
}
@@ -384,7 +386,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
validSig := allSignaturesVerified
if !allSignaturesVerified {
validSig, err = helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
validSig, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: bytesutil.SafeCopyBytes(pd.PublicKey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
Amount: pd.Amount,
@@ -439,7 +441,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
defer span.End()
index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
if !ok {
verified, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
PublicKey: bytesutil.SafeCopyBytes(deposit.PublicKey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
Amount: deposit.Amount,
@@ -535,3 +537,62 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
return validator, nil
}
// ProcessDepositRequests processes execution layer deposit requests as part of Electra.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
defer span.End()
if len(requests) == 0 {
return beaconState, nil
}
var err error
for _, receipt := range requests {
beaconState, err = processDepositRequest(beaconState, receipt)
if err != nil {
return nil, errors.Wrap(err, "could not apply deposit request")
}
}
return beaconState, nil
}
// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
// if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
// state.deposit_requests_start_index = deposit_request.index
//
// # Create pending deposit
// state.pending_deposits.append(PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// ))
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get deposit requests start index")
}
if request == nil {
return nil, errors.New("nil deposit request")
}
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
return nil, errors.Wrap(err, "could not set deposit requests start index")
}
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(request.Pubkey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
Amount: request.Amount,
Signature: bytesutil.SafeCopyBytes(request.Signature),
Slot: beaconState.Slot(),
}); err != nil {
return nil, errors.Wrap(err, "could not append deposit request")
}
return beaconState, nil
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
@@ -14,6 +15,7 @@ import (
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
@@ -359,6 +361,60 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
})
}
func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
require.NoError(t, err)
require.NoError(t, st.SetDepositRequestsStartIndex(1))
t.Run("empty requests continues", func(t *testing.T) {
newSt, err := electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
require.DeepEqual(t, newSt, st)
})
t.Run("nil request errors", func(t *testing.T) {
_, err = electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})
vals := st.Validators()
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pending deposits as the deterministic state already populates them
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositMessage := &eth.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: withdrawalCred,
}
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
requests := []*enginev1.DepositRequest{
{
Pubkey: depositMessage.PublicKey,
Index: 0,
WithdrawalCredentials: depositMessage.WithdrawalCredentials,
Amount: depositMessage.Amount,
Signature: sig.Marshal(),
},
}
st, err = electra.ProcessDepositRequests(t.Context(), st, requests)
require.NoError(t, err)
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
}
func TestProcessDeposit_Electra_Simple(t *testing.T) {
deps, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
require.NoError(t, err)

View File

@@ -6,11 +6,6 @@ type execReqErr struct {
error
}
// NewExecReqError creates a new execReqErr.
func NewExecReqError(msg string) error {
return execReqErr{errors.New(msg)}
}
// IsExecutionRequestError returns true if the error has `execReqErr`.
func IsExecutionRequestError(e error) bool {
if e == nil {

View File

@@ -1,12 +1,10 @@
package transition
package electra
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
coreRequests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -49,7 +47,7 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
var err error
// 6110 validations are in VerifyOperationLengths
@@ -65,60 +63,59 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attestation")
}
if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
st, err = blocks.ProcessBLSToExecutionChanges(st, block)
st, err = ProcessBLSToExecutionChanges(st, block)
if err != nil {
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
return nil, errors.Wrap(err, "could not process bls-to-execution changes")
}
// new in electra
requests, err := bb.ExecutionRequests()
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
return nil, errors.Wrap(err, "could not get execution requests")
}
for _, d := range requests.Deposits {
if d == nil {
return nil, electra.NewExecReqError("nil deposit request")
return nil, errors.New("nil deposit request")
}
}
st, err = coreRequests.ProcessDepositRequests(ctx, st, requests.Deposits)
st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
}
for _, w := range requests.Withdrawals {
if w == nil {
return nil, electra.NewExecReqError("nil withdrawal request")
return nil, errors.New("nil withdrawal request")
}
}
st, err = coreRequests.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
if err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
}
for _, c := range requests.Consolidations {
if c == nil {
return nil, electra.NewExecReqError("nil consolidation request")
return nil, errors.New("nil consolidation request")
}
}
if err := coreRequests.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
}
return st, nil
}

View File

@@ -0,0 +1,60 @@
package electra_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestProcessOperationsWithNilRequests(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
errMsg string
}{
{
name: "Nil deposit request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
},
errMsg: "nil deposit request",
},
{
name: "Nil withdrawal request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
},
errMsg: "nil withdrawal request",
},
{
name: "Nil consolidation request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
},
errMsg: "nil consolidation request",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
_, err = electra.ProcessOperations(t.Context(), st, b.Block())
require.ErrorContains(t, tc.errMsg, err)
})
}
}

View File

@@ -1,4 +1,4 @@
package requests
package electra
import (
"bytes"
@@ -88,7 +88,7 @@ import (
// withdrawable_epoch=withdrawable_epoch,
// ))
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "requests.ProcessWithdrawalRequests")
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {

View File

@@ -1,9 +1,9 @@
package requests_test
package electra_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
@@ -289,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := requests.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return

View File

@@ -46,9 +46,6 @@ const (
// DataColumnReceived is sent after a data column has been seen after gossip validation rules.
DataColumnReceived = 12
// ExecutionProofReceived is sent after a execution proof object has been received from gossip or rpc.
ExecutionProofReceived = 13
)
// UnAggregatedAttReceivedData is the data sent with UnaggregatedAttReceived events.
@@ -80,11 +77,6 @@ type BLSToExecutionChangeReceivedData struct {
Change *ethpb.SignedBLSToExecutionChange
}
// ExecutionProofReceivedData is the data sent with ExecutionProofReceived events.
type ExecutionProofReceivedData struct {
ExecutionProof *blocks.VerifiedROSignedExecutionProof
}
// BlobSidecarReceivedData is the data sent with BlobSidecarReceived events.
type BlobSidecarReceivedData struct {
Blob *blocks.VerifiedROBlob

View File

@@ -1,66 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"bid.go",
"payload_attestation.go",
"pending_payment.go",
"proposer_slashing.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/state:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"bid_test.go",
"payload_attestation_test.go",
"pending_payment_test.go",
"proposer_slashing_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/validator-client:go_default_library",
"//runtime/version:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,193 +0,0 @@
package gloas
import (
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
// signed_bid = block.body.signed_execution_payload_bid
// bid = signed_bid.message
// builder_index = bid.builder_index
// amount = bid.value
// if builder_index == BUILDER_INDEX_SELF_BUILD:
// assert amount == 0
// assert signed_bid.signature == G2_POINT_AT_INFINITY
// else:
// assert is_active_builder(state, builder_index)
// assert can_builder_cover_bid(state, builder_index, amount)
// assert verify_execution_payload_bid_signature(state, signed_bid)
// assert bid.slot == block.slot
// assert bid.parent_block_hash == state.latest_block_hash
// assert bid.parent_block_root == block.parent_root
// assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
// if amount > 0:
// state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
// state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
signedBid, err := block.Body().SignedExecutionPayloadBid()
if err != nil {
return errors.Wrap(err, "failed to get signed execution payload bid")
}
wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
if err != nil {
return errors.Wrap(err, "failed to wrap signed bid")
}
bid, err := wrappedBid.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid from wrapped bid")
}
builderIndex := bid.BuilderIndex()
amount := bid.Value()
if builderIndex == params.BeaconConfig().BuilderIndexSelfBuild {
if amount != 0 {
return fmt.Errorf("self-build amount must be zero, got %d", amount)
}
if wrappedBid.Signature() != common.InfiniteSignature {
return errors.New("self-build signature must be point at infinity")
}
} else {
ok, err := st.IsActiveBuilder(builderIndex)
if err != nil {
return errors.Wrap(err, "builder active check failed")
}
if !ok {
return fmt.Errorf("builder %d is not active", builderIndex)
}
ok, err = st.CanBuilderCoverBid(builderIndex, amount)
if err != nil {
return errors.Wrap(err, "builder balance check failed")
}
if !ok {
return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
}
if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
return errors.Wrap(err, "bid signature validation failed")
}
}
if err := validateBidConsistency(st, bid, block); err != nil {
return errors.Wrap(err, "bid consistency validation failed")
}
if amount > 0 {
feeRecipient := bid.FeeRecipient()
pendingPayment := &ethpb.BuilderPendingPayment{
Weight: 0,
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: feeRecipient[:],
Amount: amount,
BuilderIndex: builderIndex,
},
}
slotIndex := params.BeaconConfig().SlotsPerEpoch + (bid.Slot() % params.BeaconConfig().SlotsPerEpoch)
if err := st.SetBuilderPendingPayment(slotIndex, pendingPayment); err != nil {
return errors.Wrap(err, "failed to set pending payment")
}
}
if err := st.SetExecutionPayloadBid(bid); err != nil {
return errors.Wrap(err, "failed to cache execution payload bid")
}
return nil
}
// validateBidConsistency checks that the bid is consistent with the current beacon state.
func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayloadBid, block interfaces.ReadOnlyBeaconBlock) error {
if bid.Slot() != block.Slot() {
return fmt.Errorf("bid slot %d does not match block slot %d", bid.Slot(), block.Slot())
}
latestBlockHash, err := st.LatestBlockHash()
if err != nil {
return errors.Wrap(err, "failed to get latest block hash")
}
if bid.ParentBlockHash() != latestBlockHash {
return fmt.Errorf("bid parent block hash mismatch: got %x, expected %x",
bid.ParentBlockHash(), latestBlockHash)
}
if bid.ParentBlockRoot() != block.ParentRoot() {
return fmt.Errorf("bid parent block root mismatch: got %x, expected %x",
bid.ParentBlockRoot(), block.ParentRoot())
}
randaoMix, err := helpers.RandaoMix(st, slots.ToEpoch(st.Slot()))
if err != nil {
return errors.Wrap(err, "failed to get randao mix")
}
if bid.PrevRandao() != [32]byte(randaoMix) {
return fmt.Errorf("bid prev randao mismatch: got %x, expected %x", bid.PrevRandao(), randaoMix)
}
return nil
}
// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// It validates that the signature was created by the builder specified in the bid
// using the appropriate domain for the beacon builder.
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
bid, err := signedBid.Bid()
if err != nil {
return errors.Wrap(err, "failed to get bid")
}
pubkey, err := st.BuilderPubkey(bid.BuilderIndex())
if err != nil {
return errors.Wrap(err, "failed to get builder pubkey")
}
publicKey, err := bls.PublicKeyFromBytes(pubkey[:])
if err != nil {
return errors.Wrap(err, "invalid builder public key")
}
signatureBytes := signedBid.Signature()
signature, err := bls.SignatureFromBytes(signatureBytes[:])
if err != nil {
return errors.Wrap(err, "invalid signature format")
}
currentEpoch := slots.ToEpoch(bid.Slot())
domain, err := signing.Domain(
st.Fork(),
currentEpoch,
params.BeaconConfig().DomainBeaconBuilder,
st.GenesisValidatorsRoot(),
)
if err != nil {
return errors.Wrap(err, "failed to compute signing domain")
}
signingRoot, err := signedBid.SigningRoot(domain)
if err != nil {
return errors.Wrap(err, "failed to compute signing root")
}
if !signature.Verify(publicKey, signingRoot[:]) {
return signing.ErrSigFailedToVerify
}
return nil
}

View File

@@ -1,633 +0,0 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
validatorpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/validator-client"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/time/slots"
fastssz "github.com/prysmaticlabs/fastssz"
"google.golang.org/protobuf/proto"
)
type stubBlockBody struct {
signedBid *ethpb.SignedExecutionPayloadBid
}
func (s stubBlockBody) Version() int { return version.Gloas }
func (s stubBlockBody) RandaoReveal() [96]byte { return [96]byte{} }
func (s stubBlockBody) Eth1Data() *ethpb.Eth1Data { return nil }
func (s stubBlockBody) Graffiti() [32]byte { return [32]byte{} }
func (s stubBlockBody) ProposerSlashings() []*ethpb.ProposerSlashing { return nil }
func (s stubBlockBody) AttesterSlashings() []ethpb.AttSlashing { return nil }
func (s stubBlockBody) Attestations() []ethpb.Att { return nil }
func (s stubBlockBody) Deposits() []*ethpb.Deposit { return nil }
func (s stubBlockBody) VoluntaryExits() []*ethpb.SignedVoluntaryExit { return nil }
func (s stubBlockBody) SyncAggregate() (*ethpb.SyncAggregate, error) { return nil, nil }
func (s stubBlockBody) IsNil() bool { return s.signedBid == nil }
func (s stubBlockBody) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
func (s stubBlockBody) Proto() (proto.Message, error) { return nil, nil }
func (s stubBlockBody) Execution() (interfaces.ExecutionData, error) { return nil, nil }
func (s stubBlockBody) BLSToExecutionChanges() ([]*ethpb.SignedBLSToExecutionChange, error) {
return nil, nil
}
func (s stubBlockBody) BlobKzgCommitments() ([][]byte, error) { return nil, nil }
func (s stubBlockBody) ExecutionRequests() (*enginev1.ExecutionRequests, error) {
return nil, nil
}
func (s stubBlockBody) PayloadAttestations() ([]*ethpb.PayloadAttestation, error) {
return nil, nil
}
func (s stubBlockBody) SignedExecutionPayloadBid() (*ethpb.SignedExecutionPayloadBid, error) {
return s.signedBid, nil
}
func (s stubBlockBody) MarshalSSZ() ([]byte, error) { return nil, nil }
func (s stubBlockBody) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
func (s stubBlockBody) UnmarshalSSZ([]byte) error { return nil }
func (s stubBlockBody) SizeSSZ() int { return 0 }
type stubBlock struct {
slot primitives.Slot
proposer primitives.ValidatorIndex
parentRoot [32]byte
body stubBlockBody
v int
}
var (
_ interfaces.ReadOnlyBeaconBlockBody = (*stubBlockBody)(nil)
_ interfaces.ReadOnlyBeaconBlock = (*stubBlock)(nil)
)
func (s stubBlock) Slot() primitives.Slot { return s.slot }
func (s stubBlock) ProposerIndex() primitives.ValidatorIndex { return s.proposer }
func (s stubBlock) ParentRoot() [32]byte { return s.parentRoot }
func (s stubBlock) StateRoot() [32]byte { return [32]byte{} }
func (s stubBlock) Body() interfaces.ReadOnlyBeaconBlockBody { return s.body }
func (s stubBlock) IsNil() bool { return false }
func (s stubBlock) IsBlinded() bool { return false }
func (s stubBlock) HashTreeRoot() ([32]byte, error) { return [32]byte{}, nil }
func (s stubBlock) Proto() (proto.Message, error) { return nil, nil }
func (s stubBlock) MarshalSSZ() ([]byte, error) { return nil, nil }
func (s stubBlock) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
func (s stubBlock) UnmarshalSSZ([]byte) error { return nil }
func (s stubBlock) SizeSSZ() int { return 0 }
func (s stubBlock) Version() int { return s.v }
func (s stubBlock) AsSignRequestObject() (validatorpb.SignRequestObject, error) {
return nil, nil
}
func (s stubBlock) HashTreeRootWith(*fastssz.Hasher) error { return nil }
func buildGloasState(t *testing.T, slot primitives.Slot, proposerIdx primitives.ValidatorIndex, builderIdx primitives.BuilderIndex, balance uint64, randao [32]byte, latestHash [32]byte, builderPubkey [48]byte) *state_native.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
for i := range blockRoots {
blockRoots[i] = bytes.Repeat([]byte{0xAA}, 32)
stateRoots[i] = bytes.Repeat([]byte{0xBB}, 32)
}
randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
for i := range randaoMixes {
randaoMixes[i] = randao[:]
}
withdrawalCreds := make([]byte, 32)
withdrawalCreds[0] = cfg.BuilderWithdrawalPrefixByte
validatorCount := int(proposerIdx) + 1
validators := make([]*ethpb.Validator, validatorCount)
balances := make([]uint64, validatorCount)
for i := range validatorCount {
validators[i] = &ethpb.Validator{
PublicKey: builderPubkey[:],
WithdrawalCredentials: withdrawalCreds,
EffectiveBalance: balance,
Slashed: false,
ActivationEligibilityEpoch: 0,
ActivationEpoch: 0,
ExitEpoch: cfg.FarFutureEpoch,
WithdrawableEpoch: cfg.FarFutureEpoch,
}
balances[i] = balance
}
payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
for i := range payments {
payments[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{}}
}
var builders []*ethpb.Builder
if builderIdx != params.BeaconConfig().BuilderIndexSelfBuild {
builderCount := int(builderIdx) + 1
builders = make([]*ethpb.Builder, builderCount)
builders[builderCount-1] = &ethpb.Builder{
Pubkey: builderPubkey[:],
Version: []byte{0},
ExecutionAddress: bytes.Repeat([]byte{0x01}, 20),
Balance: primitives.Gwei(balance),
DepositEpoch: 0,
WithdrawableEpoch: cfg.FarFutureEpoch,
}
}
stProto := &ethpb.BeaconStateGloas{
Slot: slot,
GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, 32),
Fork: &ethpb.Fork{
CurrentVersion: bytes.Repeat([]byte{0x22}, 4),
PreviousVersion: bytes.Repeat([]byte{0x22}, 4),
Epoch: 0,
},
BlockRoots: blockRoots,
StateRoots: stateRoots,
RandaoMixes: randaoMixes,
Validators: validators,
Balances: balances,
LatestBlockHash: latestHash[:],
BuilderPendingPayments: payments,
BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
Builders: builders,
FinalizedCheckpoint: &ethpb.Checkpoint{
Epoch: 1,
},
}
st, err := state_native.InitializeFromProtoGloas(stProto)
require.NoError(t, err)
return st.(*state_native.BeaconState)
}
func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid, fork *ethpb.Fork, genesisRoot [32]byte) [96]byte {
t.Helper()
epoch := slots.ToEpoch(primitives.Slot(bid.Slot))
domain, err := signing.Domain(fork, epoch, params.BeaconConfig().DomainBeaconBuilder, genesisRoot[:])
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(bid, domain)
require.NoError(t, err)
sig := sk.Sign(root[:]).Marshal()
var out [96]byte
copy(out[:], sig)
return out
}
func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
slot := primitives.Slot(12)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
pubKey := [48]byte{}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 0,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
require.NoError(t, ProcessExecutionPayloadBid(state, block))
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
require.Equal(t, primitives.Gwei(0), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
}
func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
slot := primitives.Slot(2)
proposerIdx := primitives.ValidatorIndex(0)
builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
randao := [32]byte{}
latestHash := [32]byte{1}
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: common.InfiniteSignature[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err := ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "self-build amount must be zero", err)
}
func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
slot := primitives.Slot(8)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
pub := sk.PublicKey().Marshal()
var pubKey [48]byte
copy(pubKey[:], pub)
balance := params.BeaconConfig().MinActivationBalance + 1_000_000
state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 500_000,
ExecutionPayment: 1,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{
Message: bid,
Signature: sig[:],
}
block := stubBlock{
slot: slot,
proposer: proposerIdx, // not self-build
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
require.NoError(t, ProcessExecutionPayloadBid(state, block))
stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, true, ok)
slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
require.Equal(t, primitives.Gwei(500_000), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
require.NotNil(t, stateProto.LatestExecutionPayloadBid)
require.Equal(t, primitives.BuilderIndex(1), stateProto.LatestExecutionPayloadBid.BuilderIndex)
require.Equal(t, primitives.Gwei(500_000), stateProto.LatestExecutionPayloadBid.Value)
}
func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
slot := primitives.Slot(4)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0x01}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0x02}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
// Make builder inactive by setting withdrawable_epoch.
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
stateProto.Builders[int(builderIdx)].WithdrawableEpoch = 0
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
require.NoError(t, err)
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x03}, 32),
BlockHash: bytes.Repeat([]byte{0x04}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
FeeRecipient: bytes.Repeat([]byte{0x06}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "is not active", err)
}
func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
slot := primitives.Slot(5)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0x0A}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0x0B}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+10, randao, latestHash, pubKey)
stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
// Add pending withdrawals and payments so the builder's remaining balance cannot cover the bid.
stateProto.BuilderPendingWithdrawals = []*ethpb.BuilderPendingWithdrawal{
{Amount: 15, BuilderIndex: builderIdx},
}
stateProto.BuilderPendingPayments = []*ethpb.BuilderPendingPayment{
{Withdrawal: &ethpb.BuilderPendingWithdrawal{Amount: 20, BuilderIndex: builderIdx}},
}
stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
require.NoError(t, err)
state = stateIface.(*state_native.BeaconState)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 25,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "cannot cover bid amount", err)
}
func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
slot := primitives.Slot(6)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xCC}, 32),
BlockHash: bytes.Repeat([]byte{0xDD}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 10,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
FeeRecipient: bytes.Repeat([]byte{0xFF}, 20),
}
// Use an invalid signature.
invalidSig := [96]byte{1}
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: invalidSig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "bid signature validation failed", err)
}
func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
slot := primitives.Slot(10)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0xAA}, 32),
BlockHash: bytes.Repeat([]byte{0xBB}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot + 1, // mismatch
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
FeeRecipient: bytes.Repeat([]byte{0xDD}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "bid slot", err)
}
func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
slot := primitives.Slot(11)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: bytes.Repeat([]byte{0x11}, 32), // mismatch
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "parent block hash mismatch", err)
}
func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
slot := primitives.Slot(12)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
parentRoot := bytes.Repeat([]byte{0x22}, 32)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: parentRoot,
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: randao[:],
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bytes.Repeat([]byte{0x99}, 32)), // mismatch
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "parent block root mismatch", err)
}
func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
slot := primitives.Slot(13)
builderIdx := primitives.BuilderIndex(1)
proposerIdx := primitives.ValidatorIndex(2)
randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
sk, err := bls.RandKey()
require.NoError(t, err)
var pubKey [48]byte
copy(pubKey[:], sk.PublicKey().Marshal())
state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
bid := &ethpb.ExecutionPayloadBid{
ParentBlockHash: latestHash[:],
ParentBlockRoot: bytes.Repeat([]byte{0x22}, 32),
BlockHash: bytes.Repeat([]byte{0x33}, 32),
PrevRandao: bytes.Repeat([]byte{0x01}, 32), // mismatch
GasLimit: 1,
BuilderIndex: builderIdx,
Slot: slot,
Value: 1,
ExecutionPayment: 0,
BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
FeeRecipient: bytes.Repeat([]byte{0x55}, 20),
}
genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
sig := signBid(t, sk, bid, state.Fork(), genesis)
signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
block := stubBlock{
slot: slot,
proposer: proposerIdx,
parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
body: stubBlockBody{signedBid: signed},
v: version.Gloas,
}
err = ProcessExecutionPayloadBid(state, block)
require.ErrorContains(t, "prev randao mismatch", err)
}

View File

@@ -1,253 +0,0 @@
package gloas
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"slices"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
// data = payload_attestation.data
// assert data.beacon_block_root == state.latest_block_header.parent_root
// assert data.slot + 1 == state.slot
// indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
// assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
atts, err := body.PayloadAttestations()
if err != nil {
return errors.Wrap(err, "failed to get payload attestations from block body")
}
if len(atts) == 0 {
return nil
}
header := st.LatestBlockHeader()
for i, att := range atts {
data := att.Data
if !bytes.Equal(data.BeaconBlockRoot, header.ParentRoot) {
return fmt.Errorf("payload attestation %d has wrong parent: got %x want %x", i, data.BeaconBlockRoot, header.ParentRoot)
}
dataSlot, err := data.Slot.SafeAdd(1)
if err != nil {
return errors.Wrapf(err, "payload attestation %d has invalid slot addition", i)
}
if dataSlot != st.Slot() {
return fmt.Errorf("payload attestation %d has wrong slot: got %d want %d", i, data.Slot+1, st.Slot())
}
indexed, err := indexedPayloadAttestation(ctx, st, att)
if err != nil {
return errors.Wrapf(err, "payload attestation %d failed to convert to indexed form", i)
}
if err := validIndexedPayloadAttestation(st, indexed); err != nil {
return errors.Wrapf(err, "payload attestation %d failed to verify indexed form", i)
}
}
return nil
}
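// Illustrative timing example (not taken from the spec text): when the state
// is at slot N, only attestations with data.slot == N-1 are accepted, and
// their beacon_block_root must equal the parent root recorded in the latest
// block header. A hypothetical call site could look like:
//
//	if err := ProcessPayloadAttestations(ctx, st, blk.Body()); err != nil {
//		return errors.Wrap(err, "could not process payload attestations")
//	}
//
// where blk is the beacon block currently being processed.
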
// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
committee, err := payloadCommittee(ctx, st, att.Data.Slot)
if err != nil {
return nil, err
}
indices := make([]primitives.ValidatorIndex, 0, len(committee))
for i, idx := range committee {
if att.AggregationBits.BitAt(uint64(i)) {
indices = append(indices, idx)
}
}
slices.Sort(indices)
return &consensus_types.IndexedPayloadAttestation{
AttestingIndices: indices,
Data: att.Data,
Signature: att.Signature,
}, nil
}
// payloadCommittee returns the payload timeliness committee of the given state for the given slot.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
// epoch = compute_epoch_at_slot(slot)
// seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
// indices = []
// committees_per_slot = get_committee_count_per_slot(state, epoch)
// for i in range(committees_per_slot):
// committee = get_beacon_committee(state, slot, CommitteeIndex(i))
// indices.extend(committee)
// return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
epoch := slots.ToEpoch(slot)
seed, err := ptcSeed(st, epoch, slot)
if err != nil {
return nil, err
}
activeCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
if err != nil {
return nil, err
}
committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
if err != nil {
return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
}
out = append(out, committee...)
}
return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
}
// ptcSeed computes the seed for the payload timeliness committee.
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
if err != nil {
return [32]byte{}, err
}
return hash.Hash(append(seed[:], bytesutil.Bytes8(uint64(slot))...)), nil
}
// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
// total = len(indices); selected = []; i = 0
// while len(selected) < size:
// next = i % total
// if shuffle_indices: next = compute_shuffled_index(next, total, seed)
// if compute_balance_weighted_acceptance(state, indices[next], seed, i):
// selected.append(indices[next])
// i += 1
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
if len(candidates) == 0 {
return nil, errors.New("no candidates for balance weighted selection")
}
hashFunc := hash.CustomSHA256Hasher()
// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
var buf [40]byte
copy(buf[:], seed[:])
maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
selected := make([]primitives.ValidatorIndex, 0, count)
total := uint64(len(candidates))
for i := uint64(0); uint64(len(selected)) < count; i++ {
if ctx.Err() != nil {
return nil, ctx.Err()
}
idx := candidates[i%total]
ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
if err != nil {
return nil, err
}
if ok {
selected = append(selected, idx)
}
}
return selected, nil
}
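// Illustrative note: the loop above walks candidates round-robin (i % total)
// and keeps drawing until `count` validators have been accepted, so the same
// validator index can appear in the result more than once. For example, with
// three candidates and count = 5 the visit order is 0, 1, 2, 0, 1, ... and
// candidate 0 may be selected twice if it passes the balance-weighted check
// on both visits.
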
// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
// MAX_RANDOM_VALUE = 2**16 - 1
// random_bytes = hash(seed + uint_to_bytes(i // 16))
// offset = i % 16 * 2
// random_value = bytes_to_uint64(random_bytes[offset:offset+2])
// effective_balance = state.validators[index].effective_balance
// return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
// Reuse the seed buffer by overwriting the last 8 bytes with the round counter divided by 16; each 32-byte hash yields sixteen 2-byte draws.
binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
random := hashFunc(seedBuf)
offset := (round % 16) * 2
randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec
val, err := st.ValidatorAtIndex(idx)
if err != nil {
return false, errors.Wrapf(err, "validator %d", idx)
}
return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
}
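// Worked example (illustrative): the acceptance check means a validator is
// kept with probability roughly effective_balance / MAX_EFFECTIVE_BALANCE_ELECTRA
// per draw. Assuming MAX_EFFECTIVE_BALANCE_ELECTRA = 2048 ETH (2048e9 Gwei) and
// the 16-bit MAX_RANDOM_VALUE = 65535:
//
//	2048 ETH: 2048e9 * 65535 >= 2048e9 * r holds for every draw r, so always accepted
//	1024 ETH: 1024e9 * 65535 >= 2048e9 * r only when r <= 32767, so accepted about half the time
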
// validIndexedPayloadAttestation verifies that an indexed payload attestation has non-empty, sorted attesting indices and a valid aggregate signature.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
// indices = indexed_payload_attestation.attesting_indices
// return len(indices) > 0 and indices == sorted(indices) and
// bls.FastAggregateVerify(
// [state.validators[i].pubkey for i in indices],
// compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(indexed_payload_attestation.data.slot))),
// indexed_payload_attestation.signature,
// )
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
indices := att.AttestingIndices
if len(indices) == 0 || !slices.IsSorted(indices) {
return errors.New("attesting indices empty or unsorted")
}
pubkeys := make([]bls.PublicKey, len(indices))
for i, idx := range indices {
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil {
return errors.Wrapf(err, "validator %d", idx)
}
keyBytes := val.PublicKey()
key, err := bls.PublicKeyFromBytes(keyBytes[:])
if err != nil {
return errors.Wrapf(err, "pubkey %d", idx)
}
pubkeys[i] = key
}
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(att.Data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
if err != nil {
return err
}
root, err := signing.ComputeSigningRoot(att.Data, domain)
if err != nil {
return err
}
sig, err := bls.SignatureFromBytes(att.Signature)
if err != nil {
return err
}
if !sig.FastAggregateVerify(pubkeys, root) {
return errors.New("invalid signature")
}
return nil
}

View File

@@ -1,305 +0,0 @@
package gloas_test
import (
"bytes"
"testing"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
testutil "github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
)
func TestProcessPayloadAttestations_WrongParent(t *testing.T) {
setupTestConfig(t)
_, pk := newKey(t)
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
require.NoError(t, st.SetSlot(2))
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
att := &eth.PayloadAttestation{
Data: &eth.PayloadAttestationData{
BeaconBlockRoot: bytes.Repeat([]byte{0xbb}, 32),
Slot: 1,
},
AggregationBits: bitfield.NewBitvector512(),
Signature: make([]byte, 96),
}
body := buildBody(t, att)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.ErrorContains(t, "wrong parent", err)
}
func TestProcessPayloadAttestations_WrongSlot(t *testing.T) {
setupTestConfig(t)
_, pk := newKey(t)
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
require.NoError(t, st.SetSlot(3))
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
att := &eth.PayloadAttestation{
Data: &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
},
AggregationBits: bitfield.NewBitvector512(),
Signature: make([]byte, 96),
}
body := buildBody(t, att)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.ErrorContains(t, "wrong slot", err)
}
func TestProcessPayloadAttestations_InvalidSignature(t *testing.T) {
setupTestConfig(t)
_, pk1 := newKey(t)
sk2, pk2 := newKey(t)
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
st := newTestState(t, vals, 2)
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
attData := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
}
att := &eth.PayloadAttestation{
Data: attData,
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
Signature: signAttestation(t, st, attData, []common.SecretKey{sk2}),
}
body := buildBody(t, att)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.ErrorContains(t, "failed to verify indexed form", err)
require.ErrorContains(t, "invalid signature", err)
}
func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
setupTestConfig(t)
_, pk := newKey(t)
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
require.NoError(t, st.SetSlot(2))
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
attData := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
}
att := &eth.PayloadAttestation{
Data: attData,
AggregationBits: bitfield.NewBitvector512(),
Signature: make([]byte, 96),
}
body := buildBody(t, att)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.ErrorContains(t, "failed to verify indexed form", err)
require.ErrorContains(t, "attesting indices empty or unsorted", err)
}
func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
sk2, pk2 := newKey(t)
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
st := newTestState(t, vals, 2)
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
attData := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
}
aggBits := bitfield.NewBitvector512()
aggBits.SetBitAt(0, true)
aggBits.SetBitAt(1, true)
att := &eth.PayloadAttestation{
Data: attData,
AggregationBits: aggBits,
Signature: signAttestation(t, st, attData, []common.SecretKey{sk1, sk2}),
}
body := buildBody(t, att)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.NoError(t, err)
}
func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
helpers.ClearCache()
setupTestConfig(t)
sk1, pk1 := newKey(t)
sk2, pk2 := newKey(t)
vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
st := newTestState(t, vals, 2)
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
attData1 := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
}
attData2 := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 1,
}
att1 := &eth.PayloadAttestation{
Data: attData1,
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
Signature: signAttestation(t, st, attData1, []common.SecretKey{sk1}),
}
att2 := &eth.PayloadAttestation{
Data: attData2,
AggregationBits: setBits(bitfield.NewBitvector512(), 1),
Signature: signAttestation(t, st, attData2, []common.SecretKey{sk2}),
}
body := buildBody(t, att1, att2)
err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
require.NoError(t, err)
}
func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
setupTestConfig(t)
_, pk := newKey(t)
st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
parentRoot := bytes.Repeat([]byte{0xaa}, 32)
require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))
attData := &eth.PayloadAttestationData{
BeaconBlockRoot: parentRoot,
Slot: 0,
}
att := &eth.PayloadAttestation{
Data: attData,
AggregationBits: setBits(bitfield.NewBitvector512(), 0),
Signature: make([]byte, 96),
}
body := buildBody(t, att)
errState := &validatorLookupErrState{
BeaconState: st,
errIndex: 0,
}
err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
require.ErrorContains(t, "failed to verify indexed form", err)
require.ErrorContains(t, "validator 0", err)
}
func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
st, err := testutil.NewBeaconState()
require.NoError(t, err)
for _, v := range vals {
require.NoError(t, st.AppendValidator(v))
require.NoError(t, st.AppendBalance(v.EffectiveBalance))
}
require.NoError(t, st.SetSlot(slot))
require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
return st
}
func setupTestConfig(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()
cfg.SlotsPerEpoch = 1
cfg.MaxEffectiveBalanceElectra = cfg.MaxEffectiveBalance
params.OverrideBeaconConfig(cfg)
}
func buildBody(t *testing.T, atts ...*eth.PayloadAttestation) interfaces.ReadOnlyBeaconBlockBody {
body := &eth.BeaconBlockBodyGloas{
PayloadAttestations: atts,
RandaoReveal: make([]byte, 96),
Eth1Data: &eth.Eth1Data{},
Graffiti: make([]byte, 32),
ProposerSlashings: []*eth.ProposerSlashing{},
AttesterSlashings: []*eth.AttesterSlashingElectra{},
Attestations: []*eth.AttestationElectra{},
Deposits: []*eth.Deposit{},
VoluntaryExits: []*eth.SignedVoluntaryExit{},
SyncAggregate: &eth.SyncAggregate{},
BlsToExecutionChanges: []*eth.SignedBLSToExecutionChange{},
}
wrapped, err := blocks.NewBeaconBlockBody(body)
require.NoError(t, err)
return wrapped
}
func setBits(bits bitfield.Bitvector512, idx uint64) bitfield.Bitvector512 {
bits.SetBitAt(idx, true)
return bits
}
func activeValidator(pub []byte) *eth.Validator {
return &eth.Validator{
PublicKey: pub,
EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance,
WithdrawalCredentials: make([]byte, 32),
ActivationEligibilityEpoch: 0,
ActivationEpoch: 0,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
func newKey(t *testing.T) (common.SecretKey, []byte) {
sk, err := bls.RandKey()
require.NoError(t, err)
return sk, sk.PublicKey().Marshal()
}
func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.PayloadAttestationData, sks []common.SecretKey) []byte {
domain, err := signing.Domain(st.Fork(), slots.ToEpoch(data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
require.NoError(t, err)
root, err := signing.ComputeSigningRoot(data, domain)
require.NoError(t, err)
sigs := make([]common.Signature, len(sks))
for i, sk := range sks {
sigs[i] = sk.Sign(root[:])
}
agg := bls.AggregateSignatures(sigs)
return agg.Marshal()
}
type validatorLookupErrState struct {
state.BeaconState
errIndex primitives.ValidatorIndex
}
// ValidatorAtIndexReadOnly is overridden to simulate a failed validator lookup.
func (s *validatorLookupErrState) ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
if idx == s.errIndex {
return nil, state.ErrNilValidatorsInState
}
return s.BeaconState.ValidatorAtIndexReadOnly(idx)
}

View File

@@ -1,76 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
// quorum = get_builder_payment_quorum_threshold(state)
// for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
// if payment.weight >= quorum:
// state.builder_pending_withdrawals.append(payment.withdrawal)
//
// old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
// new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
// state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
quorum, err := builderQuorumThreshold(state)
if err != nil {
return errors.Wrap(err, "could not compute builder payment quorum threshold")
}
payments, err := state.BuilderPendingPayments()
if err != nil {
return errors.Wrap(err, "could not get builder pending payments")
}
slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
var withdrawals []*ethpb.BuilderPendingWithdrawal
for _, payment := range payments[:slotsPerEpoch] {
if quorum > payment.Weight {
continue
}
withdrawals = append(withdrawals, payment.Withdrawal)
}
if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
return errors.Wrap(err, "could not append builder pending withdrawals")
}
if err := state.RotateBuilderPendingPayments(); err != nil {
return errors.Wrap(err, "could not rotate builder pending payments")
}
return nil
}
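// Illustrative example of the rotation, assuming SLOTS_PER_EPOCH = 32: entries
// 0..31 (the just-finished epoch) are inspected and any payment whose weight
// meets the quorum is appended to builder_pending_withdrawals; entries 32..63
// then shift down to 0..31, and 32 fresh empty payments fill the second half.
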
// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
// per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
// quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
// return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
activeBalance, err := helpers.TotalActiveBalance(state)
if err != nil {
return 0, errors.Wrap(err, "could not get total active balance")
}
cfg := params.BeaconConfig()
slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
numerator := cfg.BuilderPaymentThresholdNumerator
denominator := cfg.BuilderPaymentThresholdDenominator
activeBalancePerSlot := activeBalance / slotsPerEpoch
quorum := (activeBalancePerSlot * numerator) / denominator
return primitives.Gwei(quorum), nil
}
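// Worked example (illustrative, with hypothetical numbers): assuming a total
// active balance of 3,200,000 Gwei, SLOTS_PER_EPOCH = 32 and a threshold
// fraction of 6/10:
//
//	perSlot = 3,200,000 / 32 = 100,000 Gwei
//	quorum  = 100,000 * 6 / 10 = 60,000 Gwei
//
// so a pending payment needs at least 60,000 Gwei of attesting weight before
// ProcessBuilderPendingPayments promotes it to a withdrawal.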

View File

@@ -1,119 +0,0 @@
package gloas
import (
"slices"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestBuilderQuorumThreshold(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()
validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
require.NoError(t, err)
got, err := builderQuorumThreshold(st)
require.NoError(t, err)
total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
require.Equal(t, primitives.Gwei(want), got)
}
func TestProcessBuilderPendingPayments(t *testing.T) {
helpers.ClearCache()
cfg := params.BeaconConfig()
buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
for i := range p {
p[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
}
}
for i, w := range weights {
p[i].Weight = w
p[i].Withdrawal.Amount = 1
}
return p
}
validators := []*ethpb.Validator{
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
}
pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
require.NoError(t, err)
total := uint64(len(validators)) * cfg.MaxEffectiveBalance
perSlot := total / uint64(cfg.SlotsPerEpoch)
quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
slotsPerEpoch := int(cfg.SlotsPerEpoch)
t.Run("append qualifying withdrawals", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
st := &testProcessState{BeaconState: pbSt, payments: payments}
require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 2, len(st.withdrawals))
require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])
require.Equal(t, 2*slotsPerEpoch, len(st.payments))
for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
}
})
t.Run("no withdrawals when below quorum", func(t *testing.T) {
payments := buildPayments(primitives.Gwei(quorum - 1))
st := &testProcessState{BeaconState: pbSt, payments: payments}
require.NoError(t, ProcessBuilderPendingPayments(st))
require.Equal(t, 0, len(st.withdrawals))
})
}
type testProcessState struct {
state.BeaconState
payments []*ethpb.BuilderPendingPayment
withdrawals []*ethpb.BuilderPendingWithdrawal
}
func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
return t.payments, nil
}
func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
t.withdrawals = append(t.withdrawals, withdrawals...)
return nil
}
func (t *testProcessState) RotateBuilderPendingPayments() error {
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
rotated := slices.Clone(t.payments[slotsPerEpoch:])
for range slotsPerEpoch {
rotated = append(rotated, &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
})
}
t.payments = rotated
return nil
}

View File

@@ -1,43 +0,0 @@
package gloas
import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
)
// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
// slot = header_1.slot
// proposal_epoch = compute_epoch_at_slot(slot)
// if proposal_epoch == get_current_epoch(state):
// payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
// elif proposal_epoch == get_previous_epoch(state):
// payment_index = slot % SLOTS_PER_EPOCH
// state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
proposalEpoch := slots.ToEpoch(header.Slot)
currentEpoch := time.CurrentEpoch(st)
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
var paymentIndex primitives.Slot
if proposalEpoch == currentEpoch {
paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
} else if proposalEpoch+1 == currentEpoch {
paymentIndex = header.Slot % slotsPerEpoch
} else {
return nil
}
if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
return errors.Wrap(err, "could not clear builder pending payment")
}
return nil
}
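// Worked example (illustrative): assuming SLOTS_PER_EPOCH = 32 and a state at
// slot 70 (epoch 2), a header for slot 68 (same epoch) clears index
// 32 + 68%32 = 36, a header for slot 40 (previous epoch) clears index
// 40%32 = 8, and a header for slot 10 (two or more epochs old) is ignored.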

View File

@@ -1,112 +0,0 @@
package gloas
import (
"bytes"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch * 2
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)
setPendingPayment(t, st, paymentIndex, 123)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}
func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*2 + 1
headerSlot := slotsPerEpoch + 7
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)
setPendingPayment(t, st, paymentIndex, 456)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
got := getPendingPayment(t, st, paymentIndex)
require.NotNil(t, got.Withdrawal)
require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}
func TestRemoveBuilderPendingPayment_OlderThanTwoEpoch(t *testing.T) {
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
headerSlot := slotsPerEpoch * 2 // two epochs behind
st := newGloasStateWithPayments(t, stateSlot)
paymentIndex := int(headerSlot % slotsPerEpoch)
original := getPendingPayment(t, st, paymentIndex)
err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
require.NoError(t, err)
after := getPendingPayment(t, st, paymentIndex)
require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}
func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
t.Helper()
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
paymentCount := int(slotsPerEpoch * 2)
payments := make([]*eth.BuilderPendingPayment, paymentCount)
for i := range payments {
payments[i] = &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
Amount: 1,
},
}
}
st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
Slot: slot,
BuilderPendingPayments: payments,
})
require.NoError(t, err)
return st
}
func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
t.Helper()
payment := &eth.BuilderPendingPayment{
Withdrawal: &eth.BuilderPendingWithdrawal{
FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
Amount: primitives.Gwei(amount),
},
}
require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}
func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
t.Helper()
stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)
return stateProto.BuilderPendingPayments[index]
}

View File

@@ -6,7 +6,6 @@ go_library(
"attestation.go",
"beacon_committee.go",
"block.go",
"deposit.go",
"genesis.go",
"legacy.go",
"log.go",
@@ -24,7 +23,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -33,7 +31,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//contracts/deposit:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -57,7 +54,6 @@ go_test(
"attestation_test.go",
"beacon_committee_test.go",
"block_test.go",
"deposit_test.go",
"legacy_test.go",
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
@@ -76,7 +72,6 @@ go_test(
tags = ["CI_race_detection"],
deps = [
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/forkchoice/types:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -85,8 +80,6 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//container/slice:go_default_library",
"//container/trie:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",

View File

@@ -1,60 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"consolidations.go",
"deposits.go",
"log.go",
"withdrawals.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/validators:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls/common:go_default_library",
"//encoding/bytesutil:go_default_library",
"//math:go_default_library",
"//monitoring/tracing/trace:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"consolidations_test.go",
"deposits_test.go",
"withdrawals_test.go",
],
deps = [
":go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
],
)

View File

@@ -1,365 +0,0 @@
package requests
import (
"bytes"
"context"
"fmt"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
prysmMath "github.com/OffchainLabs/prysm/v7/math"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/common/math"
"github.com/pkg/errors"
)
// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
// def process_consolidation_request(
// state: BeaconState,
// consolidation_request: ConsolidationRequest
// ) -> None:
// if is_valid_switch_to_compounding_request(state, consolidation_request):
// validator_pubkeys = [v.pubkey for v in state.validators]
// request_source_pubkey = consolidation_request.source_pubkey
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// switch_to_compounding_validator(state, source_index)
// return
//
// # Verify that source != target, so a consolidation cannot be used as an exit.
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
// return
// # If the pending consolidations queue is full, consolidation requests are ignored
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
// return
// # If there is too little available consolidation churn limit, consolidation requests are ignored
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
// return
//
// validator_pubkeys = [v.pubkey for v in state.validators]
// # Verify pubkeys exists
// request_source_pubkey = consolidation_request.source_pubkey
// request_target_pubkey = consolidation_request.target_pubkey
// if request_source_pubkey not in validator_pubkeys:
// return
// if request_target_pubkey not in validator_pubkeys:
// return
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
// source_validator = state.validators[source_index]
// target_validator = state.validators[target_index]
//
// # Verify source withdrawal credentials
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
// is_correct_source_address = (
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
// )
// if not (has_correct_credential and is_correct_source_address):
// return
//
// # Verify that target has compounding withdrawal credentials
// if not has_compounding_withdrawal_credential(target_validator):
// return
//
// # Verify the source and the target are active
// current_epoch = get_current_epoch(state)
// if not is_active_validator(source_validator, current_epoch):
// return
// if not is_active_validator(target_validator, current_epoch):
// return
// # Verify exits for source and target have not been initiated
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
// return
//
// # Verify the source has been active long enough
// if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
// return
//
// # Verify the source has no pending withdrawals in the queue
// if get_pending_balance_to_withdraw(state, source_index) > 0:
// return
// # Initiate source validator exit and append pending consolidation
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
// state, source_validator.effective_balance
// )
// source_validator.withdrawable_epoch = Epoch(
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
// )
// state.pending_consolidations.append(PendingConsolidation(
// source_index=source_index,
// target_index=target_index
// ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
ctx, span := trace.StartSpan(ctx, "requests.ProcessConsolidationRequests")
defer span.End()
if len(reqs) == 0 || st == nil {
return nil
}
curEpoch := slots.ToEpoch(st.Slot())
ffe := params.BeaconConfig().FarFutureEpoch
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
for _, cr := range reqs {
if cr == nil {
return errors.New("nil consolidation request")
}
if ctx.Err() != nil {
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
}
if isValidSwitchToCompoundingRequest(st, cr) {
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
if !ok {
log.Error("Failed to find source validator index")
continue
}
if err := switchToCompoundingValidator(st, srcIdx); err != nil {
log.WithError(err).Error("Failed to switch to compounding validator")
}
continue
}
sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
if sourcePubkey == targetPubkey {
continue
}
if npc, err := st.NumPendingConsolidations(); err != nil {
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
} else if npc >= pcLimit {
continue
}
activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return err
}
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
continue
}
srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
if !ok {
continue
}
tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
if !ok {
continue
}
srcV, err := st.ValidatorAtIndex(srcIdx)
if err != nil {
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
}
roSrcV, err := state_native.NewValidator(srcV)
if err != nil {
return err
}
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
if err != nil {
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
}
// Verify source withdrawal credentials.
if !roSrcV.HasExecutionWithdrawalCredentials() {
continue
}
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address.
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
continue
}
// Target validator must have their withdrawal credentials set appropriately.
if !tgtV.HasCompoundingWithdrawalCredentials() {
continue
}
// Both validators must be active.
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
continue
}
// Neither validator is exiting.
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
continue
}
e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
if overflow {
log.Error("Overflow when adding activation epoch and shard committee period")
continue
}
if uint64(curEpoch) < e {
continue
}
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
if err != nil {
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
continue
}
if hasBal {
continue
}
exitEpoch, err := computeConsolidationEpochAndUpdateChurn(st, primitives.Gwei(srcV.EffectiveBalance))
if err != nil {
log.WithError(err).Error("Failed to compute consolidation epoch")
continue
}
srcV.ExitEpoch = exitEpoch
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
}
if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
}
}
return nil
}
func isValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
if req.SourcePubkey == nil || req.TargetPubkey == nil {
return false
}
if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
return false
}
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
if !ok {
return false
}
srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
if err != nil {
return false
}
sourceAddress := req.SourceAddress
withdrawalCreds := srcV.GetWithdrawalCredentials()
if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
return false
}
if !srcV.HasETH1WithdrawalCredentials() {
return false
}
curEpoch := slots.ToEpoch(st.Slot())
if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
return false
}
if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
return false
}
return true
}
func switchToCompoundingValidator(st state.BeaconState, idx primitives.ValidatorIndex) error {
v, err := st.ValidatorAtIndex(idx)
if err != nil {
return err
}
if len(v.WithdrawalCredentials) == 0 {
return errors.New("validator has no withdrawal credentials")
}
v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
return err
}
return queueExcessActiveBalance(st, idx)
}
func queueExcessActiveBalance(st state.BeaconState, idx primitives.ValidatorIndex) error {
bal, err := st.BalanceAtIndex(idx)
if err != nil {
return err
}
if bal > params.BeaconConfig().MinActivationBalance {
if err := st.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
return err
}
excessBalance := bal - params.BeaconConfig().MinActivationBalance
val, err := st.ValidatorAtIndexReadOnly(idx)
if err != nil {
return err
}
pk := val.PublicKey()
return st.AppendPendingDeposit(&eth.PendingDeposit{
PublicKey: pk[:],
WithdrawalCredentials: val.GetWithdrawalCredentials(),
Amount: excessBalance,
Signature: common.InfiniteSignature[:],
Slot: params.BeaconConfig().GenesisSlot,
})
}
return nil
}
func computeConsolidationEpochAndUpdateChurn(st state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
earliestEpoch, err := st.EarliestConsolidationEpoch()
if err != nil {
return 0, err
}
earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(st.Slot())))
activeBal, err := helpers.TotalActiveBalance(st)
if err != nil {
return 0, err
}
perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
var consolidationBalanceToConsume primitives.Gwei
if earliestEpoch < earliestConsolidationEpoch {
consolidationBalanceToConsume = perEpochConsolidationChurn
} else {
consolidationBalanceToConsume, err = st.ConsolidationBalanceToConsume()
if err != nil {
return 0, err
}
}
if consolidationBalance > consolidationBalanceToConsume {
balanceToProcess := consolidationBalance - consolidationBalanceToConsume
additionalEpochs, err := prysmMath.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
if err != nil {
return 0, err
}
additionalEpochs++
earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
}
if err := st.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
return 0, err
}
if err := st.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
return 0, err
}
return earliestConsolidationEpoch, nil
}

View File

@@ -1,316 +0,0 @@
package requests_test
import (
"context"
"fmt"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
vals := make([]*eth.Validator, num)
for i := range vals {
wd := make([]byte, 32)
wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
wd[31] = byte(i)
vals[i] = &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
EffectiveBalance: params.BeaconConfig().MinActivationBalance,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: fmt.Appendf(nil, "val_%d", i),
WithdrawableEpoch: params.BeaconConfig().FarFutureEpoch,
WithdrawalCredentials: wd,
}
}
if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
vals = append(vals, &eth.Validator{
ActivationEpoch: primitives.Epoch(0),
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
})
}
return vals
}
func TestProcessConsolidationRequests(t *testing.T) {
tests := []struct {
name string
state state.BeaconState
reqs []*enginev1.ConsolidationRequest
validate func(*testing.T, state.BeaconState)
wantErr bool
}{
{
name: "nil request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{nil},
validate: func(t *testing.T, st state.BeaconState) {
require.DeepEqual(t, st, st)
},
wantErr: true,
},
{
name: "one valid request",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
}
// Validator scenario setup. See comments in reqs section.
st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
st.Validators[13].ExitEpoch = 10
st.Validators[16].ExitEpoch = 10
st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
{
Index: 17,
Amount: 100,
},
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
// Source doesn't have withdrawal credentials.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
// Source withdrawal credentials don't match the consolidation address.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
SourcePubkey: []byte("val_5"),
TargetPubkey: []byte("val_6"),
},
// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
SourcePubkey: []byte("val_7"),
TargetPubkey: []byte("val_8"),
},
// Source is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
SourcePubkey: []byte("val_9"),
TargetPubkey: []byte("val_10"),
},
// Target is inactive.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
SourcePubkey: []byte("val_11"),
TargetPubkey: []byte("val_12"),
},
// Source is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
SourcePubkey: []byte("val_13"),
TargetPubkey: []byte("val_14"),
},
// Target is exiting.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
SourcePubkey: []byte("val_15"),
TargetPubkey: []byte("val_16"),
},
// Source doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("INVALID"),
TargetPubkey: []byte("val_0"),
},
// Target doesn't exist
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("INVALID"),
},
// Source == target
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_0"),
TargetPubkey: []byte("val_0"),
},
// Has pending partial withdrawal
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
SourcePubkey: []byte("val_17"),
TargetPubkey: []byte("val_1"),
},
// Valid consolidation request. This should be last to ensure invalid requests do
// not end the processing early.
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, uint64(1), numPC)
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)
// Verify the source validator is exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
},
wantErr: false,
},
{
name: "pending consolidations limit reached",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify no pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// Verify the source validator is not exiting.
src, err := st.ValidatorAtIndex(1)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
wantErr: false,
},
{
name: "pending consolidations limit reached during processing",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
}
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_4"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// The first consolidation was appended.
pcs, err := st.PendingConsolidations()
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)
// Verify the second source validator is not exiting.
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
},
wantErr: false,
},
{
name: "pending consolidations limit reached and compounded consolidation after",
state: func() state.BeaconState {
st := &eth.BeaconStateElectra{
Slot: params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
}
// To allow compounding consolidation requests.
st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
s, err := state_native.InitializeFromProtoElectra(st)
require.NoError(t, err)
return s
}(),
reqs: []*enginev1.ConsolidationRequest{
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
SourcePubkey: []byte("val_1"),
TargetPubkey: []byte("val_2"),
},
{
SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
SourcePubkey: []byte("val_3"),
TargetPubkey: []byte("val_3"),
},
},
validate: func(t *testing.T, st state.BeaconState) {
// Verify a pending consolidation is created.
numPC, err := st.NumPendingConsolidations()
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)
// Verify that the last consolidation was included
src, err := st.ValidatorAtIndex(3)
require.NoError(t, err)
require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := requests.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
if (err != nil) != tt.wantErr {
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr {
require.NoError(t, err)
}
if tt.validate != nil {
tt.validate(t, tt.state)
}
})
}
}

View File

@@ -1,73 +0,0 @@
package requests
import (
"context"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
)
// ProcessDepositRequests processes execution layer deposit requests.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, reqs []*enginev1.DepositRequest) (state.BeaconState, error) {
_, span := trace.StartSpan(ctx, "requests.ProcessDepositRequests")
defer span.End()
if len(reqs) == 0 {
return beaconState, nil
}
var err error
for _, req := range reqs {
beaconState, err = processDepositRequest(beaconState, req)
if err != nil {
return nil, errors.Wrap(err, "could not apply deposit request")
}
}
return beaconState, nil
}
// processDepositRequest processes the specific deposit request
//
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
// # Set deposit request start index
// if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
// state.deposit_requests_start_index = deposit_request.index
//
// # Create pending deposit
// state.pending_deposits.append(PendingDeposit(
// pubkey=deposit_request.pubkey,
// withdrawal_credentials=deposit_request.withdrawal_credentials,
// amount=deposit_request.amount,
// signature=deposit_request.signature,
// slot=state.slot,
// ))
func processDepositRequest(beaconState state.BeaconState, req *enginev1.DepositRequest) (state.BeaconState, error) {
requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
if err != nil {
return nil, errors.Wrap(err, "could not get deposit requests start index")
}
if req == nil {
return nil, errors.New("nil deposit request")
}
if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
if err := beaconState.SetDepositRequestsStartIndex(req.Index); err != nil {
return nil, errors.Wrap(err, "could not set deposit requests start index")
}
}
if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
PublicKey: bytesutil.SafeCopyBytes(req.Pubkey),
WithdrawalCredentials: bytesutil.SafeCopyBytes(req.WithdrawalCredentials),
Amount: req.Amount,
Signature: bytesutil.SafeCopyBytes(req.Signature),
Slot: beaconState.Slot(),
}); err != nil {
return nil, errors.Wrap(err, "could not append deposit request")
}
return beaconState, nil
}

View File

@@ -1,70 +0,0 @@
package requests_test
import (
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestProcessDepositRequests(t *testing.T) {
st, _ := util.DeterministicGenesisStateElectra(t, 1)
sk, err := bls.RandKey()
require.NoError(t, err)
require.NoError(t, st.SetDepositRequestsStartIndex(1))
t.Run("empty requests continues", func(t *testing.T) {
newSt, err := requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
require.NoError(t, err)
require.DeepEqual(t, newSt, st)
})
t.Run("nil request errors", func(t *testing.T) {
_, err = requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
require.ErrorContains(t, "nil deposit request", err)
})
vals := st.Validators()
vals[0].PublicKey = sk.PublicKey().Marshal()
vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
require.NoError(t, st.SetValidators(vals))
bals := st.Balances()
bals[0] = params.BeaconConfig().MinActivationBalance + 2000
require.NoError(t, st.SetBalances(bals))
require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pbd as the deterministic state populates this already
withdrawalCred := make([]byte, 32)
withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
depositMessage := &eth.DepositMessage{
PublicKey: sk.PublicKey().Marshal(),
Amount: 1000,
WithdrawalCredentials: withdrawalCred,
}
domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
require.NoError(t, err)
sr, err := signing.ComputeSigningRoot(depositMessage, domain)
require.NoError(t, err)
sig := sk.Sign(sr[:])
reqs := []*enginev1.DepositRequest{
{
Pubkey: depositMessage.PublicKey,
Index: 0,
WithdrawalCredentials: depositMessage.WithdrawalCredentials,
Amount: depositMessage.Amount,
Signature: sig.Marshal(),
},
}
st, err = requests.ProcessDepositRequests(t.Context(), st, reqs)
require.NoError(t, err)
pbd, err := st.PendingDeposits()
require.NoError(t, err)
require.Equal(t, 1, len(pbd))
require.Equal(t, uint64(1000), pbd[0].Amount)
require.DeepEqual(t, bytesutil.SafeCopyBytes(reqs[0].Pubkey), pbd[0].PublicKey)
}

View File

@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package requests
import "github.com/sirupsen/logrus"
// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/requests")

View File

@@ -3,8 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"electra.go",
"errors.go",
"log.go",
"skip_slot_cache.go",
"state.go",
@@ -27,7 +25,6 @@ go_library(
"//beacon-chain/core/execution:go_default_library",
"//beacon-chain/core/fulu:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/requests:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition/interop:go_default_library",
"//beacon-chain/core/validators:go_default_library",
@@ -65,14 +62,11 @@ go_test(
"altair_transition_no_verify_sig_test.go",
"bellatrix_transition_no_verify_sig_test.go",
"benchmarks_test.go",
"electra_test.go",
"exports_test.go",
"skip_slot_cache_test.go",
"state_fuzz_test.go",
"state_test.go",
"trailing_slot_state_cache_test.go",
"transition_fuzz_test.go",
"transition_gloas_test.go",
"transition_no_verify_sig_test.go",
"transition_test.go",
],
@@ -108,7 +102,6 @@ go_test(
"//time/slots:go_default_library",
"@com_github_google_gofuzz//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -1,216 +0,0 @@
package transition_test
import (
"context"
"errors"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
)
func TestProcessOperationsWithNilRequests(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
errMsg string
}{
{
name: "Nil deposit request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
},
errMsg: "nil deposit request",
},
{
name: "Nil withdrawal request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
},
errMsg: "nil withdrawal request",
},
{
name: "Nil consolidation request",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
},
errMsg: "nil consolidation request",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(1))
_, err = transition.ElectraOperations(t.Context(), st, b.Block())
require.ErrorContains(t, tc.errMsg, err)
})
}
}
func TestElectraOperations_ProcessingErrors(t *testing.T) {
tests := []struct {
name string
modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
errCheck func(t *testing.T, err error)
}{
{
name: "ErrProcessProposerSlashingsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create invalid proposer slashing with out-of-bounds proposer index
blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
{
Header_1: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 999999, // Invalid index (out of bounds)
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
Header_2: &ethpb.SignedBeaconBlockHeader{
Header: &ethpb.BeaconBlockHeader{
Slot: 1,
ProposerIndex: 999999,
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
},
Signature: make([]byte, 96),
},
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process proposer slashings failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
},
},
{
name: "ErrProcessAttestationsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create attestation with invalid committee index
blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
{
AggregationBits: []byte{0b00000001},
Data: &ethpb.AttestationData{
Slot: 1,
CommitteeIndex: 999999, // Invalid committee index
BeaconBlockRoot: make([]byte, 32),
Source: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
Target: &ethpb.Checkpoint{
Epoch: 0,
Root: make([]byte, 32),
},
},
CommitteeBits: []byte{0b00000001},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process attestations failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
},
},
{
name: "ErrProcessDepositsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create deposit with invalid proof length
blk.Block.Body.Deposits = []*ethpb.Deposit{
{
Proof: [][]byte{}, // Invalid: empty proof
Data: &ethpb.Deposit_Data{
PublicKey: make([]byte, 48),
WithdrawalCredentials: make([]byte, 32),
Amount: 32000000000, // 32 ETH in Gwei
Signature: make([]byte, 96),
},
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process deposits failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
},
},
{
name: "ErrProcessVoluntaryExitsFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create voluntary exit with invalid validator index
blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
{
Exit: &ethpb.VoluntaryExit{
Epoch: 0,
ValidatorIndex: 999999, // Invalid index (out of bounds)
},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process voluntary exits failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
},
},
{
name: "ErrProcessBLSChangesFailed",
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
// Create BLS to execution change with invalid validator index
blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
{
Message: &ethpb.BLSToExecutionChange{
ValidatorIndex: 999999, // Invalid index (out of bounds)
FromBlsPubkey: make([]byte, 48),
ToExecutionAddress: make([]byte, 20),
},
Signature: make([]byte, 96),
},
}
},
errCheck: func(t *testing.T, err error) {
require.ErrorContains(t, "process BLS to execution changes failed", err)
require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
st, ks := util.DeterministicGenesisStateElectra(t, 128)
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
require.NoError(t, err)
tc.modifyBlk(blk)
b, err := blocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, st.SetSlot(primitives.Slot(1)))
_, err = transition.ElectraOperations(ctx, st, b.Block())
require.NotNil(t, err, "Expected an error but got nil")
tc.errCheck(t, err)
})
}
}

View File

@@ -1,19 +0,0 @@
package transition
import "errors"
var (
ErrAttestationsSignatureInvalid = errors.New("attestations signature invalid")
ErrRandaoSignatureInvalid = errors.New("randao signature invalid")
ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
ErrProcessWithdrawalsFailed = errors.New("process withdrawals failed")
ErrProcessRandaoFailed = errors.New("process randao failed")
ErrProcessEth1DataFailed = errors.New("process eth1 data failed")
ErrProcessProposerSlashingsFailed = errors.New("process proposer slashings failed")
ErrProcessAttesterSlashingsFailed = errors.New("process attester slashings failed")
ErrProcessAttestationsFailed = errors.New("process attestations failed")
ErrProcessDepositsFailed = errors.New("process deposits failed")
ErrProcessVoluntaryExitsFailed = errors.New("process voluntary exits failed")
ErrProcessBLSChangesFailed = errors.New("process BLS to execution changes failed")
ErrProcessSyncAggregateFailed = errors.New("process sync aggregate failed")
)

View File

@@ -1,3 +0,0 @@
package transition
var ElectraOperations = electraOperations

View File

@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
); err != nil {
return nil, err
}
// Spec v1.6.1 (pseudocode):
// # [New in Gloas:EIP7732]
// # Unset the next payload availability
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
if state.Version() >= version.Gloas {
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
return nil, err
}
}
return state, nil
}

View File

@@ -1,141 +0,0 @@
package transition
import (
"bytes"
"context"
"fmt"
"testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/stretchr/testify/require"
)
func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
slot := primitives.Slot(10)
cfg := params.BeaconConfig()
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := nextIdx / 8
bitMask := byte(1 << (nextIdx % 8))
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)
post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
}
func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
cfg := params.BeaconConfig()
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
require.NoError(t, err)
post := st.ToProto().(*ethpb.BeaconStateGloas)
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
}
func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
slot := primitives.Slot(7)
availability := make([]byte, 1)
st := newGloasState(t, slot, availability)
_, err := ProcessSlot(context.Background(), st)
cfg := params.BeaconConfig()
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
byteIdx := idx / 8
require.EqualError(t, err, fmt.Sprintf(
"bit index %d (byte index %d) out of range for execution payload availability length %d",
idx, byteIdx, len(availability),
))
}
func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
t.Helper()
cfg := params.BeaconConfig()
protoState := &ethpb.BeaconStateGloas{
Slot: slot,
LatestBlockHeader: testBeaconBlockHeader(),
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
ExecutionPayloadAvailability: availability,
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
LatestExecutionPayloadBid: &ethpb.ExecutionPayloadBid{
ParentBlockHash: make([]byte, 32),
ParentBlockRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
PrevRandao: make([]byte, 32),
FeeRecipient: make([]byte, 20),
BlobKzgCommitmentsRoot: make([]byte, 32),
},
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
PreviousEpochParticipation: []byte{},
CurrentEpochParticipation: []byte{},
JustificationBits: []byte{0},
PreviousJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentJustifiedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
FinalizedCheckpoint: &ethpb.Checkpoint{Root: make([]byte, 32)},
CurrentSyncCommittee: &ethpb.SyncCommittee{},
NextSyncCommittee: &ethpb.SyncCommittee{},
}
for i := range protoState.BlockRoots {
protoState.BlockRoots[i] = make([]byte, 32)
}
for i := range protoState.StateRoots {
protoState.StateRoots[i] = make([]byte, 32)
}
for i := range protoState.RandaoMixes {
protoState.RandaoMixes[i] = make([]byte, 32)
}
for i := range protoState.BuilderPendingPayments {
protoState.BuilderPendingPayments[i] = &ethpb.BuilderPendingPayment{
Withdrawal: &ethpb.BuilderPendingWithdrawal{
FeeRecipient: make([]byte, 20),
},
}
}
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
for i := range pubkeys {
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
}
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
protoState.CurrentSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}
protoState.NextSyncCommittee = &ethpb.SyncCommittee{
Pubkeys: pubkeys,
AggregatePubkey: aggPubkey,
}
st, err := state_native.InitializeFromProtoGloas(protoState)
require.NoError(t, err)
require.Equal(t, version.Gloas, st.Version())
return st
}
func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
return &ethpb.BeaconBlockHeader{
ParentRoot: make([]byte, 32),
StateRoot: make([]byte, 32),
BodyRoot: make([]byte, 32),
}
}

View File

@@ -7,11 +7,12 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -69,11 +70,10 @@ func ExecuteStateTransitionNoVerifyAnySig(
}
// Execute per block transition.
sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
if err != nil {
return nil, nil, errors.Wrap(err, "could not process block")
}
set := sigSlice.Batch()
// State root validation.
postStateRoot, err := st.HashTreeRoot(ctx)
@@ -113,7 +113,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
// assert block.state_root == hash_tree_root(state)
func CalculateStateRoot(
ctx context.Context,
rollback state.BeaconState,
state state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) ([32]byte, error) {
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
@@ -122,7 +122,7 @@ func CalculateStateRoot(
tracing.AnnotateError(span, ctx.Err())
return [32]byte{}, ctx.Err()
}
if rollback == nil || rollback.IsNil() {
if state == nil || state.IsNil() {
return [32]byte{}, errors.New("nil state")
}
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
@@ -130,7 +130,7 @@ func CalculateStateRoot(
}
// Copy state to avoid mutating the state reference.
state := rollback.Copy()
state = state.Copy()
// Execute per slots transition.
var err error
@@ -141,103 +141,14 @@ func CalculateStateRoot(
}
// Execute per block transition.
if features.Get().EnableProposerPreprocessing {
state, err = processBlockForProposing(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
}
} else {
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
state, err = ProcessBlockForStateRoot(ctx, state, signed)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not process block")
}
return state.HashTreeRoot(ctx)
}
// processBlockForProposing processes the block and verifies the signatures within it. The block proposer signature is not verified as this block is not yet signed.
func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
var err error
var set BlockSignatureBatches
set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
if err != nil {
return nil, err
}
// We first try to verify all signatures batched optimistically. We ignore the block proposer signature.
sigSet := set.Batch()
valid, err := sigSet.Verify()
if err != nil || valid {
return st, err
}
// Some signature failed to verify.
// Verify Attestations signatures
attSigs := set.AttestationSignatures
if attSigs == nil {
return nil, ErrAttestationsSignatureInvalid
}
valid, err = attSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrAttestationsSignatureInvalid
}
// Verify Randao signature
randaoSigs := set.RandaoSignatures
if randaoSigs == nil {
return nil, ErrRandaoSignatureInvalid
}
valid, err = randaoSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrRandaoSignatureInvalid
}
if signed.Block().Version() < version.Capella {
// This should not happen as we must have failed one of the above signatures.
return st, nil
}
// Verify BLS to execution changes signatures
blsChangeSigs := set.BLSChangeSignatures
if blsChangeSigs == nil {
return nil, ErrBLSToExecutionChangesSignatureInvalid
}
valid, err = blsChangeSigs.Verify()
if err != nil {
return nil, err
}
if !valid {
return nil, ErrBLSToExecutionChangesSignatureInvalid
}
// We should not reach this point as one of the above signatures must have failed.
return st, nil
}
// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
type BlockSignatureBatches struct {
RandaoSignatures *bls.SignatureBatch
AttestationSignatures *bls.SignatureBatch
BLSChangeSignatures *bls.SignatureBatch
}
// Batch returns a single signature batch combining all batches in the BlockSignatureBatches.
func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
sigs := bls.NewSet()
if b.RandaoSignatures != nil {
sigs.Join(b.RandaoSignatures)
}
if b.AttestationSignatures != nil {
sigs.Join(b.AttestationSignatures)
}
if b.BLSChangeSignatures != nil {
sigs.Join(b.BLSChangeSignatures)
}
return sigs
}
// ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation
// transformations as defined in the Ethereum Serenity specification. It does not validate
// any block signature except for deposit and slashing signatures. It also returns the relevant
@@ -254,48 +165,48 @@ func ProcessBlockNoVerifyAnySig(
ctx context.Context,
st state.BeaconState,
signed interfaces.ReadOnlySignedBeaconBlock,
) (BlockSignatureBatches, state.BeaconState, error) {
) (*bls.SignatureBatch, state.BeaconState, error) {
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
defer span.End()
set := BlockSignatureBatches{}
if err := blocks.BeaconBlockIsNil(signed); err != nil {
return set, nil, err
return nil, nil, err
}
if st.Version() != signed.Block().Version() {
return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
}
blk := signed.Block()
st, err := ProcessBlockForStateRoot(ctx, st, signed)
if err != nil {
return set, nil, err
return nil, nil, err
}
randaoReveal := signed.Block().Body().RandaoReveal()
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
if err != nil {
tracing.AnnotateError(span, err)
return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
}
set.RandaoSignatures = rSet
aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
if err != nil {
return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
}
set.AttestationSignatures = aSet
// Merge beacon block, randao and attestations signatures into a set.
set := bls.NewSet()
set.Join(rSet).Join(aSet)
if blk.Version() >= version.Capella {
changes, err := signed.Block().Body().BLSToExecutionChanges()
if err != nil {
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
}
cSet, err := b.BLSChangesSignatureBatch(st, changes)
if err != nil {
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
}
set.BLSChangeSignatures = cSet
set.Join(cSet)
}
return set, st, nil
}
@@ -357,7 +268,7 @@ func ProcessOperationsNoVerifyAttsSigs(
return nil, err
}
} else {
state, err = electraOperations(ctx, state, beaconBlock)
state, err = electra.ProcessOperations(ctx, state, beaconBlock)
if err != nil {
return nil, err
}
@@ -415,7 +326,7 @@ func ProcessBlockForStateRoot(
if state.Version() >= version.Capella {
state, err = b.ProcessWithdrawals(state, executionData)
if err != nil {
return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
return nil, errors.Wrap(err, "could not process withdrawals")
}
}
if err = b.ProcessPayload(state, blk.Body()); err != nil {
@@ -427,13 +338,13 @@ func ProcessBlockForStateRoot(
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
return nil, errors.Wrap(err, "could not verify and process randao")
}
state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
if err != nil {
tracing.AnnotateError(span, err)
return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
return nil, errors.Wrap(err, "could not process eth1 data")
}
state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
@@ -452,7 +363,7 @@ func ProcessBlockForStateRoot(
}
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
if err != nil {
return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
return nil, errors.Wrap(err, "process_sync_aggregate failed")
}
return state, nil
@@ -468,35 +379,31 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
exitInfo := &validators.ExitInfo{}
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair proposer slashing")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attester slashing")
}
st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair attestation")
}
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
return nil, errors.Wrap(err, "could not process altair deposit")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
}
return st, nil
return b.ProcessBLSToExecutionChanges(st, beaconBlock)
}
// This calls phase 0 block operations.
@@ -504,32 +411,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
var err error
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
var exitInfo *validators.ExitInfo
var exitInfo *v.ExitInfo
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = validators.ExitInformation(st)
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block proposer slashings")
}
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block attester slashings")
}
st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
if err != nil {
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
return nil, errors.Wrap(err, "could not process block attestations")
}
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
return nil, errors.Wrap(err, "could not process deposits")
}
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
if err != nil {
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
return nil, errors.Wrap(err, "could not process voluntary exits")
}
return st, nil
}

View File

@@ -132,8 +132,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
// Test Signature set verifies.
sigSet := set.Batch()
verified, err := sigSet.Verify()
verified, err := set.Verify()
require.NoError(t, err)
assert.Equal(t, true, verified, "Could not verify signature set.")
}
@@ -146,8 +145,7 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
require.NoError(t, err)
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
sigSet := set.Batch()
verified, err := sigSet.Verify()
verified, err := set.Verify()
require.NoError(t, err)
require.Equal(t, true, verified, "Could not verify signature set")
}
@@ -156,9 +154,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
wsb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
require.NoError(t, err)
set := signatures.Batch()
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
assert.Equal(t, "randao signature", set.Descriptions[0])
assert.Equal(t, "attestation signature", set.Descriptions[1])

View File

@@ -7,6 +7,7 @@ go_library(
"cache.go",
"data_column.go",
"data_column_cache.go",
"doc.go",
"iteration.go",
"layout.go",
"layout_by_epoch.go",
@@ -14,8 +15,6 @@ go_library(
"log.go",
"metrics.go",
"mock.go",
"proof.go",
"proof_cache.go",
"pruner.go",
],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem",
@@ -31,7 +30,6 @@ go_library(
"//consensus-types/primitives:go_default_library",
"//encoding/bytesutil:go_default_library",
"//io/file:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/logging:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",

View File

@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
if err != nil {
return nil, errors.Wrap(err, "data column sidecars file path open")
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
}
}()
// Read file metadata.
metadata, err := dcs.metadata(file)

View File

@@ -0,0 +1,104 @@
package filesystem
// nolint:dupword
/*
Data column sidecars storage documentation
==========================================
File organisation
-----------------
- The first byte represents the version of the file structure (up to 0xff = 255).
We set it to 0x01.
Note: This is not strictly needed, but it will help a lot if, in the future,
we want to modify the file structure.
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
(See the `Computation of the maximum size of a DataColumnSidecar` section for a description
of how this value is computed.)
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the position of the data column within the file.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
Example: If the column with index 5 is in the 3rd position in the file, then indices[5] = 0x80 + 0x03 = 0x83.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
|------------------------------------------|------------------------------------------------------------------------------------|
| Byte offset | Description |
|------------------------------------------|------------------------------------------------------------------------------------|
| 0 | version (1 byte) | sszEncodedDataColumnSidecarSize (4 bytes) | indices (128 bytes) |
|133 + 0*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 1*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|133 + 2*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
| ... | ... |
|133 + 127*sszEncodedDataColumnSidecarSize | sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes) |
|------------------------------------------|------------------------------------------------------------------------------------|
Each file is named after the block root that the data columns are committed to.
Example: `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
Database organisation
---------------------
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which is the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
Example:
data-columns
├── 0
│   ├── 3638
│   │   ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│   │   ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│   │   ├── ...
│   │   ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│   │   └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│   └── 3639
│      ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│      ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│      ├── ...
│      ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│      └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│   ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│   ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│   ├── ...
│   ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│   └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
   ├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
   ├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
   ├── ...
   ├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
   └── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
Computation of the maximum size of a DataColumnSidecar
------------------------------------------------------
https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
- index: 2 bytes (ColumnIndex)
- `column`: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 64 (FIELD_ELEMENTS_PER_CELL) * 32 bytes (BYTES_PER_FIELD_ELEMENT) = 8,388,608 bytes
- kzg_commitments: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGCommitment) = 196,608 bytes
- kzg_proofs: 4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) * 48 bytes (KZGProof) = 196,608 bytes
- signed_block_header: 2 bytes (Slot) + 2 bytes (ValidatorIndex) + 3 * 2 bytes (Root) + 96 bytes (BLSSignature) = 106 bytes
- kzg_commitments_inclusion_proof: 4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) * 32 bytes = 128 bytes
TOTAL: 8,782,060 bytes = 70,256,480 bits
log(70,256,480) / log(2) ~= 26.07
==> 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be 2**32 bits = 536,870,912 bytes,
which leaves room of 536,870,912 bytes - 8,782,060 bytes ~= 503 megabytes to store the extra data needed by SSZ encoding (which is more than enough).
*/
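
The layout above is enough to locate any single column without scanning the whole file. A minimal sketch of that lookup (not part of this change; the function name is illustrative, and the big-endian decoding of the size field is an assumption, so check data_column.go for the actual encoding):

```go
package example

import "encoding/binary"

// locateColumn returns the absolute byte offset and length of the SSZ-encoded
// sidecar for `column` (0..127), following the layout documented above:
// 1 version byte, 4 size bytes, 128 index bytes, then the sidecars.
func locateColumn(header []byte, column int) (offset int64, size uint32, ok bool) {
	const headerSize = 1 + 4 + 128 // 133 bytes
	if len(header) < headerSize || column < 0 || column > 127 {
		return 0, 0, false
	}
	// NOTE: big-endian is assumed here; the on-disk encoding is defined in data_column.go.
	size = binary.BigEndian.Uint32(header[1:5]) // sszEncodedDataColumnSidecarSize
	idx := header[5+column]
	if idx&0x80 == 0 {
		return 0, 0, false // sentinel bit unset: this column is not stored
	}
	position := int64(idx & 0x7f) // position of the column within the file
	return headerSize + position*int64(size), size, true
}
```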

View File

@@ -1,197 +0,0 @@
# Filesystem storage documentation
This document describes the file formats and database organization for storing data column sidecars and execution proofs.
---
# Data column sidecars
## File organisation
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
_(Note: This is not strictly needed, but it will help a lot if, in the future, we want to modify the file structure.)_
- The next 4 bytes represent the size of an SSZ encoded data column sidecar.
(See the [Computation of the maximum size of a DataColumnSidecar](#computation-of-the-maximum-size-of-a-datacolumnsidecar) section for a description
of how this value is computed).
- The next 128 bytes represent the index in the file of a given column.
The first bit of each byte in the index is set to 0 if there is no data column,
and set to 1 if there is a data column.
The remaining 7 bits (from 0 to 127) represent the position of the data column within the file.
This sentinel bit is needed to distinguish between the column with index 0 and no column.
**Example:** If the column with index 5 is in the 3rd position in the file, then `indices[5] = 0x80 + 0x03 = 0x83`.
- The rest of the file is a repeat of the SSZ encoded data column sidecars.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| sszEncodedDataColumnSidecarSize (4 bytes) \| indices (128 bytes)` |
| `133 + 0×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 1×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| `133 + 2×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
| ... | ... |
| `133 + 127×sszEncodedDataColumnSidecarSize` | `sszEncodedDataColumnSidecar (sszEncodedDataColumnSidecarSize bytes)` |
Each file is named after the block root where the data columns are committed to.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database organisation
SSZ encoded data column sidecars are stored following the `by-epoch` layout.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example directory structure
```
data-columns
├── 0
│ ├── 3638
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ ├── ...
│ │ ├── 0xeb78e2b2350a71c640f1e96fea9e42f38e65705ab7e6e100c8bc9c589f2c5f2b.sszs
│ │ └── 0xeb7ee68da988fd20d773d45aad01dd62527734367a146e2b048715bd68a4e370.sszs
│ └── 3639
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ ├── 0x1bf5edff6b6ba2b65b1db325ff3312bbb57da461ef2ae651bd741af851aada3a.sszs
│ ├── ...
│ ├── 0xa156a527e631f858fee79fab7ef1fde3f6117a2e1201d47c09fbab0c6780c937.sszs
│ └── 0xcd80bc535ddc467dea1d19e0c39c1160875ccd1989061bcd8ce206e3c1261c87.sszs
└── 1
├── 4096
│ ├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
│ ├── 0x11f420928d8de41c50e735caab0369996824a5299c5f054e097965855925697d.sszs
│ ├── ...
│ ├── 0xbe91fc782877ed400d95c02c61aebfdd592635d11f8e64c94b46abd84f45c967.sszs
│ └── 0xf246189f078f02d30173ff74605cf31c9e65b5e463275ebdbeb40476638135ff.sszs
└── 4097
├── 0x454d000674793c479e90504c0fe9827b50bb176ae022dab4e37d6a21471ab570.sszs
├── 0xac5eb7437d7190c48cfa863e3c45f96a7f8af371d47ac12ccda07129a06af763.sszs
├── ...
├── 0xb7df30561d9d92ab5fafdd96bca8b44526497c8debf0fc425c7a0770b2abeb83.sszs
└── 0xc1dd0b1ae847b6ec62303a36d08c6a4a2e9e3ec4be3ff70551972a0ee3de9c14.sszs
```
## Computation of the maximum size of a `DataColumnSidecar`
Reference: [Ethereum Consensus Specs - Fulu DAS Core](https://github.com/ethereum/consensus-specs/blob/master/specs/fulu/das-core.md#datacolumnsidecar)
```python
class DataColumnSidecar(Container):
index: ColumnIndex # Index of column in extended matrix
column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
signed_block_header: SignedBeaconBlockHeader
kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
```
### Size breakdown
| Field | Calculation | Size |
|-------|-------------|------|
| `index` | `ColumnIndex` | `2 bytes` |
| `column` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 64 (FIELD_ELEMENTS_PER_CELL) × 32 bytes (BYTES_PER_FIELD_ELEMENT)` | `8,388,608 bytes` |
| `kzg_commitments` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGCommitment)` | `196,608 bytes` |
| `kzg_proofs` | `4,096 (MAX_BLOB_COMMITMENTS_PER_BLOCK) × 48 bytes (KZGProof)` | `196,608 bytes` |
| `signed_block_header` | `2 bytes (Slot) + 2 bytes (ValidatorIndex) + 3 × 2 bytes (Root) + 96 bytes (BLSSignature)` | `106 bytes` |
| `kzg_commitments_inclusion_proof` | `4 (KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH) × 32 bytes` | `128 bytes` |
**TOTAL:** `8,782,060 bytes = 70,256,480 bits`
```
log(70,256,480) / log(2) ≈ 26.07
```
**Conclusion:** 32 bits (4 bytes) are enough to store the maximum size of a data column sidecar.
The maximum size of an SSZ encoded data column can be `2³² bits = 536,870,912 bytes`,
which leaves room of `536,870,912 bytes - 8,782,060 bytes ≈ 503 megabytes` to store the extra data needed by SSZ encoding (which is more than enough).
---
# Execution proofs
## File organisation
Unlike data column sidecars (which have a fixed size per block), execution proofs have variable sizes.
To handle this, we use an offset table that stores the position and size of each proof.
- The first byte represents the version of the file structure (up to `0xff = 255`).
We set it to `0x01`.
- The next 64 bytes represent the offset table with 8 slots (one per proof type).
Each slot contains:
- 4 bytes for the offset (relative to end of header)
- 4 bytes for the size of the SSZ-encoded proof
If the size is 0, the proof is not present.
- The rest of the file contains the SSZ encoded proofs, stored contiguously.
### File layout
| Byte offset | Description |
|-------------|-------------|
| `0` | `version (1 byte) \| offsetTable (64 bytes)` |
| `65 + offsetTable[0].offset` | `sszEncodedProof (offsetTable[0].size bytes)` |
| `65 + offsetTable[1].offset` | `sszEncodedProof (offsetTable[1].size bytes)` |
| ... | ... |
| `65 + offsetTable[7].offset` | `sszEncodedProof (offsetTable[7].size bytes)` |
**Header size:** 1 (version) + 64 (offset table) = **65 bytes**
### Offset table entry format
Each slot in the offset table (8 bytes per slot):
- `offset` (4 bytes, big-endian): Offset from end of header where proof data begins
- `size` (4 bytes, big-endian): Size of the SSZ-encoded proof in bytes
**Note:** Offsets are relative to the end of the header (byte 65), not the start of the file.
This maximizes the usable range of the 4-byte offset field.
### Reading a proof with `proofID=N` (O(1) access)
1. Read header (65 bytes)
2. Check slot N: if `size == 0`, proof not present
3. Seek to `(65 + offset)`, read `size` bytes, SSZ unmarshal
Each file is named after the block root.
**Example:** `0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs`
## Database Organisation
SSZ encoded execution proofs are stored following the same `by-epoch` layout as data column sidecars.
- The first layer is a directory corresponding to the `period`, which corresponds to the epoch divided by 4096.
- The second layer is a directory corresponding to the epoch.
- Then all files are stored in the epoch directory.
### Example Directory Structure
```
proofs
├── 0
│ ├── 100
│ │ ├── 0x259c6d2f6a0bb75e2405cea7cb248e5663dc26b9404fd3bcd777afc20de91c1e.sszs
│ │ ├── 0x2a855b1f6e9a2f04f8383e336325bf7d5ba02d1eab3ef90ef183736f8c768533.sszs
│ │ └── ...
│ └── 101
│ ├── 0x0fd231fe95e57936fa44f6c712c490b9e337a481b661dfd46768901e90444330.sszs
│ └── ...
└── 1
└── 4096
├── 0x0d244009093e2bedb72eb265280290199e8c7bf1d90d7583c41af40d9f662269.sszs
└── ...
```
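
The offset-table arithmetic described above reduces to a small amount of index math. A sketch under the documented constants (65-byte header, eight 8-byte slots, big-endian fields, offsets relative to the end of the header); the names are illustrative and this snippet is not part of the diff:

```go
package example

import "encoding/binary"

const (
	proofHeaderSize = 65 // 1 version byte + 8 slots * 8 bytes
	proofSlotSize   = 8  // 4-byte offset + 4-byte size, big-endian
)

// proofReadRange decodes slot `proofID` (0..7) from the 65-byte header and
// returns the absolute byte range of the SSZ-encoded proof in the file,
// or ok=false when the slot is empty (size == 0).
func proofReadRange(header []byte, proofID int) (start, length int64, ok bool) {
	if len(header) < proofHeaderSize || proofID < 0 || proofID > 7 {
		return 0, 0, false
	}
	pos := 1 + proofID*proofSlotSize
	offset := binary.BigEndian.Uint32(header[pos : pos+4])
	size := binary.BigEndian.Uint32(header[pos+4 : pos+8])
	if size == 0 {
		return 0, 0, false // proof not present
	}
	// Offsets are relative to the end of the header (byte 65), not the file start.
	return proofHeaderSize + int64(offset), int64(size), true
}
```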

View File

@@ -70,36 +70,4 @@ var (
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
// Proofs
proofSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "proof_storage_save_latency",
Help: "Latency of proof storage save operations in milliseconds",
Buckets: []float64{3, 5, 7, 9, 11, 13, 20, 50},
})
proofFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "proof_storage_get_latency",
Help: "Latency of proof storage get operations in milliseconds",
Buckets: []float64{3, 5, 7, 9, 11, 13},
})
proofPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "proof_pruned",
Help: "Number of proof files pruned.",
})
proofWrittenCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "proof_written",
Help: "Number of proof files written",
})
proofDiskCount = promauto.NewGauge(prometheus.GaugeOpts{
Name: "proof_disk_count",
Help: "Approximate number of proof files in storage",
})
proofFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "proof_file_sync_latency",
Help: "Latency of sync operations when saving proofs in milliseconds",
})
proofPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "proof_prune_latency",
Help: "Latency of proof prune operations in milliseconds",
})
)

View File

@@ -144,45 +144,3 @@ func NewEphemeralDataColumnStorageWithMocker(t testing.TB) (*DataColumnMocker, *
fs, dcs := NewEphemeralDataColumnStorageAndFs(t)
return &DataColumnMocker{fs: fs, dcs: dcs}, dcs
}
// Proofs
// ------
// NewEphemeralProofStorage should only be used for tests.
// The instance of ProofStorage returned is backed by an in-memory virtual filesystem,
// improving test performance and simplifying cleanup.
func NewEphemeralProofStorage(t testing.TB, opts ...ProofStorageOption) *ProofStorage {
return NewWarmedEphemeralProofStorageUsingFs(t, afero.NewMemMapFs(), opts...)
}
// NewEphemeralProofStorageAndFs can be used by tests that want access to the virtual filesystem
// in order to interact with it outside the parameters of the ProofStorage API.
func NewEphemeralProofStorageAndFs(t testing.TB, opts ...ProofStorageOption) (afero.Fs, *ProofStorage) {
fs := afero.NewMemMapFs()
ps := NewWarmedEphemeralProofStorageUsingFs(t, fs, opts...)
return fs, ps
}
// NewEphemeralProofStorageUsingFs creates a ProofStorage backed by the provided filesystem.
func NewEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
defaultOpts := []ProofStorageOption{
WithProofRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
WithProofFs(fs),
}
// User opts come last so they can override defaults
allOpts := append(defaultOpts, opts...)
ps, err := NewProofStorage(context.Background(), allOpts...)
if err != nil {
t.Fatal(err)
}
return ps
}
// NewWarmedEphemeralProofStorageUsingFs creates a ProofStorage with a warmed cache.
func NewWarmedEphemeralProofStorageUsingFs(t testing.TB, fs afero.Fs, opts ...ProofStorageOption) *ProofStorage {
ps := NewEphemeralProofStorageUsingFs(t, fs, opts...)
ps.WarmCache()
return ps
}

View File

@@ -1,952 +0,0 @@
package filesystem
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/async"
"github.com/OffchainLabs/prysm/v7/async/event"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/io/file"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/spf13/afero"
)
const (
proofVersion = 0x01
proofVersionSize = 1 // bytes
maxProofTypes = 8 // ExecutionProofId max value (EXECUTION_PROOF_TYPE_COUNT)
proofOffsetSize = 4 // bytes for offset (uint32)
proofSizeSize = 4 // bytes for size (uint32)
proofSlotSize = proofOffsetSize + proofSizeSize // 8 bytes per slot
proofOffsetTableSize = maxProofTypes * proofSlotSize // 64 bytes
proofHeaderSize = proofVersionSize + proofOffsetTableSize // 65 bytes
proofsFileExtension = "sszs"
proofPrunePeriod = 1 * time.Minute
)
var (
errProofIDTooLarge = errors.New("proof ID too large")
errWrongProofBytesWritten = errors.New("wrong number of bytes written")
errWrongProofVersion = errors.New("wrong proof version")
errWrongProofBytesRead = errors.New("wrong number of bytes read")
errNoProofBasePath = errors.New("ProofStorage base path not specified in init")
errProofAlreadyExists = errors.New("proof already exists")
)
type (
// ProofIdent is a unique identifier for a proof.
ProofIdent struct {
BlockRoot [fieldparams.RootLength]byte
Epoch primitives.Epoch
ProofType uint8
}
// ProofsIdent is a collection of unique identifiers for proofs.
ProofsIdent struct {
BlockRoot [fieldparams.RootLength]byte
Epoch primitives.Epoch
ProofTypes []uint8
}
// ProofStorage is the concrete implementation of the filesystem backend for saving and retrieving ExecutionProofs.
ProofStorage struct {
base string
retentionEpochs primitives.Epoch
fs afero.Fs
cache *proofCache
proofFeed *event.Feed
pruneMu sync.RWMutex
mu sync.Mutex // protects muChans
muChans map[[fieldparams.RootLength]byte]*proofMuChan
}
// ProofStorageOption is a functional option for configuring a ProofStorage.
ProofStorageOption func(*ProofStorage) error
proofMuChan struct {
mu *sync.RWMutex
toStore chan []blocks.VerifiedROSignedExecutionProof
}
// proofSlotEntry represents the offset and size for a proof in the file.
proofSlotEntry struct {
offset uint32
size uint32
}
// proofOffsetTable is the offset table with 8 slots indexed by proofID.
proofOffsetTable [maxProofTypes]proofSlotEntry
// proofFileMetadata contains metadata extracted from a proof file path.
proofFileMetadata struct {
period uint64
epoch primitives.Epoch
blockRoot [fieldparams.RootLength]byte
}
)
// WithProofBasePath is a required option that sets the base path of proof storage.
func WithProofBasePath(base string) ProofStorageOption {
return func(ps *ProofStorage) error {
ps.base = base
return nil
}
}
// WithProofRetentionEpochs is an option that changes the number of epochs proofs will be persisted.
func WithProofRetentionEpochs(e primitives.Epoch) ProofStorageOption {
return func(ps *ProofStorage) error {
ps.retentionEpochs = e
return nil
}
}
// WithProofFs allows the afero.Fs implementation to be customized.
// Used by tests to substitute an in-memory filesystem.
func WithProofFs(fs afero.Fs) ProofStorageOption {
return func(ps *ProofStorage) error {
ps.fs = fs
return nil
}
}
// NewProofStorage creates a new instance of the ProofStorage object.
func NewProofStorage(ctx context.Context, opts ...ProofStorageOption) (*ProofStorage, error) {
storage := &ProofStorage{
proofFeed: new(event.Feed),
muChans: make(map[[fieldparams.RootLength]byte]*proofMuChan),
}
for _, o := range opts {
if err := o(storage); err != nil {
return nil, fmt.Errorf("failed to create proof storage: %w", err)
}
}
// Allow tests to set up a different fs using WithProofFs.
if storage.fs == nil {
if storage.base == "" {
return nil, errNoProofBasePath
}
storage.base = path.Clean(storage.base)
if err := file.MkdirAll(storage.base); err != nil {
return nil, fmt.Errorf("failed to create proof storage at %s: %w", storage.base, err)
}
storage.fs = afero.NewBasePathFs(afero.NewOsFs(), storage.base)
}
storage.cache = newProofCache()
async.RunEvery(ctx, proofPrunePeriod, func() {
storage.pruneMu.Lock()
defer storage.pruneMu.Unlock()
storage.prune()
})
return storage, nil
}
// WarmCache warms the cache of the proof filesystem.
func (ps *ProofStorage) WarmCache() {
start := time.Now()
log.Info("Proof filesystem cache warm-up started")
ps.pruneMu.Lock()
defer ps.pruneMu.Unlock()
// List all period directories
periodFileInfos, err := afero.ReadDir(ps.fs, ".")
if err != nil {
log.WithError(err).Error("Error reading top directory during proof warm cache")
return
}
// Iterate through periods
for _, periodFileInfo := range periodFileInfos {
if !periodFileInfo.IsDir() {
continue
}
periodPath := periodFileInfo.Name()
// List all epoch directories in this period
epochFileInfos, err := afero.ReadDir(ps.fs, periodPath)
if err != nil {
log.WithError(err).WithField("period", periodPath).Error("Error reading period directory during proof warm cache")
continue
}
// Iterate through epochs
for _, epochFileInfo := range epochFileInfos {
if !epochFileInfo.IsDir() {
continue
}
epochPath := path.Join(periodPath, epochFileInfo.Name())
// List all .sszs files in this epoch
files, err := ps.listProofEpochFiles(epochPath)
if err != nil {
log.WithError(err).WithField("epoch", epochPath).Error("Error listing epoch files during proof warm cache")
continue
}
// Process all files in this epoch in parallel
ps.processProofEpochFiles(files)
}
}
// Prune the cache and the filesystem
ps.prune()
totalElapsed := time.Since(start)
log.WithField("elapsed", totalElapsed).Info("Proof filesystem cache warm-up complete")
}
// listProofEpochFiles lists all .sszs files in an epoch directory.
func (ps *ProofStorage) listProofEpochFiles(epochPath string) ([]string, error) {
fileInfos, err := afero.ReadDir(ps.fs, epochPath)
if err != nil {
return nil, fmt.Errorf("read epoch directory: %w", err)
}
files := make([]string, 0, len(fileInfos))
for _, fileInfo := range fileInfos {
if fileInfo.IsDir() {
continue
}
fileName := fileInfo.Name()
if strings.HasSuffix(fileName, "."+proofsFileExtension) {
files = append(files, path.Join(epochPath, fileName))
}
}
return files, nil
}
// processProofEpochFiles processes all proof files in an epoch in parallel.
func (ps *ProofStorage) processProofEpochFiles(files []string) {
var wg sync.WaitGroup
for _, filePath := range files {
wg.Go(func() {
if err := ps.processProofFile(filePath); err != nil {
log.WithError(err).WithField("file", filePath).Error("Error processing proof file during warm cache")
}
})
}
wg.Wait()
}
// processProofFile processes a single .sszs proof file for cache warming.
func (ps *ProofStorage) processProofFile(filePath string) error {
// Extract metadata from the file path
fileMetadata, err := extractProofFileMetadata(filePath)
if err != nil {
return fmt.Errorf("extract proof file metadata: %w", err)
}
// Open the file
f, err := ps.fs.Open(filePath)
if err != nil {
return fmt.Errorf("open file: %w", err)
}
defer func() {
if closeErr := f.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during proof warm cache")
}
}()
// Read the offset table
offsetTable, _, err := ps.readHeader(f)
if err != nil {
return fmt.Errorf("read header: %w", err)
}
// Add all present proofs to the cache
for proofID, entry := range offsetTable {
if entry.size == 0 {
continue
}
proofIdent := ProofIdent{
BlockRoot: fileMetadata.blockRoot,
Epoch: fileMetadata.epoch,
ProofType: uint8(proofID),
}
ps.cache.set(proofIdent)
}
return nil
}
// Summary returns the ProofStorageSummary for a given root.
func (ps *ProofStorage) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
return ps.cache.Summary(root)
}
// Save saves execution proofs into the database.
// The proofs must all belong to the same block (same block root).
func (ps *ProofStorage) Save(proofs []blocks.VerifiedROSignedExecutionProof) error {
startTime := time.Now()
if len(proofs) == 0 {
return nil
}
// Safely retrieve the block root and the epoch.
first := proofs[0]
blockRoot := first.BlockRoot()
epoch := first.Epoch()
proofTypes := make([]uint8, 0, len(proofs))
for _, proof := range proofs {
// Check if the proof ID is valid.
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
return errProofIDTooLarge
}
// Save proofs in the filesystem.
if err := ps.saveFilesystem(proof.BlockRoot(), proof.Epoch(), proofs); err != nil {
return fmt.Errorf("save filesystem: %w", err)
}
proofTypes = append(proofTypes, proof.Message.ProofType[0])
}
// Compute the proofs ident.
proofsIdent := ProofsIdent{BlockRoot: blockRoot, Epoch: epoch, ProofTypes: proofTypes}
// Set proofs in the cache.
ps.cache.setMultiple(proofsIdent)
// Notify the proof feed.
ps.proofFeed.Send(proofsIdent)
proofSaveLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
}
// saveFilesystem saves proofs into the database.
// This function expects all proofs to belong to the same block.
func (ps *ProofStorage) saveFilesystem(root [fieldparams.RootLength]byte, epoch primitives.Epoch, proofs []blocks.VerifiedROSignedExecutionProof) error {
// Compute the file path.
filePath := proofFilePath(root, epoch)
ps.pruneMu.RLock()
defer ps.pruneMu.RUnlock()
fileMu, toStore := ps.fileMutexChan(root)
toStore <- proofs
fileMu.Lock()
defer fileMu.Unlock()
// Check if the file exists.
exists, err := afero.Exists(ps.fs, filePath)
if err != nil {
return fmt.Errorf("afero exists: %w", err)
}
if exists {
if err := ps.saveProofExistingFile(filePath, toStore); err != nil {
return fmt.Errorf("save proof existing file: %w", err)
}
return nil
}
if err := ps.saveProofNewFile(filePath, toStore); err != nil {
return fmt.Errorf("save proof new file: %w", err)
}
return nil
}
// Subscribe subscribes to the proof feed.
// It returns the subscription and a 1-size buffer channel to receive proof idents.
func (ps *ProofStorage) Subscribe() (event.Subscription, <-chan ProofsIdent) {
identsChan := make(chan ProofsIdent, 1)
subscription := ps.proofFeed.Subscribe(identsChan)
return subscription, identsChan
}
// Get retrieves signed execution proofs from the database.
// If one of the requested proofs is not found, it is just skipped.
// If proofIDs is nil, then all stored proofs are returned.
func (ps *ProofStorage) Get(root [fieldparams.RootLength]byte, proofIDs []uint8) ([]*ethpb.SignedExecutionProof, error) {
ps.pruneMu.RLock()
defer ps.pruneMu.RUnlock()
fileMu, _ := ps.fileMutexChan(root)
fileMu.RLock()
defer fileMu.RUnlock()
startTime := time.Now()
// Build all proofIDs if none are provided.
if proofIDs == nil {
proofIDs = make([]uint8, maxProofTypes)
for i := range proofIDs {
proofIDs[i] = uint8(i)
}
}
summary, ok := ps.cache.get(root)
if !ok {
// Nothing found in db. Exit early.
return nil, nil
}
// Check if any requested proofID exists.
if !slices.ContainsFunc(proofIDs, summary.HasProof) {
return nil, nil
}
// Compute the file path.
filePath := proofFilePath(root, summary.epoch)
// Open the proof file.
file, err := ps.fs.Open(filePath)
if err != nil {
return nil, fmt.Errorf("proof file open: %w", err)
}
defer func() {
if closeErr := file.Close(); closeErr != nil {
log.WithError(closeErr).WithField("file", filePath).Error("Error closing proof file")
}
}()
// Read the header.
offsetTable, _, err := ps.readHeader(file)
if err != nil {
return nil, fmt.Errorf("read header: %w", err)
}
// Retrieve proofs from the file.
proofs := make([]*ethpb.SignedExecutionProof, 0, len(proofIDs))
for _, proofID := range proofIDs {
if proofID >= maxProofTypes {
continue
}
entry := offsetTable[proofID]
// Skip if the proof is not saved.
if entry.size == 0 {
continue
}
// Seek to the proof offset (offset is relative to end of header).
_, err = file.Seek(proofHeaderSize+int64(entry.offset), io.SeekStart)
if err != nil {
return nil, fmt.Errorf("seek: %w", err)
}
// Read the SSZ encoded proof.
sszProof := make([]byte, entry.size)
n, err := io.ReadFull(file, sszProof)
if err != nil {
return nil, fmt.Errorf("read proof: %w", err)
}
if n != int(entry.size) {
return nil, errWrongProofBytesRead
}
// Unmarshal the signed proof.
proof := new(ethpb.SignedExecutionProof)
if err := proof.UnmarshalSSZ(sszProof); err != nil {
return nil, fmt.Errorf("unmarshal proof: %w", err)
}
proofs = append(proofs, proof)
}
proofFetchLatency.Observe(float64(time.Since(startTime).Milliseconds()))
return proofs, nil
}
// Remove deletes all proofs for a given root.
func (ps *ProofStorage) Remove(blockRoot [fieldparams.RootLength]byte) error {
ps.pruneMu.RLock()
defer ps.pruneMu.RUnlock()
fileMu, _ := ps.fileMutexChan(blockRoot)
fileMu.Lock()
defer fileMu.Unlock()
summary, ok := ps.cache.get(blockRoot)
if !ok {
// Nothing found in db. Exit early.
return nil
}
// Remove the proofs from the cache.
ps.cache.evict(blockRoot)
// Remove the proof file.
filePath := proofFilePath(blockRoot, summary.epoch)
if err := ps.fs.Remove(filePath); err != nil {
return fmt.Errorf("remove: %w", err)
}
return nil
}
// Clear deletes all files on the filesystem.
func (ps *ProofStorage) Clear() error {
ps.pruneMu.Lock()
defer ps.pruneMu.Unlock()
dirs, err := listDir(ps.fs, ".")
if err != nil {
return fmt.Errorf("list dir: %w", err)
}
ps.cache.clear()
for _, dir := range dirs {
if err := ps.fs.RemoveAll(dir); err != nil {
return fmt.Errorf("remove all: %w", err)
}
}
return nil
}
// saveProofNewFile saves proofs to a new file.
func (ps *ProofStorage) saveProofNewFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
// Initialize the offset table.
var offsetTable proofOffsetTable
var sszEncodedProofs []byte
currentOffset := uint32(0)
for {
proofs := pullProofChan(inputProofs)
if len(proofs) == 0 {
break
}
for _, proof := range proofs {
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
continue
}
// Skip if already in offset table (duplicate).
if offsetTable[proofType].size != 0 {
continue
}
// SSZ encode the full signed proof.
sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
if err != nil {
return fmt.Errorf("marshal proof SSZ: %w", err)
}
proofSize := uint32(len(sszProof))
// Update offset table.
offsetTable[proofType] = proofSlotEntry{
offset: currentOffset,
size: proofSize,
}
// Append SSZ encoded proof.
sszEncodedProofs = append(sszEncodedProofs, sszProof...)
currentOffset += proofSize
}
}
if len(sszEncodedProofs) == 0 {
// Nothing to save.
return nil
}
// Create directory structure.
dir := filepath.Dir(filePath)
if err := ps.fs.MkdirAll(dir, directoryPermissions()); err != nil {
return fmt.Errorf("mkdir all: %w", err)
}
file, err := ps.fs.Create(filePath)
if err != nil {
return fmt.Errorf("create proof file: %w", err)
}
defer func() {
closeErr := file.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Build the file content.
countToWrite := proofHeaderSize + len(sszEncodedProofs)
bytes := make([]byte, 0, countToWrite)
// Write version byte.
bytes = append(bytes, byte(proofVersion))
// Write offset table.
bytes = append(bytes, encodeOffsetTable(offsetTable)...)
// Write SSZ encoded proofs.
bytes = append(bytes, sszEncodedProofs...)
countWritten, err := file.Write(bytes)
if err != nil {
return fmt.Errorf("write: %w", err)
}
if countWritten != countToWrite {
return errWrongProofBytesWritten
}
syncStart := time.Now()
if err := file.Sync(); err != nil {
return fmt.Errorf("sync: %w", err)
}
proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
return nil
}
// saveProofExistingFile saves proofs to an existing file.
func (ps *ProofStorage) saveProofExistingFile(filePath string, inputProofs chan []blocks.VerifiedROSignedExecutionProof) (err error) {
// Open the file for read/write.
file, err := ps.fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600))
if err != nil {
return fmt.Errorf("open proof file: %w", err)
}
defer func() {
closeErr := file.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
// Read current header.
offsetTable, fileSize, err := ps.readHeader(file)
if err != nil {
return fmt.Errorf("read header: %w", err)
}
var sszEncodedProofs []byte
currentOffset := uint32(fileSize - proofHeaderSize)
modified := false
for {
proofs := pullProofChan(inputProofs)
if len(proofs) == 0 {
break
}
for _, proof := range proofs {
proofType := proof.Message.ProofType[0]
if proofType >= maxProofTypes {
continue
}
// Skip if proof already exists.
if offsetTable[proofType].size != 0 {
continue
}
// SSZ encode the full signed proof.
sszProof, err := proof.SignedExecutionProof.MarshalSSZ()
if err != nil {
return fmt.Errorf("marshal proof SSZ: %w", err)
}
proofSize := uint32(len(sszProof))
// Update offset table.
offsetTable[proofType] = proofSlotEntry{
offset: currentOffset,
size: proofSize,
}
// Append SSZ encoded proof.
sszEncodedProofs = append(sszEncodedProofs, sszProof...)
currentOffset += proofSize
modified = true
}
}
if !modified {
return nil
}
// Write updated offset table back to file (at position 1, after version byte).
encodedTable := encodeOffsetTable(offsetTable)
count, err := file.WriteAt(encodedTable, int64(proofVersionSize))
if err != nil {
return fmt.Errorf("write offset table: %w", err)
}
if count != proofOffsetTableSize {
return errWrongProofBytesWritten
}
// Append the SSZ encoded proofs to the end of the file.
count, err = file.WriteAt(sszEncodedProofs, fileSize)
if err != nil {
return fmt.Errorf("write SSZ encoded proofs: %w", err)
}
if count != len(sszEncodedProofs) {
return errWrongProofBytesWritten
}
syncStart := time.Now()
if err := file.Sync(); err != nil {
return fmt.Errorf("sync: %w", err)
}
proofFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
return nil
}
// readHeader reads the file header and returns the offset table and file size.
func (ps *ProofStorage) readHeader(file afero.File) (proofOffsetTable, int64, error) {
var header [proofHeaderSize]byte
countRead, err := file.ReadAt(header[:], 0)
if err != nil {
return proofOffsetTable{}, 0, fmt.Errorf("read at: %w", err)
}
if countRead != proofHeaderSize {
return proofOffsetTable{}, 0, errWrongProofBytesRead
}
// Check version.
fileVersion := int(header[0])
if fileVersion != proofVersion {
return proofOffsetTable{}, 0, errWrongProofVersion
}
// Decode offset table and compute file size.
var offsetTable proofOffsetTable
fileSize := int64(proofHeaderSize)
for i := range offsetTable {
pos := proofVersionSize + i*proofSlotSize
offsetTable[i].offset = binary.BigEndian.Uint32(header[pos : pos+proofOffsetSize])
offsetTable[i].size = binary.BigEndian.Uint32(header[pos+proofOffsetSize : pos+proofSlotSize])
fileSize += int64(offsetTable[i].size)
}
return offsetTable, fileSize, nil
}
// prune cleans the cache, the filesystem and mutexes.
func (ps *ProofStorage) prune() {
startTime := time.Now()
defer func() {
proofPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
}()
highestStoredEpoch := ps.cache.HighestEpoch()
// Check if we need to prune.
if highestStoredEpoch < ps.retentionEpochs {
return
}
highestEpochToPrune := highestStoredEpoch - ps.retentionEpochs
highestPeriodToPrune := proofPeriod(highestEpochToPrune)
// Prune the cache.
prunedCount := ps.cache.pruneUpTo(highestEpochToPrune)
if prunedCount == 0 {
return
}
proofPrunedCounter.Add(float64(prunedCount))
// Prune the filesystem.
periodFileInfos, err := afero.ReadDir(ps.fs, ".")
if err != nil {
log.WithError(err).Error("Error encountered while reading top directory during proof prune")
return
}
for _, periodFileInfo := range periodFileInfos {
periodStr := periodFileInfo.Name()
period, err := strconv.ParseUint(periodStr, 10, 64)
if err != nil {
log.WithError(err).Errorf("Error encountered while parsing period %s", periodStr)
continue
}
if period < highestPeriodToPrune {
// Remove everything lower than highest period to prune.
if err := ps.fs.RemoveAll(periodStr); err != nil {
log.WithError(err).Error("Error encountered while removing period directory during proof prune")
}
continue
}
if period > highestPeriodToPrune {
// Do not remove anything higher than highest period to prune.
continue
}
// if period == highestPeriodToPrune
epochFileInfos, err := afero.ReadDir(ps.fs, periodStr)
if err != nil {
log.WithError(err).Error("Error encountered while reading epoch directory during proof prune")
continue
}
for _, epochFileInfo := range epochFileInfos {
epochStr := epochFileInfo.Name()
epochDir := path.Join(periodStr, epochStr)
epoch, err := strconv.ParseUint(epochStr, 10, 64)
if err != nil {
log.WithError(err).Errorf("Error encountered while parsing epoch %s", epochStr)
continue
}
if primitives.Epoch(epoch) > highestEpochToPrune {
continue
}
if err := ps.fs.RemoveAll(epochDir); err != nil {
log.WithError(err).Error("Error encountered while removing epoch directory during proof prune")
continue
}
}
}
ps.mu.Lock()
defer ps.mu.Unlock()
clear(ps.muChans)
}
// fileMutexChan returns the file mutex and channel for a given block root.
func (ps *ProofStorage) fileMutexChan(root [fieldparams.RootLength]byte) (*sync.RWMutex, chan []blocks.VerifiedROSignedExecutionProof) {
ps.mu.Lock()
defer ps.mu.Unlock()
mc, ok := ps.muChans[root]
if !ok {
mc = &proofMuChan{
mu: new(sync.RWMutex),
toStore: make(chan []blocks.VerifiedROSignedExecutionProof, 1),
}
ps.muChans[root] = mc
return mc.mu, mc.toStore
}
return mc.mu, mc.toStore
}
// pullProofChan pulls proofs from the input channel until it is empty.
func pullProofChan(inputProofs chan []blocks.VerifiedROSignedExecutionProof) []blocks.VerifiedROSignedExecutionProof {
proofs := make([]blocks.VerifiedROSignedExecutionProof, 0, maxProofTypes)
for {
select {
case batch := <-inputProofs:
proofs = append(proofs, batch...)
default:
return proofs
}
}
}
// proofFilePath builds the file path in database for a given root and epoch.
func proofFilePath(root [fieldparams.RootLength]byte, epoch primitives.Epoch) string {
return path.Join(
fmt.Sprintf("%d", proofPeriod(epoch)),
fmt.Sprintf("%d", epoch),
fmt.Sprintf("%#x.%s", root, proofsFileExtension),
)
}
// extractProofFileMetadata extracts the metadata from a proof file path.
func extractProofFileMetadata(path string) (*proofFileMetadata, error) {
// Use filepath.Separator to handle both Windows (\) and Unix (/) path separators
parts := strings.Split(path, string(filepath.Separator))
if len(parts) != 3 {
return nil, fmt.Errorf("unexpected proof file %s", path)
}
period, err := strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse period from %s: %w", path, err)
}
epoch, err := strconv.ParseUint(parts[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("failed to parse epoch from %s: %w", path, err)
}
partsRoot := strings.Split(parts[2], ".")
if len(partsRoot) != 2 {
return nil, fmt.Errorf("failed to parse root from %s", path)
}
blockRootString := partsRoot[0]
if len(blockRootString) != 2+2*fieldparams.RootLength || blockRootString[:2] != "0x" {
return nil, fmt.Errorf("unexpected proof file name %s", path)
}
if partsRoot[1] != proofsFileExtension {
return nil, fmt.Errorf("unexpected extension %s", path)
}
blockRootSlice, err := hex.DecodeString(blockRootString[2:])
if err != nil {
return nil, fmt.Errorf("decode string from %s: %w", path, err)
}
var blockRoot [fieldparams.RootLength]byte
copy(blockRoot[:], blockRootSlice)
result := &proofFileMetadata{period: period, epoch: primitives.Epoch(epoch), blockRoot: blockRoot}
return result, nil
}
// proofPeriod computes the period of a given epoch.
func proofPeriod(epoch primitives.Epoch) uint64 {
return uint64(epoch / params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest)
}
// encodeOffsetTable encodes the offset table to bytes.
func encodeOffsetTable(table proofOffsetTable) []byte {
result := make([]byte, proofOffsetTableSize)
for i, entry := range table {
offset := i * proofSlotSize
binary.BigEndian.PutUint32(result[offset:offset+proofOffsetSize], entry.offset)
binary.BigEndian.PutUint32(result[offset+proofOffsetSize:offset+proofSlotSize], entry.size)
}
return result
}
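
For context on what this deletion removes, a minimal usage sketch of the ProofStorage API as it stood before this change (the base path literal is hypothetical, and the snippet no longer compiles once the file is gone):

```go
package example

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// storeAndReload sketches the lifecycle of the removed ProofStorage API:
// construct it against a base path, persist verified proofs, then read them back.
func storeAndReload(ctx context.Context, root [fieldparams.RootLength]byte, proofs []blocks.VerifiedROSignedExecutionProof) ([]*ethpb.SignedExecutionProof, error) {
	ps, err := filesystem.NewProofStorage(ctx,
		filesystem.WithProofBasePath("/tmp/prysm-proofs"), // hypothetical path
		filesystem.WithProofRetentionEpochs(params.BeaconConfig().MinEpochsForDataColumnSidecarsRequest),
	)
	if err != nil {
		return nil, err
	}
	ps.WarmCache() // rebuild the in-memory cache from any files already on disk
	if err := ps.Save(proofs); err != nil { // all proofs must share one block root
		return nil, err
	}
	return ps.Get(root, nil) // nil proofIDs returns every proof stored for the root
}
```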

View File

@@ -1,206 +0,0 @@
package filesystem
import (
"slices"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)
// ProofStorageSummary represents cached information about the proofs on disk for each root the cache knows about.
type ProofStorageSummary struct {
epoch primitives.Epoch
proofTypes map[uint8]bool
}
// HasProof returns true if the proof with the given proofID is available in the filesystem.
func (s ProofStorageSummary) HasProof(proofID uint8) bool {
if s.proofTypes == nil {
return false
}
_, ok := s.proofTypes[proofID]
return ok
}
// Count returns the number of available proofs.
func (s ProofStorageSummary) Count() int {
return len(s.proofTypes)
}
// All returns all stored proofIDs sorted in ascending order.
func (s ProofStorageSummary) All() []uint8 {
if s.proofTypes == nil {
return nil
}
proofTypes := make([]uint8, 0, len(s.proofTypes))
for proofType := range s.proofTypes {
proofTypes = append(proofTypes, proofType)
}
slices.Sort(proofTypes)
return proofTypes
}
type proofCache struct {
mu sync.RWMutex
proofCount float64
lowestCachedEpoch primitives.Epoch
highestCachedEpoch primitives.Epoch
cache map[[fieldparams.RootLength]byte]ProofStorageSummary
}
func newProofCache() *proofCache {
return &proofCache{
cache: make(map[[fieldparams.RootLength]byte]ProofStorageSummary),
lowestCachedEpoch: params.BeaconConfig().FarFutureEpoch,
}
}
// Summary returns the ProofStorageSummary for `root`.
// The ProofStorageSummary can be used to check for the presence of proofs based on proofID.
func (pc *proofCache) Summary(root [fieldparams.RootLength]byte) ProofStorageSummary {
pc.mu.RLock()
defer pc.mu.RUnlock()
return pc.cache[root]
}
// HighestEpoch returns the highest cached epoch.
func (pc *proofCache) HighestEpoch() primitives.Epoch {
pc.mu.RLock()
defer pc.mu.RUnlock()
return pc.highestCachedEpoch
}
// set adds a proof to the cache.
func (pc *proofCache) set(ident ProofIdent) {
pc.mu.Lock()
defer pc.mu.Unlock()
summary := pc.cache[ident.BlockRoot]
if summary.proofTypes == nil {
summary.proofTypes = make(map[uint8]bool)
}
summary.epoch = ident.Epoch
if _, exists := summary.proofTypes[ident.ProofType]; exists {
pc.cache[ident.BlockRoot] = summary
return
}
summary.proofTypes[ident.ProofType] = true
pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
pc.cache[ident.BlockRoot] = summary
pc.proofCount++
proofDiskCount.Set(pc.proofCount)
proofWrittenCounter.Inc()
}
// setMultiple adds multiple proofs to the cache.
func (pc *proofCache) setMultiple(ident ProofsIdent) {
pc.mu.Lock()
defer pc.mu.Unlock()
summary := pc.cache[ident.BlockRoot]
if summary.proofTypes == nil {
summary.proofTypes = make(map[uint8]bool)
}
summary.epoch = ident.Epoch
addedCount := 0
for _, proofID := range ident.ProofTypes {
if _, exists := summary.proofTypes[proofID]; exists {
continue
}
summary.proofTypes[proofID] = true
addedCount++
}
if addedCount == 0 {
pc.cache[ident.BlockRoot] = summary
return
}
pc.lowestCachedEpoch = min(pc.lowestCachedEpoch, ident.Epoch)
pc.highestCachedEpoch = max(pc.highestCachedEpoch, ident.Epoch)
pc.cache[ident.BlockRoot] = summary
pc.proofCount += float64(addedCount)
proofDiskCount.Set(pc.proofCount)
proofWrittenCounter.Add(float64(addedCount))
}
// get returns the ProofStorageSummary for the given block root.
// If the root is not in the cache, the second return value will be false.
func (pc *proofCache) get(blockRoot [fieldparams.RootLength]byte) (ProofStorageSummary, bool) {
pc.mu.RLock()
defer pc.mu.RUnlock()
summary, ok := pc.cache[blockRoot]
return summary, ok
}
// evict removes the ProofStorageSummary for the given block root from the cache.
func (pc *proofCache) evict(blockRoot [fieldparams.RootLength]byte) int {
pc.mu.Lock()
defer pc.mu.Unlock()
summary, ok := pc.cache[blockRoot]
if !ok {
return 0
}
deleted := len(summary.proofTypes)
delete(pc.cache, blockRoot)
if deleted > 0 {
pc.proofCount -= float64(deleted)
proofDiskCount.Set(pc.proofCount)
}
return deleted
}
// pruneUpTo removes all entries from the cache up to the given target epoch included.
func (pc *proofCache) pruneUpTo(targetEpoch primitives.Epoch) uint64 {
pc.mu.Lock()
defer pc.mu.Unlock()
prunedCount := uint64(0)
newLowestCachedEpoch := params.BeaconConfig().FarFutureEpoch
newHighestCachedEpoch := primitives.Epoch(0)
for blockRoot, summary := range pc.cache {
epoch := summary.epoch
if epoch > targetEpoch {
newLowestCachedEpoch = min(newLowestCachedEpoch, epoch)
newHighestCachedEpoch = max(newHighestCachedEpoch, epoch)
}
if epoch <= targetEpoch {
prunedCount += uint64(len(summary.proofTypes))
delete(pc.cache, blockRoot)
}
}
if prunedCount > 0 {
pc.lowestCachedEpoch = newLowestCachedEpoch
pc.highestCachedEpoch = newHighestCachedEpoch
pc.proofCount -= float64(prunedCount)
proofDiskCount.Set(pc.proofCount)
}
return prunedCount
}
// clear removes all entries from the cache.
func (pc *proofCache) clear() uint64 {
return pc.pruneUpTo(params.BeaconConfig().FarFutureEpoch)
}

View File

@@ -89,7 +89,6 @@ type NoHeadAccessDatabase interface {
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
// State related methods.
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
@@ -97,7 +96,6 @@ type NoHeadAccessDatabase interface {
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
SlotInDiffTree(primitives.Slot) (uint64, int, error)
// Checkpoint operations.
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error

View File

@@ -32,7 +32,6 @@ go_library(
"state_diff_helpers.go",
"state_summary.go",
"state_summary_cache.go",
"testing_helpers.go",
"utils.go",
"validated_checkpoint.go",
"wss.go",

View File

@@ -22,10 +22,6 @@ var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
// ErrStateDiffIncompatible is returned when state-diff feature is enabled
// but the database was created without state-diff support.
var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")

View File

@@ -42,10 +42,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
return errors.Wrap(err, "could not save genesis block root")
}
if err := s.initializeStateDiff(0, genesisState); err != nil {
return errors.Wrap(err, "failed to initialize state diff for genesis")
}
return nil
}

View File

@@ -203,47 +203,17 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
return nil, err
}
if err := kv.startStateDiff(ctx); err != nil {
if errors.Is(err, ErrStateDiffIncompatible) {
return kv, err
if features.Get().EnableStateDiff {
sdCache, err := newStateDiffCache(kv)
if err != nil {
return nil, err
}
return nil, err
kv.stateDiffCache = sdCache
}
return kv, nil
}
func (kv *Store) startStateDiff(ctx context.Context) error {
if !features.Get().EnableStateDiff {
return nil
}
// Check if offset already exists (existing state-diff database).
hasOffset, err := kv.hasStateDiffOffset()
if err != nil {
return err
}
if hasOffset {
// Existing state-diff database - restarts not yet supported.
return errors.New("restarting with existing state-diff database not yet supported")
}
// Check if this is a new database (no head block).
headBlock, err := kv.HeadBlock(ctx)
if err != nil {
return errors.Wrap(err, "could not get head block")
}
if headBlock == nil {
// New database - will be initialized later during checkpoint/genesis sync.
// stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
} else {
// Existing database without state-diff - return store with error for caller to handle.
return ErrStateDiffIncompatible
}
return nil
}
// ClearDB removes the previously stored database in the data directory.
func (s *Store) ClearDB() error {
if err := s.Close(); err != nil {

View File

@@ -1053,10 +1053,6 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
return nil, err
}
if uint64(slot) < s.getOffset() {
return nil, ErrSlotBeforeOffset
}
st, err := s.stateByDiff(ctx, slot)
if err != nil {
return nil, err
@@ -1074,10 +1070,6 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
return false, err
}
if uint64(slot) < s.getOffset() {
return false, ErrSlotBeforeOffset
}
stateLvl := computeLevel(s.getOffset(), slot)
return stateLvl != -1, nil
}

View File

@@ -23,16 +23,6 @@ const (
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
*/
// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
// If it is, it also returns the offset and level in the tree.
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
offset := s.getOffset()
if uint64(slot) < offset {
return 0, -1, ErrSlotBeforeOffset
}
return offset, computeLevel(offset, slot), nil
}
// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
@@ -43,10 +33,13 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
}
slot := st.Slot()
offset, lvl, err := s.SlotInDiffTree(slot)
if err != nil {
return errors.Wrap(err, "could not determine if slot is in diff tree")
offset := s.getOffset()
if uint64(slot) < offset {
return ErrSlotBeforeOffset
}
// Find the level to save the state.
lvl := computeLevel(offset, slot)
if lvl == -1 {
return nil
}

View File

@@ -25,7 +25,7 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
return bbolt.ErrBucketNotFound
}
offsetBytes := bucket.Get(offsetKey)
offsetBytes := bucket.Get([]byte("offset"))
if offsetBytes == nil {
return errors.New("state diff cache: offset not found")
}

View File

@@ -9,19 +9,17 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/math"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/runtime/version"
pkgerrors "github.com/pkg/errors"
"go.etcd.io/bbolt"
)
var (
offsetKey = []byte("offset")
ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
ErrSlotBeforeOffset = errors.New("slot is before root offset")
)
func makeKeyForStateDiffTree(level int, slot uint64) []byte {
@@ -75,9 +73,6 @@ func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (an
// computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
func computeLevel(offset uint64, slot primitives.Slot) int {
if uint64(slot) < offset {
return -1
}
rel := uint64(slot) - offset
for i, exp := range flags.Get().StateDiffExponents {
if exp < 2 || exp >= 64 {
@@ -124,66 +119,6 @@ func (s *Store) getOffset() uint64 {
return s.stateDiffCache.getOffset()
}
// hasStateDiffOffset checks if the state-diff offset has been set in the database.
// This is used to detect if an existing database has state-diff enabled.
func (s *Store) hasStateDiffOffset() (bool, error) {
var hasOffset bool
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return nil
}
hasOffset = bucket.Get(offsetKey) != nil
return nil
})
return hasOffset, err
}
// initializeStateDiff sets up the state-diff schema for a new database.
// This should be called during checkpoint sync or genesis sync.
func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
// Return early if the feature is not set
if !features.Get().EnableStateDiff {
return nil
}
// Only reinitialize if the offset is different
if s.stateDiffCache != nil {
if s.stateDiffCache.getOffset() == uint64(slot) {
log.WithField("offset", slot).Warning("Ignoring state diff cache reinitialization")
return nil
}
}
// Write offset directly to the database (without using cache which doesn't exist yet).
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
return bucket.Put(offsetKey, offsetBytes)
})
if err != nil {
return pkgerrors.Wrap(err, "failed to set offset")
}
// Create the state diff cache (this will read the offset from the database).
sdCache, err := newStateDiffCache(s)
if err != nil {
return pkgerrors.Wrap(err, "failed to create state diff cache")
}
s.stateDiffCache = sdCache
// Save the initial state as a full snapshot.
if err := s.saveFullSnapshot(initialState); err != nil {
return pkgerrors.Wrap(err, "failed to save initial snapshot")
}
log.WithField("offset", slot).Info("Initialized state-diff cache")
return nil
}
func keyForSnapshot(v int) ([]byte, error) {
switch v {
case version.Fulu:

View File

@@ -43,12 +43,8 @@ func TestStateDiff_ComputeLevel(t *testing.T) {
offset := db.getOffset()
// should be -1. slot < offset
lvl := computeLevel(10, primitives.Slot(9))
require.Equal(t, -1, lvl)
// 2 ** 21
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
require.Equal(t, 0, lvl)
// 2 ** 21 * 3

View File

@@ -1395,23 +1395,6 @@ func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
require.IsNil(t, readSt)
})
t.Run("slot before offset", func(t *testing.T) {
db := setupDB(t)
setDefaultStateDiffExponents()
err := setOffsetInDB(db, 10)
require.NoError(t, err)
r := bytesutil.ToBytes32([]byte{'A'})
ss := &ethpb.StateSummary{Slot: 9, Root: r[:]}
err = db.SaveStateSummary(t.Context(), ss)
require.NoError(t, err)
st, err := db.getStateUsingStateDiff(t.Context(), r)
require.ErrorIs(t, err, ErrSlotBeforeOffset)
require.IsNil(t, st)
})
t.Run("Full state snapshot", func(t *testing.T) {
t.Run("using state summary", func(t *testing.T) {
for v := range version.All() {
@@ -1644,21 +1627,4 @@ func TestStore_HasStateUsingStateDiff(t *testing.T) {
}
})
t.Run("slot before offset", func(t *testing.T) {
db := setupDB(t)
setDefaultStateDiffExponents()
err := setOffsetInDB(db, 10)
require.NoError(t, err)
r := bytesutil.ToBytes32([]byte{'B'})
ss := &ethpb.StateSummary{Slot: 0, Root: r[:]}
err = db.SaveStateSummary(t.Context(), ss)
require.NoError(t, err)
hasState, err := db.hasStateUsingStateDiff(t.Context(), r)
require.ErrorIs(t, err, ErrSlotBeforeOffset)
require.Equal(t, false, hasState)
})
}

View File

@@ -1,37 +0,0 @@
package kv
import (
"encoding/binary"
"testing"
"go.etcd.io/bbolt"
)
// InitStateDiffCacheForTesting initializes the state diff cache with the given offset.
// This is intended for testing purposes when setting up state diff after database creation.
// This file is only compiled when the "testing" build tag is set.
func (s *Store) InitStateDiffCacheForTesting(t testing.TB, offset uint64) error {
// First, set the offset in the database.
err := s.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(stateDiffBucket)
if bucket == nil {
return bbolt.ErrBucketNotFound
}
offsetBytes := make([]byte, 8)
binary.LittleEndian.PutUint64(offsetBytes, offset)
return bucket.Put([]byte("offset"), offsetBytes)
})
if err != nil {
return err
}
// Then create the state diff cache.
sdCache, err := newStateDiffCache(s)
if err != nil {
return err
}
s.stateDiffCache = sdCache
return nil
}

View File

@@ -110,8 +110,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
return errors.Wrap(err, "save finalized checkpoint")
}
if err := s.initializeStateDiff(state.Slot(), state); err != nil {
return errors.Wrap(err, "failed to initialize state diff")
}
return nil
}

View File

@@ -75,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
p2p := p2pTesting.NewTestP2P(t)
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
timeForGoroutinesToFinish := 20 * time.Microsecond
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
l0 := util.NewTestLightClient(t, version.Altair)
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
@@ -86,9 +87,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update0, true)
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Eventually(t, func() bool {
return p2p.BroadcastCalled.Load()
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update when previous is nil")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
p2p.BroadcastCalled.Store(false) // Reset for next test
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -102,7 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update1, true)
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test
@@ -117,9 +117,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update2, true)
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Eventually(t, func() bool {
return p2p.BroadcastCalled.Load()
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update with supermajority")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
p2p.BroadcastCalled.Store(false) // Reset for next test
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
@@ -133,7 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update3, true)
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast")
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
@@ -147,9 +146,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update4, true)
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
require.Eventually(t, func() bool {
return p2p.BroadcastCalled.Load()
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after a new finality update with increased finality slot")
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
p2p.BroadcastCalled.Store(false) // Reset for next test
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
@@ -163,7 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update5, true)
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
@@ -177,7 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
lcStore.SetLastFinalityUpdate(update6, true)
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
}

View File

@@ -64,6 +64,7 @@ go_library(
"//monitoring/tracing:go_default_library",
"//runtime:go_default_library",
"//runtime/prereqs:go_default_library",
"//runtime/version:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",

View File

@@ -66,6 +66,7 @@ import (
"github.com/OffchainLabs/prysm/v7/monitoring/prometheus"
"github.com/OffchainLabs/prysm/v7/runtime"
"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
"github.com/OffchainLabs/prysm/v7/runtime/version"
"github.com/ethereum/go-ethereum/common"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -123,8 +124,6 @@ type BeaconNode struct {
BlobStorageOptions []filesystem.BlobStorageOption
DataColumnStorage *filesystem.DataColumnStorage
DataColumnStorageOptions []filesystem.DataColumnStorageOption
ProofStorage *filesystem.ProofStorage
ProofStorageOptions []filesystem.ProofStorageOption
verifyInitWaiter *verification.InitializerWaiter
lhsp *verification.LazyHeadStateProvider
syncChecker *initialsync.SyncChecker
@@ -229,15 +228,6 @@ func New(cliCtx *cli.Context, cancel context.CancelFunc, opts ...Option) (*Beaco
return nil, errors.Wrap(err, "could not clear data column storage")
}
if beacon.ProofStorage == nil {
proofStorage, err := filesystem.NewProofStorage(cliCtx.Context, beacon.ProofStorageOptions...)
if err != nil {
return nil, errors.Wrap(err, "new proof storage")
}
beacon.ProofStorage = proofStorage
}
bfs, err := startBaseServices(cliCtx, beacon, depositAddress, dbClearer)
if err != nil {
return nil, errors.Wrap(err, "could not start modules")
@@ -479,6 +469,10 @@ func (b *BeaconNode) OperationFeed() event.SubscriberSender {
func (b *BeaconNode) Start() {
b.lock.Lock()
log.WithFields(logrus.Fields{
"version": version.Version(),
}).Info("Starting beacon node")
b.services.StartAll()
stop := b.stop
@@ -546,12 +540,7 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
log.WithField("databasePath", dbPath).Info("Checking DB")
d, err := kv.NewKVStore(ctx, dbPath)
if errors.Is(err, kv.ErrStateDiffIncompatible) {
log.WithError(err).Warn("Disabling state-diff feature")
cfg := features.Get()
cfg.EnableStateDiff = false
features.Init(cfg)
} else if err != nil {
if err != nil {
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
}
@@ -758,13 +747,11 @@ func (b *BeaconNode) registerBlockchainService(fc forkchoice.ForkChoicer, gs *st
blockchain.WithSyncComplete(syncComplete),
blockchain.WithBlobStorage(b.BlobStorage),
blockchain.WithDataColumnStorage(b.DataColumnStorage),
blockchain.WithProofStorage(b.ProofStorage),
blockchain.WithTrackedValidatorsCache(b.trackedValidatorsCache),
blockchain.WithPayloadIDCache(b.payloadIDCache),
blockchain.WithSyncChecker(b.syncChecker),
blockchain.WithSlasherEnabled(b.slasherEnabled),
blockchain.WithLightClientStore(b.lcStore),
blockchain.WithOperationNotifier(b),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
@@ -849,7 +836,6 @@ func (b *BeaconNode) registerSyncService(initialSyncComplete chan struct{}, bFil
regularsync.WithStateNotifier(b),
regularsync.WithBlobStorage(b.BlobStorage),
regularsync.WithDataColumnStorage(b.DataColumnStorage),
regularsync.WithExecutionProofStorage(b.ProofStorage),
regularsync.WithVerifierWaiter(b.verifyInitWaiter),
regularsync.WithAvailableBlocker(bFillStore),
regularsync.WithTrackedValidatorsCache(b.trackedValidatorsCache),
@@ -976,7 +962,6 @@ func (b *BeaconNode) registerRPCService(router *http.ServeMux) error {
BlockReceiver: chainService,
BlobReceiver: chainService,
DataColumnReceiver: chainService,
ProofReceiver: chainService,
AttestationReceiver: chainService,
GenesisTimeFetcher: chainService,
GenesisFetcher: chainService,

View File

@@ -68,6 +68,7 @@ func TestNodeClose_OK(t *testing.T) {
}
func TestNodeStart_Ok(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
set := flag.NewFlagSet("test", 0)
@@ -96,9 +97,11 @@ func TestNodeStart_Ok(t *testing.T) {
}()
time.Sleep(3 * time.Second)
node.Close()
require.LogsContain(t, hook, "Starting beacon node")
}
func TestNodeStart_SyncChecker(t *testing.T) {
hook := logTest.NewGlobal()
app := cli.App{}
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
set := flag.NewFlagSet("test", 0)
@@ -124,6 +127,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
time.Sleep(3 * time.Second)
assert.NotNil(t, node.syncChecker.Svc)
node.Close()
require.LogsContain(t, hook, "Starting beacon node")
}
// TestClearDB tests clearing the database

View File

@@ -35,13 +35,6 @@ func WithBuilderFlagOptions(opts []builder.Option) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithBlobStorage sets the BlobStorage backend for the BeaconNode
func WithBlobStorage(bs *filesystem.BlobStorage) Option {
return func(bn *BeaconNode) error {
@@ -59,6 +52,13 @@ func WithBlobStorageOptions(opt ...filesystem.BlobStorageOption) Option {
}
}
func WithConfigOptions(opt ...params.Option) Option {
return func(bn *BeaconNode) error {
bn.ConfigOptions = append(bn.ConfigOptions, opt...)
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithDataColumnStorage(bs *filesystem.DataColumnStorage) Option {
return func(bn *BeaconNode) error {
@@ -75,20 +75,3 @@ func WithDataColumnStorageOptions(opt ...filesystem.DataColumnStorageOption) Opt
return nil
}
}
// WithDataColumnStorage sets the DataColumnStorage backend for the BeaconNode
func WithProofStorage(bs *filesystem.ProofStorage) Option {
return func(bn *BeaconNode) error {
bn.ProofStorage = bs
return nil
}
}
// WithDataColumnStorageOptions appends 1 or more filesystem.DataColumnStorageOption on the beacon node,
// to be used when initializing data column storage.
func WithProofStorageOption(opt ...filesystem.ProofStorageOption) Option {
return func(bn *BeaconNode) error {
bn.ProofStorageOptions = append(bn.ProofStorageOptions, opt...)
return nil
}
}

View File

@@ -49,7 +49,6 @@ func (s *Service) prepareForkChoiceAtts() {
}
case <-s.ctx.Done():
log.Debug("Context closed, exiting routine")
ticker.Done()
return
}
}

View File

@@ -13,6 +13,8 @@ go_library(
"doc.go",
"fork.go",
"fork_watcher.go",
"gossip_peer_controller.go",
"gossip_peer_crawler.go",
"gossip_scoring_params.go",
"gossip_topic_mappings.go",
"handshake.go",
@@ -52,6 +54,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/gossipcrawler:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -116,6 +119,7 @@ go_library(
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_sync//semaphore:go_default_library",
],
)
@@ -129,6 +133,8 @@ go_test(
"dial_relay_node_test.go",
"discovery_test.go",
"fork_test.go",
"gossip_peer_controller_test.go",
"gossip_peer_crawler_test.go",
"gossip_scoring_params_test.go",
"gossip_topic_mappings_test.go",
"message_id_test.go",
@@ -155,9 +161,11 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/gossipcrawler:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
@@ -166,7 +174,6 @@ go_test(
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state/stategen/mock:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
@@ -205,8 +212,10 @@ go_test(
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus/testutil:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -61,7 +61,10 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
if !ok {
return errors.Errorf("message of %T does not support marshaller interface", msg)
}
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
fullTopic := fmt.Sprintf(topic, forkDigest) + s.Encoding().ProtocolSuffix()
return s.broadcastObject(ctx, castMsg, fullTopic)
}
// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be
@@ -107,6 +110,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
}
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [fieldparams.VersionLength]byte) {
topic := AttestationSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -117,7 +121,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
// Ensure we have peers with this subnet.
s.subnetLocker(subnet).RLock()
hasPeer := s.hasPeerWithSubnet(attestationToTopic(subnet, forkDigest))
hasPeer := s.hasPeerWithTopic(topic)
s.subnetLocker(subnet).RUnlock()
span.SetAttributes(
@@ -132,7 +136,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
s.subnetLocker(subnet).Lock()
defer s.subnetLocker(subnet).Unlock()
if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
@@ -154,13 +158,14 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
return
}
if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
if err := s.broadcastObject(ctx, att, topic); err != nil {
log.WithError(err).Error("Failed to broadcast attestation")
tracing.AnnotateError(span, err)
}
}
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
topic := SyncCommitteeSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -174,7 +179,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
// to ensure that we can reuse the same subnet locker.
wrappedSubIdx := subnet + syncLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
hasPeer := s.hasPeerWithTopic(topic)
s.subnetLocker(wrappedSubIdx).RUnlock()
span.SetAttributes(
@@ -188,7 +193,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
@@ -206,7 +211,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
return
}
if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
if err := s.broadcastObject(ctx, sMsg, topic); err != nil {
log.WithError(err).Error("Failed to broadcast sync committee message")
tracing.AnnotateError(span, err)
}
@@ -234,6 +239,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [fieldparams.VersionLength]byte) {
topic := BlobSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
@@ -244,7 +250,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
wrappedSubIdx := subnet + blobSubnetLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
hasPeer := s.hasPeerWithTopic(topic)
s.subnetLocker(wrappedSubIdx).RUnlock()
if !hasPeer {
@@ -253,7 +259,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
return errors.Wrap(err, "find peers with subnets")
}
@@ -265,7 +271,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
}
}
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
if err := s.broadcastObject(ctx, blobSidecar, topic); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}
@@ -294,7 +300,7 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
}
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
if err := s.broadcastObject(ctx, update, LcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -328,7 +334,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
}
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
if err := s.broadcastObject(ctx, update, LcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
@@ -374,7 +380,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
topic = dataColumnSubnetToTopic(subnet, forkDigest)
topic = DataColumnSubnetTopic(forkDigest, subnet)
wrappedSubIdx = subnet + dataColumnSubnetVal
return
}
@@ -390,7 +396,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
// Check if we have a peer for this subnet (use RLock for read-only check).
mu := s.subnetLocker(wrappedSubIdx)
mu.RLock()
hasPeer := s.hasPeerWithSubnet(topic)
hasPeer := s.hasPeerWithTopic(topic)
mu.RUnlock()
if hasPeer {
@@ -433,10 +439,10 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
ctx := trace.NewContext(s.ctx, span)
defer span.End()
topic, wrappedSubIdx, subnet := topicFunc(sidecar)
topic, wrappedSubIdx, _ := topicFunc(sidecar)
// Find peers for this sidecar's subnet.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot find peers if needed")
return
@@ -539,41 +545,37 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
func (s *Service) findPeersIfNeeded(
ctx context.Context,
wrappedSubIdx uint64,
topicFormat string,
forkDigest [fieldparams.VersionLength]byte,
subnet uint64,
topic string,
) error {
// Sending a data column sidecar to only one peer is not ideal,
// but it ensures at least one peer receives it.
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
// No peers found, attempt to find peers with this subnet.
if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
return errors.Wrap(err, "find peers with subnet")
}
return nil
}
// encodeGossipMessage encodes an object for gossip transmission.
// It returns the encoded bytes and the full topic with protocol suffix.
func (s *Service) encodeGossipMessage(obj ssz.Marshaler, topic string) ([]byte, string, error) {
func (s *Service) encodeGossipMessage(obj ssz.Marshaler) ([]byte, error) {
buf := new(bytes.Buffer)
if _, err := s.Encoding().EncodeGossip(buf, obj); err != nil {
return nil, "", fmt.Errorf("could not encode message: %w", err)
return nil, fmt.Errorf("could not encode message: %w", err)
}
return buf.Bytes(), topic + s.Encoding().ProtocolSuffix(), nil
return buf.Bytes(), nil
}
// broadcastObject broadcasts a message to other peers in our gossip mesh.
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, fullTopic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
defer span.End()
span.SetAttributes(trace.StringAttribute("topic", topic))
span.SetAttributes(trace.StringAttribute("topic", fullTopic))
data, fullTopic, err := s.encodeGossipMessage(obj, topic)
data, err := s.encodeGossipMessage(obj)
if err != nil {
tracing.AnnotateError(span, err)
return err
@@ -597,13 +599,13 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
// batchObject adds an object to a message batch for a future broadcast.
// The caller MUST publish the batch after all messages have been added.
func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, obj ssz.Marshaler, topic string) error {
func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, obj ssz.Marshaler, fullTopic string) error {
ctx, span := trace.StartSpan(ctx, "p2p.batchObject")
defer span.End()
span.SetAttributes(trace.StringAttribute("topic", topic))
span.SetAttributes(trace.StringAttribute("topic", fullTopic))
data, fullTopic, err := s.encodeGossipMessage(obj, topic)
data, err := s.encodeGossipMessage(obj)
if err != nil {
tracing.AnnotateError(span, err)
return err
@@ -624,27 +626,3 @@ func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, o
}
return nil
}
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}
func lcOptimisticToTopic(forkDigest [4]byte) string {
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest)
}
func lcFinalityToTopic(forkDigest [4]byte) string {
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest)
}
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
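A minimal sketch of the new calling convention (illustrative, not part of the diff): the exported topic constructors such as AttestationSubnetTopic now return the full topic, protocol suffix included, so callers hand the result straight to broadcastObject instead of formatting and suffixing it themselves.

topic := AttestationSubnetTopic(forkDigest, subnet) // full topic, suffix already appended
if err := s.broadcastObject(ctx, att, topic); err != nil {
	log.WithError(err).Error("Failed to broadcast attestation")
}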

View File

@@ -30,6 +30,7 @@ import (
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
@@ -72,10 +73,7 @@ func TestService_Broadcast(t *testing.T) {
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -114,6 +112,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
if gtm := GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()]; gtm != AttestationSubnetTopicFormat {
t.Errorf("Constant is out of date. Wanted %s, got %s", AttestationSubnetTopicFormat, gtm)
}
s := Service{}
tests := []struct {
att *ethpb.Attestation
@@ -126,7 +125,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 2,
},
},
topic: "/eth2/00000000/beacon_attestation_2",
topic: "/eth2/00000000/beacon_attestation_2" + s.Encoding().ProtocolSuffix(),
},
{
att: &ethpb.Attestation{
@@ -135,7 +134,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 10,
},
},
topic: "/eth2/00000000/beacon_attestation_21",
topic: "/eth2/00000000/beacon_attestation_21" + s.Encoding().ProtocolSuffix(),
},
{
att: &ethpb.Attestation{
@@ -144,12 +143,12 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 529,
},
},
topic: "/eth2/00000000/beacon_attestation_8",
topic: "/eth2/00000000/beacon_attestation_8" + s.Encoding().ProtocolSuffix(),
},
}
for _, tt := range tests {
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(100, tt.att.Data.CommitteeIndex, tt.att.Data.Slot)
assert.Equal(t, tt.topic, attestationToTopic(subnet, [4]byte{} /* fork digest */), "Wrong topic")
assert.Equal(t, tt.topic, AttestationSubnetTopic([4]byte{}, subnet), "Wrong topic")
}
}
@@ -178,21 +177,16 @@ func TestService_BroadcastAttestation(t *testing.T) {
msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
subnet := uint64(5)
topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
topic := AttestationSubnetTopic(digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -234,6 +228,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// Setup bootnode.
cfg := &Config{PingInterval: testPingInterval, DB: db}
cfg.UDPPort = uint(port)
cfg.TCPPort = uint(port)
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
genesisTime := time.Now()
@@ -259,8 +254,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
var listeners []*listenerWrapper
var hosts []host.Host
var configs []*Config
// setup other nodes.
cfg = &Config{
baseCfg := &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 2,
PingInterval: testPingInterval,
@@ -269,11 +265,21 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// Setup 2 different hosts
for i := uint(1); i <= 2; i++ {
h, pkey, ipAddr := createHost(t, port+i)
cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)
// Create a new config for each service to avoid shared mutations
cfg := &Config{
Discv5BootStrapAddrs: baseCfg.Discv5BootStrapAddrs,
MaxPeers: baseCfg.MaxPeers,
PingInterval: baseCfg.PingInterval,
DB: baseCfg.DB,
UDPPort: uint(port + i),
TCPPort: uint(port + i),
}
if len(listeners) > 0 {
cfg.Discv5BootStrapAddrs = append(cfg.Discv5BootStrapAddrs, listeners[len(listeners)-1].Self().String())
}
s := &Service{
cfg: cfg,
genesisTime: genesisTime,
@@ -286,18 +292,22 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
close(s.custodyInfoSet)
listener, err := s.startDiscoveryV5(ipAddr, pkey)
// Set for 2nd peer
assert.NoError(t, err, "Could not start discovery for node")
// Set listener for the service
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
// Set subnet for 2nd peer
if i == 2 {
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(subnet, true)
err := s.updateSubnetRecordWithMetadata(bitV)
require.NoError(t, err)
}
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
hosts = append(hosts, h)
configs = append(configs, cfg)
}
defer func() {
// Close down all peers.
@@ -332,7 +342,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
pubsub: ps1,
dv5Listener: listeners[0],
joinedTopics: map[string]*pubsub.Topic{},
cfg: cfg,
cfg: configs[0],
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
@@ -348,7 +358,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
pubsub: ps2,
dv5Listener: listeners[1],
joinedTopics: map[string]*pubsub.Topic{},
cfg: cfg,
cfg: configs[1],
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
@@ -361,14 +371,12 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
go p2.listenForNewNodes()
msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
topic := AttestationSubnetTopic(digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
// We don't use our internal subscribe method
// due to using floodsub over here.
tpHandle, err := p2.JoinTopic(topic)
@@ -381,15 +389,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
_, err = tpHandle.Subscribe()
require.NoError(t, err)
// This test specifically tests discovery-based peer finding, which requires
// time for nodes to discover each other. Using a fixed sleep here is intentional
// as we're testing the discovery timing behavior.
time.Sleep(500 * time.Millisecond)
// Verify mesh establishment after discovery
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0 && len(p2.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(500 * time.Millisecond) // libp2p fails without this delay...
nodePeers := p.pubsub.ListPeers(topic)
nodePeers2 := p2.pubsub.ListPeers(topic)
@@ -447,21 +447,16 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
msg := util.HydrateSyncCommittee(&ethpb.SyncCommitteeMessage{})
subnet := uint64(5)
topic := SyncCommitteeSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = topic
GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = SyncCommitteeSubnetTopicFormat
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
topic := SyncCommitteeSubnetTopic(digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -527,21 +522,16 @@ func TestService_BroadcastBlob(t *testing.T) {
}
subnet := uint64(0)
topic := BlobSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = topic
GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = BlobSubnetTopicFormat
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic = fmt.Sprintf(topic, digest, subnet)
topic := BlobSubnetTopic(digest, subnet)
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -597,17 +587,13 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
topic := LcOptimisticToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -676,17 +662,13 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
topic := LcFinalityToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(p.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
// Async listen for the pubsub, must be before the broadcast.
var wg sync.WaitGroup
@@ -730,7 +712,6 @@ func TestService_BroadcastDataColumn(t *testing.T) {
const (
port = 2000
columnIndex = 12
topicFormat = DataColumnSubnetTopicFormat
)
ctx := t.Context()
@@ -788,7 +769,17 @@ func TestService_BroadcastDataColumn(t *testing.T) {
require.NoError(t, err)
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
topic := DataColumnSubnetTopic(digest, subnet)
crawler, err := NewGossipPeerCrawler(t.Context(), service, listener, 1*time.Second, 1*time.Second, 10,
func(n *enode.Node) bool { return true },
service.Peers().Scorers().Score)
require.NoError(t, err)
err = crawler.Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
return []string{topic}, nil
})
require.NoError(t, err)
service.gossipDialer = NewGossipPeerDialer(t.Context(), crawler, service.PubSub().ListPeers, service.DialPeers)
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
verifiedRoSidecar := verifiedRoSidecars[0]
@@ -797,10 +788,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
// Wait for libp2p mesh to establish
require.Eventually(t, func() bool {
return len(service.pubsub.ListPeers(topic)) > 0
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
// libp2p fails without this delay
time.Sleep(50 * time.Millisecond)
// Broadcast to peers and wait.
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})

View File

@@ -369,11 +369,11 @@ func (s *Service) listenForNewNodes() {
}
}
// FindAndDialPeersWithSubnets ensures that our node is connected to enough peers.
// If, the threshold is met, then this function immediately returns.
// findAndDialPeers ensures that our node is connected to enough peers.
// If the threshold is met, then this function immediately returns.
// Otherwise, it searches for new peers and dials them.
// If `ctx is canceled while searching for peers, search is stopped, but new found peers are still dialed.
// In this case, the function returns an error.
// If `ctx` is canceled while searching for peers, search is stopped, but newly
// found peers are still dialed. In this case, the function returns an error.
func (s *Service) findAndDialPeers(ctx context.Context) error {
// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt
@@ -404,8 +404,7 @@ func (s *Service) findAndDialPeers(ctx context.Context) error {
return err
}
dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
dialedPeerCount := s.DialPeers(s.ctx, maxConcurrentDials, peersToDial)
if dialedPeerCount > missingPeerCount {
missingPeerCount = 0
continue
@@ -554,6 +553,7 @@ func (s *Service) createListener(
Bootnodes: bootNodes,
PingInterval: s.cfg.PingInterval,
NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
V5RespTimeout: 300 * time.Millisecond,
}
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
@@ -589,11 +589,6 @@ func (s *Service) createLocalNode(
localNode.Set(quicEntry)
}
if features.Get().EnableZkvm {
zkvmKeyEntry := enr.WithEntry(zkvmEnabledKeyEnrKey, true)
localNode.Set(zkvmKeyEntry)
}
localNode.SetFallbackIP(ipAddr)
localNode.SetFallbackUDP(udpPort)

View File

@@ -25,7 +25,6 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
"github.com/OffchainLabs/prysm/v7/config/features"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
@@ -244,19 +243,12 @@ func TestCreateLocalNode(t *testing.T) {
name string
cfg *Config
expectedError bool
zkvmEnabled bool
}{
{
name: "valid config",
cfg: &Config{},
expectedError: false,
},
{
name: "valid config with zkVM enabled",
cfg: &Config{},
expectedError: false,
zkvmEnabled: true,
},
{
name: "invalid host address",
cfg: &Config{HostAddress: "invalid"},
@@ -281,15 +273,6 @@ func TestCreateLocalNode(t *testing.T) {
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
if tt.zkvmEnabled {
resetCfg := features.InitWithReset(&features.Flags{
EnableZkvm: true,
})
t.Cleanup(func() {
resetCfg()
})
}
// Define ports. Use unique ports since this test validates ENR content.
const (
udpPort = 3100
@@ -365,14 +348,6 @@ func TestCreateLocalNode(t *testing.T) {
custodyGroupCount := new(uint64)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().CustodyGroupCountKey, custodyGroupCount)))
require.Equal(t, custodyRequirement, *custodyGroupCount)
// Check zkVM enabled key if applicable.
if tt.zkvmEnabled {
zkvmEnabled := new(bool)
require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(params.BeaconNetworkConfig().ZkvmEnabledKey, zkvmEnabled)))
require.Equal(t, features.Get().EnableZkvm, *zkvmEnabled)
}
})
}
}
@@ -507,12 +482,12 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
s.Start()
<-exitRoutine
}()
time.Sleep(50 * time.Millisecond) // Wait for service initialization
time.Sleep(50 * time.Millisecond)
var vr [32]byte
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
require.Eventually(t, func() bool {
return len(s.host.Network().Peers()) == 5
}, 10*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
time.Sleep(4 * time.Second)
ps := s.host.Network().Peers()
assert.Equal(t, 5, len(ps), "Not all peers added to peerstore")
require.NoError(t, s.Stop())
exitRoutine <- true
}

View File

@@ -0,0 +1,343 @@
package p2p
import (
"context"
"math"
"slices"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
)
const dialInterval = 500 * time.Millisecond
const peerCountLogInterval = 5 * time.Minute
const topicMonitorInterval = 200 * time.Millisecond
// GossipPeerDialer maintains minimum peer counts for gossip topics by periodically
// dialing new peers discovered by a crawler. It runs a background loop that checks each
// topic's peer count and dials new peers when below the target threshold.
type GossipPeerDialer struct {
ctx context.Context
listPeers func(topic string) []peer.ID
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
crawler gossipcrawler.Crawler
topicsProvider gossipcrawler.SubnetTopicsProvider
cachedTopics map[string]int
once sync.Once
}
// NewGossipPeerDialer creates a new GossipPeerDialer instance.
//
// Parameters:
// - ctx: Parent context that controls the lifecycle of the dialer. When cancelled,
// the background dial loop will terminate.
// - crawler: Source of peer candidates for each topic. The crawler maintains a registry
// of peers discovered through DHT crawling, indexed by the topics they subscribe to.
// - listPeers: Function that returns the current peers connected for a given topic.
// Used to determine how many additional peers need to be dialed.
// - dialPeers: Function that dials the given enode.Node peers with a concurrency limit.
// Returns the number of successful dials.
//
// The dialer must be started with Start() before it begins maintaining peer counts.
func NewGossipPeerDialer(
ctx context.Context,
crawler gossipcrawler.Crawler,
listPeers func(topic string) []peer.ID,
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint,
) *GossipPeerDialer {
return &GossipPeerDialer{
ctx: ctx,
listPeers: listPeers,
dialPeers: dialPeers,
crawler: crawler,
}
}
// Start begins the background dial loop that maintains peer counts for all topics.
//
// The provider function is called on each tick to get the current list of topics that
// need peer maintenance. This allows the set of topics to change dynamically as the node
// subscribes/unsubscribes from subnets.
//
// Start is idempotent - calling it multiple times has no effect after the first call.
// Only the provider from the first call will be used; subsequent calls are ignored.
//
// The dial loop runs every dialInterval (500ms) and for each topic:
// 1. Checks current peer count via listPeers()
// 2. If below the per-topic min peer count, requests candidates from the crawler
// 3. Deduplicates peers across all topics to avoid redundant dials
// 4. Dials missing peers with rate limiting if enabled
//
// Returns nil always (error return preserved for interface compatibility).
func (g *GossipPeerDialer) Start(provider gossipcrawler.SubnetTopicsProvider) error {
g.once.Do(func() {
g.topicsProvider = provider
g.cachedTopics = make(map[string]int)
go g.dialLoop()
go g.logPeerCountsLoop()
go g.topicMonitorLoop()
})
return nil
}
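Illustrative wiring (an assumption for clarity, not taken verbatim from this diff): a p2p service that already holds a crawler could construct and start the dialer roughly as follows, with the provider mapping each gossip topic to its minimum peer count.

dialer := NewGossipPeerDialer(ctx, crawler, s.PubSub().ListPeers, s.DialPeers)
err := dialer.Start(func() map[string]int {
	// Hypothetical provider: a single topic with a minimum of 6 peers.
	return map[string]int{"/eth2/00000000/beacon_block/ssz_snappy": 6}
})

The same shape appears in TestService_BroadcastDataColumn above, where the service's PubSub().ListPeers and DialPeers are passed in directly.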
func (g *GossipPeerDialer) dialLoop() {
ticker := time.NewTicker(dialInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
peersToDial := g.selectPeersForTopics()
if len(peersToDial) == 0 {
continue
}
g.dialPeersWithRatelimiting(peersToDial)
case <-g.ctx.Done():
return
}
}
}
func (g *GossipPeerDialer) logPeerCountsLoop() {
ticker := time.NewTicker(peerCountLogInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
topics := g.topicsProvider()
for topic, minPeers := range topics {
currentPeers := len(g.listPeers(topic))
log.WithField("topic", topic).
WithField("currentPeers", currentPeers).
WithField("minPeers", minPeers).
Debug("Gossip topic peer count")
}
case <-g.ctx.Done():
return
}
}
}
// topicsChanged compares the new topics with cached topics and returns true
// if topics have been added or removed. Changes to min peer counts are ignored.
func (g *GossipPeerDialer) topicsChanged(newTopics map[string]int) bool {
if len(newTopics) != len(g.cachedTopics) {
return true
}
for topic := range newTopics {
if _, ok := g.cachedTopics[topic]; !ok {
return true
}
}
return false
}
func (g *GossipPeerDialer) topicMonitorLoop() {
ticker := time.NewTicker(topicMonitorInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
topics := g.topicsProvider()
if g.topicsChanged(topics) {
g.cachedTopics = topics
g.crawler.TriggerCrawl()
}
case <-g.ctx.Done():
return
}
}
}
// selectPeersForTopics builds a bidirectional mapping of topics to peers and selects
// peers to dial using a greedy algorithm that prioritizes peers serving multiple topics.
// When a peer is selected, the needed count is decremented for ALL topics that peer serves,
// avoiding redundant dials when one peer can satisfy multiple topic requirements.
func (g *GossipPeerDialer) selectPeersForTopics() []*enode.Node {
topicsWithMinPeers := g.topicsProvider()
// Calculate how many peers each topic still needs.
neededByTopic := make(map[string]int)
for topic, minPeers := range topicsWithMinPeers {
currentCount := len(g.listPeers(topic))
if needed := minPeers - currentCount; needed > 0 {
neededByTopic[topic] = needed
}
}
if len(neededByTopic) == 0 {
return nil
}
peerToTopics := make(map[enode.ID][]string)
nodeByID := make(map[enode.ID]*enode.Node)
for topic := range neededByTopic {
candidates := g.crawler.PeersForTopic(topic)
for _, node := range candidates {
id := node.ID()
if _, exists := nodeByID[id]; !exists {
nodeByID[id] = node
}
peerToTopics[id] = append(peerToTopics[id], topic)
}
}
// Build candidate list sorted by topic count (descending).
// Peers serving more topics are prioritized.
type candidate struct {
node *enode.Node
topics []string
}
candidates := make([]candidate, 0, len(peerToTopics))
for id, topics := range peerToTopics {
candidates = append(candidates, candidate{node: nodeByID[id], topics: topics})
}
// sort candidates by topic count (descending)
slices.SortFunc(candidates, func(a, b candidate) int {
return len(b.topics) - len(a.topics)
})
// Greedy selection with cross-topic accounting.
var selected []*enode.Node
for _, c := range candidates {
// Check if this peer serves any topic we still need.
servesNeededTopic := false
for _, topic := range c.topics {
if neededByTopic[topic] > 0 {
servesNeededTopic = true
break
}
}
if !servesNeededTopic {
continue
}
// Select this peer and decrement needed count for ALL topics it serves.
selected = append(selected, c.node)
for _, topic := range c.topics {
if neededByTopic[topic] > 0 {
neededByTopic[topic]--
}
}
}
return selected
}
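A worked illustration of the cross-topic accounting above: if topic A and topic B each still need one peer and a single candidate subscribes to both, that candidate is selected once and the needed count for both topics drops to zero, so no second peer is dialed for either topic.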
// DialPeersForTopicBlocking blocks until the specified topic has at least nPeers connected,
// or until the context is cancelled.
//
// This method is useful when you need to ensure a minimum number of peers are connected
// for a specific topic before proceeding (e.g., before publishing a message).
//
// The method polls in a loop:
// 1. Check if current peer count >= nPeers, return nil if satisfied
// 2. Get peer candidates from crawler for this topic
// 3. Dial candidates with rate limiting
// 4. Wait 100ms for connections to establish in pubsub layer
// 5. Repeat until target reached or context cancelled
//
// Parameters:
// - ctx: Context to cancel the blocking operation. Takes precedence for cancellation.
// - topic: The gossipsub topic to ensure peers for.
// - nPeers: Minimum number of peers required before returning.
//
// Returns:
// - nil: Successfully reached the target peer count.
// - ctx.Err(): The provided context was cancelled.
// - g.ctx.Err(): The dialer's parent context was cancelled.
//
// Note: This may block indefinitely if the crawler cannot provide enough peers
// and the context has no deadline.
func (g *GossipPeerDialer) DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error {
for {
peers := g.listPeers(topic)
if len(peers) >= nPeers {
return nil
}
newPeers := g.peersForTopic(topic, nPeers)
if len(newPeers) > 0 {
g.dialPeersWithRatelimiting(newPeers)
}
select {
case <-ctx.Done():
return ctx.Err()
// A brief wait after dialing helps, since new connections take some time to show up in the pubsub layer.
case <-time.After(100 * time.Millisecond):
case <-g.ctx.Done():
return g.ctx.Err()
}
}
}
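Because the call can block indefinitely, callers will typically bound it with a deadline. A minimal sketch (assumed usage, not from this diff):

dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second) // hypothetical bound
defer cancel()
if err := s.gossipDialer.DialPeersForTopicBlocking(dialCtx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
	log.WithError(err).Debug("Could not reach target peer count for topic before deadline")
}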
func (g *GossipPeerDialer) peersForTopic(topic string, targetCount int) []*enode.Node {
peers := g.listPeers(topic)
peerCount := len(peers)
if peerCount >= targetCount {
return nil
}
missing := targetCount - peerCount
newPeers := g.crawler.PeersForTopic(topic)
if len(newPeers) > missing {
newPeers = newPeers[:missing]
}
return newPeers
}
// ProtectedPeers returns peer IDs that should be protected from pruning.
// For each topic, one connected peer is marked as protected to ensure
// we maintain connectivity to all subscribed topics.
func (g *GossipPeerDialer) ProtectedPeers() []peer.ID {
if g.topicsProvider == nil {
return nil
}
topics := g.topicsProvider()
protectedPeers := make(map[peer.ID]struct{})
for topic := range topics {
connectedPeers := g.listPeers(topic)
// Skip if no peers connected
if len(connectedPeers) == 0 {
continue
}
// Protect the first peer for this topic
protectedPeers[connectedPeers[0]] = struct{}{}
}
result := make([]peer.ID, 0, len(protectedPeers))
for pid := range protectedPeers {
result = append(result, pid)
}
return result
}
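How the protected set is consumed is not shown in this diff; one plausible (assumed) integration is to tag these peers in libp2p's connection manager so they survive pruning:

// host is a libp2p core host.Host (assumption for this sketch).
for _, pid := range dialer.ProtectedPeers() {
	host.ConnManager().Protect(pid, "gossip-topic-peer") // hypothetical tag
}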
func (g *GossipPeerDialer) dialPeersWithRatelimiting(peers []*enode.Node) {
// Dial new peers in batches.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}
g.dialPeers(g.ctx, maxConcurrentDials, peers)
}

View File

@@ -0,0 +1,691 @@
package p2p
import (
"context"
"crypto/rand"
"net"
"slices"
"sync"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
"github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/require"
)
func TestGossipPeerDialer_Start(t *testing.T) {
tests := []struct {
name string
newCrawler func(t *testing.T) *mockCrawler
provider gossipcrawler.SubnetTopicsProvider
expectedConnects int
expectStartErr bool
}{
{
name: "dials unique peers across topics",
newCrawler: func(t *testing.T) *mockCrawler {
nodeA := newTestNode(t, "127.0.0.1", 30101)
nodeB := newTestNode(t, "127.0.0.1", 30102)
return &mockCrawler{
consume: true,
peers: map[string][]*enode.Node{
"topic/a": {nodeA, nodeB},
"topic/b": {nodeA},
},
}
},
provider: func() map[string]int {
return map[string]int{"topic/a": 2, "topic/b": 2}
},
expectedConnects: 2,
},
{
name: "uses per-topic min peer counts",
newCrawler: func(t *testing.T) *mockCrawler {
nodes := make([]*enode.Node, 5)
for i := range nodes {
nodes[i] = newTestNode(t, "127.0.0.1", uint16(30110+i))
}
return &mockCrawler{
consume: true,
peers: map[string][]*enode.Node{
// topic/mesh has 3 available peers, minPeers=2 -> should dial 2
"topic/mesh": {nodes[0], nodes[1], nodes[2]},
// topic/fanout has 3 available peers, minPeers=1 -> should dial 1
"topic/fanout": {nodes[3], nodes[4]},
},
}
},
provider: func() map[string]int {
return map[string]int{
"topic/mesh": 2,
"topic/fanout": 1,
}
},
// Total: 2 from mesh + 1 from fanout = 3 peers dialed
expectedConnects: 3,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
md := &mockDialer{}
listPeers := func(topic string) []peer.ID { return nil }
dialer := NewGossipPeerDialer(t.Context(), tt.newCrawler(t), listPeers, md.DialPeers)
err := dialer.Start(tt.provider)
if tt.expectStartErr {
require.Error(t, err)
return
}
require.NoError(t, err)
require.Eventually(t, func() bool {
return md.dialCount() >= tt.expectedConnects
}, 2*time.Second, 20*time.Millisecond)
require.Equal(t, tt.expectedConnects, md.dialCount())
})
}
}
func TestGossipPeerDialer_DialPeersForTopicBlocking(t *testing.T) {
tests := []struct {
name string
connectedPeers int
newCrawler func(t *testing.T) *mockCrawler
targetPeers int
ctx func() (context.Context, context.CancelFunc)
expectedConnects int
expectErr bool
}{
{
name: "returns immediately when enough peers",
connectedPeers: 1,
newCrawler: func(t *testing.T) *mockCrawler {
return &mockCrawler{}
},
targetPeers: 1,
ctx: func() (context.Context, context.CancelFunc) { return context.WithCancel(context.Background()) },
expectedConnects: 0,
expectErr: false,
},
{
name: "dials when peers are missing",
connectedPeers: 0,
newCrawler: func(t *testing.T) *mockCrawler {
nodeA := newTestNode(t, "127.0.0.1", 30201)
nodeB := newTestNode(t, "127.0.0.1", 30202)
return &mockCrawler{
peers: map[string][]*enode.Node{
"topic/a": {nodeA, nodeB},
},
}
},
targetPeers: 2,
ctx: func() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), 1*time.Second)
},
expectedConnects: 2,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
md := &mockDialer{}
var mu sync.Mutex
connected := make([]peer.ID, 0)
for i := 0; i < tt.connectedPeers; i++ {
connected = append(connected, peer.ID(string(rune(i))))
}
listPeers := func(topic string) []peer.ID {
mu.Lock()
defer mu.Unlock()
return connected
}
dialPeers := func(ctx context.Context, max int, nodes []*enode.Node) uint {
cnt := md.DialPeers(ctx, max, nodes)
mu.Lock()
defer mu.Unlock()
for range nodes {
// Just add a dummy peer ID to simulate connection success
connected = append(connected, peer.ID("dummy"))
}
return cnt
}
crawler := tt.newCrawler(t)
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, dialPeers)
topic := "topic/a"
ctx, cancel := tt.ctx()
defer cancel()
err := dialer.DialPeersForTopicBlocking(ctx, topic, tt.targetPeers)
if tt.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, tt.expectedConnects, md.dialCount())
})
}
}
func TestGossipPeerDialer_peersForTopic(t *testing.T) {
tests := []struct {
name string
connected int
targetCount int
buildPeers func(t *testing.T) ([]*enode.Node, []*enode.Node)
}{
{
name: "returns nil when enough peers already connected",
connected: 1,
targetCount: 1,
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
return []*enode.Node{newTestNode(t, "127.0.0.1", 30301)}, nil
},
},
{
name: "returns crawler peers when none connected",
connected: 0,
targetCount: 2,
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
nodeA := newTestNode(t, "127.0.0.1", 30311)
nodeB := newTestNode(t, "127.0.0.1", 30312)
return []*enode.Node{nodeA, nodeB}, []*enode.Node{nodeA, nodeB}
},
},
{
name: "truncates peers when more than needed",
connected: 0,
targetCount: 1,
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
nodeA := newTestNode(t, "127.0.0.1", 30321)
nodeB := newTestNode(t, "127.0.0.1", 30322)
nodeC := newTestNode(t, "127.0.0.1", 30323)
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA}
},
},
{
name: "only returns missing peers",
connected: 1,
targetCount: 3,
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
nodeA := newTestNode(t, "127.0.0.1", 30331)
nodeB := newTestNode(t, "127.0.0.1", 30332)
nodeC := newTestNode(t, "127.0.0.1", 30333)
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA, nodeB}
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
listPeers := func(topic string) []peer.ID {
peers := make([]peer.ID, tt.connected)
for i := 0; i < tt.connected; i++ {
peers[i] = peer.ID(string(rune(i))) // Fake peer ID
}
return peers
}
crawlerPeers, expected := tt.buildPeers(t)
crawler := &mockCrawler{
peers: map[string][]*enode.Node{"topic/test": crawlerPeers},
consume: false,
}
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
maxConcurrentDials int, nodes []*enode.Node) uint {
return 0
})
got := dialer.peersForTopic("topic/test", tt.targetCount)
if expected == nil {
require.Nil(t, got)
return
}
require.Equal(t, len(expected), len(got))
for i := range expected {
require.Equal(t, expected[i], got[i])
}
})
}
}
func TestGossipPeerDialer_selectPeersForTopics(t *testing.T) {
tests := []struct {
name string
connectedPeers map[string]int // topic -> connected peer count
topicsProvider func() map[string]int
buildPeers func(t *testing.T) (map[string][]*enode.Node, []*enode.Node)
}{
{
name: "prioritizes multi-topic peer over single-topic peers",
connectedPeers: map[string]int{},
topicsProvider: func() map[string]int {
return map[string]int{
"topic/a": 1,
"topic/b": 1,
"topic/c": 1,
}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
// Peer X serves all 3 topics
nodeX := newTestNode(t, "127.0.0.1", 30401)
// Peer Y serves only topic/a
nodeY := newTestNode(t, "127.0.0.1", 30402)
// Peer Z serves only topic/b
nodeZ := newTestNode(t, "127.0.0.1", 30403)
crawlerPeers := map[string][]*enode.Node{
"topic/a": {nodeX, nodeY},
"topic/b": {nodeX, nodeZ},
"topic/c": {nodeX},
}
// Only nodeX should be dialed (satisfies all 3 topics)
return crawlerPeers, []*enode.Node{nodeX}
},
},
{
name: "cross-topic decrement works correctly",
connectedPeers: map[string]int{},
topicsProvider: func() map[string]int {
return map[string]int{
"topic/a": 2, // Need 2 peers
"topic/b": 1, // Need 1 peer
}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
// Peer X serves both topics
nodeX := newTestNode(t, "127.0.0.1", 30411)
// Peer Y serves only topic/a
nodeY := newTestNode(t, "127.0.0.1", 30412)
crawlerPeers := map[string][]*enode.Node{
"topic/a": {nodeX, nodeY},
"topic/b": {nodeX},
}
// nodeX covers topic/b fully, and 1 of 2 for topic/a
// nodeY covers remaining 1 for topic/a
return crawlerPeers, []*enode.Node{nodeX, nodeY}
},
},
{
name: "no redundant dials when one peer satisfies all",
connectedPeers: map[string]int{},
topicsProvider: func() map[string]int {
return map[string]int{
"topic/a": 1,
"topic/b": 1,
"topic/c": 1,
}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
nodeX := newTestNode(t, "127.0.0.1", 30421)
crawlerPeers := map[string][]*enode.Node{
"topic/a": {nodeX},
"topic/b": {nodeX},
"topic/c": {nodeX},
}
// Only 1 dial needed for all 3 topics
return crawlerPeers, []*enode.Node{nodeX}
},
},
{
name: "skips topics with enough peers already",
connectedPeers: map[string]int{
"topic/a": 2, // Already has 2
},
topicsProvider: func() map[string]int {
return map[string]int{
"topic/a": 2, // min 2, already have 2
"topic/b": 1, // min 1, have 0
}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
nodeX := newTestNode(t, "127.0.0.1", 30431)
nodeY := newTestNode(t, "127.0.0.1", 30432)
crawlerPeers := map[string][]*enode.Node{
"topic/a": {nodeX},
"topic/b": {nodeY},
}
// Only nodeY should be dialed (topic/a already satisfied)
return crawlerPeers, []*enode.Node{nodeY}
},
},
{
name: "returns nil when all topics satisfied",
connectedPeers: map[string]int{"topic/a": 2, "topic/b": 1},
topicsProvider: func() map[string]int {
return map[string]int{
"topic/a": 2,
"topic/b": 1,
}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
nodeX := newTestNode(t, "127.0.0.1", 30441)
crawlerPeers := map[string][]*enode.Node{
"topic/a": {nodeX},
"topic/b": {nodeX},
}
// No dials needed
return crawlerPeers, nil
},
},
{
name: "handles empty crawler response",
connectedPeers: map[string]int{},
topicsProvider: func() map[string]int {
return map[string]int{"topic/a": 1}
},
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
return map[string][]*enode.Node{}, nil
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
listPeers := func(topic string) []peer.ID {
count := tt.connectedPeers[topic]
peers := make([]peer.ID, count)
for i := range count {
peers[i] = peer.ID(topic + string(rune(i)))
}
return peers
}
crawlerPeers, expected := tt.buildPeers(t)
crawler := &mockCrawler{
peers: crawlerPeers,
consume: false,
}
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
maxConcurrentDials int, nodes []*enode.Node) uint {
return 0
})
dialer.topicsProvider = tt.topicsProvider
got := dialer.selectPeersForTopics()
if expected == nil {
require.Nil(t, got)
return
}
require.Equal(t, len(expected), len(got), "expected %d peers, got %d", len(expected), len(got))
// Verify all expected nodes are present (order may vary for equal topic counts)
expectedIDs := make(map[enode.ID]struct{})
for _, n := range expected {
expectedIDs[n.ID()] = struct{}{}
}
for _, n := range got {
_, ok := expectedIDs[n.ID()]
require.True(t, ok, "unexpected peer %s in result", n.ID())
}
})
}
}
func TestGossipPeerDialer_ProtectedPeers(t *testing.T) {
peerA := peer.ID("peerA")
peerB := peer.ID("peerB")
peerC := peer.ID("peerC")
tests := []struct {
name string
topicsProvider func() map[string]int
connectedPeers map[string][]peer.ID // topic -> connected peers
expected []peer.ID
}{
{
name: "nil topics provider",
topicsProvider: nil,
connectedPeers: map[string][]peer.ID{},
expected: nil,
},
{
name: "no topics",
topicsProvider: func() map[string]int { return map[string]int{} },
connectedPeers: map[string][]peer.ID{},
expected: []peer.ID{},
},
{
name: "no peers for any topic",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
connectedPeers: map[string][]peer.ID{"topic/a": {}, "topic/b": {}},
expected: []peer.ID{},
},
{
name: "multiple peers for all topics protects first peer from each",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 2, "topic/b": 2} },
connectedPeers: map[string][]peer.ID{"topic/a": {peerA, peerB}, "topic/b": {peerB, peerC}},
expected: []peer.ID{peerA, peerB},
},
{
name: "single peer for one topic",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1} },
connectedPeers: map[string][]peer.ID{"topic/a": {peerA}},
expected: []peer.ID{peerA},
},
{
name: "same peer is first for multiple topics",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerA}},
expected: []peer.ID{peerA},
},
{
name: "different first peers for different topics",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerB}},
expected: []peer.ID{peerA, peerB},
},
{
name: "protects first peer from each topic",
topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 2, "topic/c": 1} },
connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerB, peerC}, "topic/c": {peerC}},
expected: []peer.ID{peerA, peerB, peerC},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
listPeers := func(topic string) []peer.ID {
return tt.connectedPeers[topic]
}
dialer := &GossipPeerDialer{
topicsProvider: tt.topicsProvider,
listPeers: listPeers,
}
got := dialer.ProtectedPeers()
if tt.expected == nil {
require.Nil(t, got)
return
}
require.NotNil(t, got)
require.Equal(t, len(tt.expected), len(got), "expected %d peers, got %d", len(tt.expected), len(got))
if len(tt.expected) == 0 {
return
}
// Check all expected peers are present (order may vary due to map iteration)
expectedSet := make(map[peer.ID]struct{})
for _, p := range tt.expected {
expectedSet[p] = struct{}{}
}
for _, p := range got {
_, ok := expectedSet[p]
require.True(t, ok, "unexpected peer %s in result", p)
}
})
}
}
func TestGossipPeerDialer_topicsChanged(t *testing.T) {
tests := []struct {
name string
cachedTopics map[string]int
newTopics map[string]int
expected bool
}{
{
name: "both empty",
cachedTopics: map[string]int{},
newTopics: map[string]int{},
expected: false,
},
{
name: "same topics",
cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
newTopics: map[string]int{"topic/a": 1, "topic/b": 2},
expected: false,
},
{
name: "same topics different min peer counts",
cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
newTopics: map[string]int{"topic/a": 5, "topic/b": 10},
expected: false,
},
{
name: "new topic added",
cachedTopics: map[string]int{"topic/a": 1},
newTopics: map[string]int{"topic/a": 1, "topic/b": 2},
expected: true,
},
{
name: "topic removed",
cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
newTopics: map[string]int{"topic/a": 1},
expected: true,
},
{
name: "same length different topics",
cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
newTopics: map[string]int{"topic/a": 1, "topic/c": 2},
expected: true,
},
{
name: "from empty to non-empty",
cachedTopics: map[string]int{},
newTopics: map[string]int{"topic/a": 1},
expected: true,
},
{
name: "from non-empty to empty",
cachedTopics: map[string]int{"topic/a": 1},
newTopics: map[string]int{},
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dialer := &GossipPeerDialer{
cachedTopics: tt.cachedTopics,
}
got := dialer.topicsChanged(tt.newTopics)
require.Equal(t, tt.expected, got)
})
}
}
type mockCrawler struct {
mu sync.Mutex
peers map[string][]*enode.Node
consume bool
}
func (m *mockCrawler) Start(gossipcrawler.TopicExtractor) error {
return nil
}
func (m *mockCrawler) Stop() {}
func (m *mockCrawler) RemovePeerByPeerId(peer.ID) {}
func (m *mockCrawler) RemoveTopic(string) {}
func (m *mockCrawler) TriggerCrawl() {}
func (m *mockCrawler) PeersForTopic(topic string) []*enode.Node {
m.mu.Lock()
defer m.mu.Unlock()
nodes := m.peers[topic]
if len(nodes) == 0 {
return nil
}
copied := slices.Clone(nodes)
if m.consume {
m.peers[topic] = nil
}
return copied
}
type mockDialer struct {
mu sync.Mutex
dials []*enode.Node
}
func (m *mockDialer) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
m.mu.Lock()
defer m.mu.Unlock()
m.dials = append(m.dials, nodes...)
return uint(len(nodes))
}
func (m *mockDialer) dialCount() int {
m.mu.Lock()
defer m.mu.Unlock()
return len(m.dials)
}
func (m *mockDialer) dialedNodes() []*enode.Node {
m.mu.Lock()
defer m.mu.Unlock()
return slices.Clone(m.dials)
}
func newTestNode(t *testing.T, ip string, tcpPort uint16) *enode.Node {
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
require.NoError(t, err)
return newTestNodeWithPriv(t, priv, ip, tcpPort)
}
func newTestNodeWithPriv(t *testing.T, priv crypto.PrivKey, ip string, tcpPort uint16) *enode.Node {
t.Helper()
db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() {
db.Close()
})
convertedKey, err := ecdsa.ConvertFromInterfacePrivKey(priv)
require.NoError(t, err)
localNode := enode.NewLocalNode(db, convertedKey)
localNode.SetStaticIP(net.ParseIP(ip))
localNode.Set(enr.TCP(tcpPort))
localNode.Set(enr.UDP(tcpPort))
return localNode.Node()
}


@@ -0,0 +1,606 @@
package p2p
import (
"context"
"fmt"
"slices"
"sync"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/ethereum/go-ethereum/p2p/enode"
)
const (
semaphoreWeight = int64(1)
// pingBufferSizeScaleFactor determines the ping channel buffer size as a multiple
// of maxConcurrentPings. The crawl loop blocks when the ping buffer is full, so we
// need headroom beyond the number of concurrent pings allowed. Since pings to
// unreachable peers may time out (taking longer to complete), a larger buffer ensures
// the crawler can continue discovering peers without stalling while waiting for slow
// or failed pings to drain.
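// For example, with maxConcurrentPings set to 16 the channel buffers up to 64 pending pings.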
pingBufferSizeScaleFactor = 4
)
type peerNode struct {
isPinged bool
node *enode.Node
peerID peer.ID
topics map[string]struct{}
}
type crawledPeers struct {
mu sync.RWMutex
peerNodeByEnode map[enode.ID]*peerNode
peerNodeByPid map[peer.ID]*peerNode
peersByTopic map[string]map[*peerNode]struct{}
}
func (cp *crawledPeers) updateStatusToPinged(enodeID enode.ID) {
cp.mu.Lock()
defer cp.mu.Unlock()
existingPNode, ok := cp.peerNodeByEnode[enodeID]
if !ok {
return
}
// We only ping a node with a given node ID once, not on every sequence-number change:
// the ping merely verifies that the node is reachable and not fabricated.
existingPNode.isPinged = true
}
func (cp *crawledPeers) updatePeer(node *enode.Node, topics []string) (bool, error) {
if node == nil {
return false, errors.New("node is nil")
}
cp.mu.Lock()
defer cp.mu.Unlock()
enodeID := node.ID()
existingPNode, ok := cp.peerNodeByEnode[enodeID]
if ok && existingPNode.node == nil {
return false, errors.New("enode is nil for enodeId")
}
// Ignore records whose sequence number is not strictly newer than the one we already hold; they are stale.
if ok && existingPNode.node.Seq() >= node.Seq() {
return false, nil
}
if !ok {
// this is a new peer
peerID, err := enodeToPeerID(node)
if err != nil {
return false, fmt.Errorf("converting enode to peer ID: %w", err)
}
existingPNode = &peerNode{
node: node,
peerID: peerID,
topics: make(map[string]struct{}),
}
cp.peerNodeByEnode[enodeID] = existingPNode
cp.peerNodeByPid[peerID] = existingPNode
} else {
existingPNode.node = node
}
cp.updateTopicsUnlocked(existingPNode, topics)
cp.recordMetricsUnlocked()
if existingPNode.isPinged || len(topics) == 0 {
return false, nil
}
return true, nil
}
func (cp *crawledPeers) removeTopic(topic string) {
cp.mu.Lock()
defer cp.mu.Unlock()
// Get all peers subscribed to this topic
peers, ok := cp.peersByTopic[topic]
if !ok {
return // Topic doesn't exist
}
// Remove the topic from each peer's topic list
for pnode := range peers {
delete(pnode.topics, topic)
// remove the peer if it has no more topics left
if len(pnode.topics) == 0 {
cp.updateTopicsUnlocked(pnode, nil)
}
}
// Remove the topic from the peersByTopic map
delete(cp.peersByTopic, topic)
cp.recordMetricsUnlocked()
}
func (cp *crawledPeers) removePeerByPeerId(peerID peer.ID) {
cp.mu.Lock()
defer cp.mu.Unlock()
pnode, ok := cp.peerNodeByPid[peerID]
if !ok {
return
}
// Use updateTopicsUnlocked with empty topics to remove the peer
cp.updateTopicsUnlocked(pnode, nil)
cp.recordMetricsUnlocked()
}
func (cp *crawledPeers) removePeerByNodeId(enodeID enode.ID) {
cp.mu.Lock()
defer cp.mu.Unlock()
pnode, ok := cp.peerNodeByEnode[enodeID]
if !ok {
return
}
cp.updateTopicsUnlocked(pnode, nil)
cp.recordMetricsUnlocked()
}
func (cp *crawledPeers) recordMetricsUnlocked() {
gossipCrawlerPeersByEnodeCount.Set(float64(len(cp.peerNodeByEnode)))
gossipCrawlerPeersByPidCount.Set(float64(len(cp.peerNodeByPid)))
gossipCrawlerTopicsCount.Set(float64(len(cp.peersByTopic)))
}
func (cp *crawledPeers) logPeerCounts() {
cp.mu.RLock()
defer cp.mu.RUnlock()
for topic, peers := range cp.peersByTopic {
// Count only pinged peers (verified reachable)
pingedCount := 0
for pnode := range peers {
if pnode.isPinged {
pingedCount++
}
}
log.WithField("topic", topic).
WithField("totalPeers", len(peers)).
WithField("pingedPeers", pingedCount).
Debug("Crawler indexed peers for topic")
}
}
func (cp *crawledPeers) cleanupPeer(pnode *peerNode) {
delete(cp.peerNodeByPid, pnode.peerID)
delete(cp.peerNodeByEnode, pnode.node.ID())
for t := range pnode.topics {
if peers, ok := cp.peersByTopic[t]; ok {
delete(peers, pnode)
if len(peers) == 0 {
delete(cp.peersByTopic, t)
}
}
}
pnode.topics = nil // Clear topics to indicate removal.
}
func (cp *crawledPeers) removeOldTopicsFromPeer(pnode *peerNode, newTopics map[string]struct{}) {
for oldTopic := range pnode.topics {
if _, ok := newTopics[oldTopic]; !ok {
if peers, ok := cp.peersByTopic[oldTopic]; ok {
delete(peers, pnode)
if len(peers) == 0 {
delete(cp.peersByTopic, oldTopic)
}
}
}
}
}
func (cp *crawledPeers) addNewTopicsToPeer(pnode *peerNode, newTopics map[string]struct{}) {
for newTopic := range newTopics {
if _, ok := pnode.topics[newTopic]; !ok {
if _, ok := cp.peersByTopic[newTopic]; !ok {
cp.peersByTopic[newTopic] = make(map[*peerNode]struct{})
}
cp.peersByTopic[newTopic][pnode] = struct{}{}
}
}
}
// updateTopicsUnlocked updates the topics associated with a peer node.
// If the topics slice is empty, the peer is completely removed from the crawled peers.
// Otherwise, it updates the peer's topics by removing old topics that are no longer
// present and adding new topics. This method assumes the caller holds the lock on cp.mu.
// If a topic has no peers after this update, it is removed from the list of topics we track peers for.
func (cp *crawledPeers) updateTopicsUnlocked(pnode *peerNode, topics []string) {
// If topics is empty, remove the peer completely.
if len(topics) == 0 {
cp.cleanupPeer(pnode)
return
}
newTopics := make(map[string]struct{})
for _, t := range topics {
newTopics[t] = struct{}{}
}
// Remove old topics that are no longer present.
cp.removeOldTopicsFromPeer(pnode, newTopics)
// Add new topics.
cp.addNewTopicsToPeer(pnode, newTopics)
pnode.topics = newTopics
}
func (cp *crawledPeers) peersForTopic(topic string, filter gossipcrawler.PeerFilterFunc) []peerNode {
cp.mu.RLock()
defer cp.mu.RUnlock()
peers, ok := cp.peersByTopic[topic]
if !ok {
return nil
}
var peerNodes []peerNode
for pnode := range peers {
if pnode.node == nil {
continue
}
if pnode.isPinged && filter(pnode.node) {
peerNodes = append(peerNodes, *pnode)
}
}
return peerNodes
}
// GossipPeerCrawler discovers and maintains a registry of peers subscribed to gossipsub topics.
// It uses discv5 to find peers, extracts their topic subscriptions from ENR records, and verifies
// their reachability via ping. Only peers that have been successfully pinged are returned when
// querying for peers on a given topic. The crawler runs background loops for discovery, ping
// verification, periodic cleanup of stale or filtered-out peers, and periodic logging of per-topic peer counts.
type GossipPeerCrawler struct {
ctx context.Context
crawlInterval, crawlTimeout time.Duration
crawledPeers *crawledPeers
// Discovery interface for finding peers
dv5 ListenerRebooter
p2pSvc *Service
topicExtractor gossipcrawler.TopicExtractor
peerFilter gossipcrawler.PeerFilterFunc
scorer PeerScoreFunc
pingCh chan enode.Node
pingSemaphore *semaphore.Weighted
triggerCrawlCh chan struct{}
once sync.Once
}
// cleanupInterval controls how frequently we sweep crawled peers and prune
// those that are no longer useful.
const cleanupInterval = 5 * time.Minute
// crawlerLogInterval controls how frequently we log peer counts per topic.
const crawlerLogInterval = 10 * time.Minute
// PeerScoreFunc calculates a reputation score for a given peer ID.
// Higher scores indicate more desirable peers. This function is used by PeersForTopic
// to sort returned peers in descending order of quality, allowing callers to prioritize
// connections to the most reliable peers.
type PeerScoreFunc func(peer.ID) float64
// NewGossipPeerCrawler creates a new crawler for discovering gossipsub peers.
// The crawler uses the provided discv5 listener to discover peers and tracks their
// topic subscriptions. Parameters:
// - p2pSvc: The P2P service for network operations
// - dv5: The discv5 listener used for peer discovery and ping verification
// - crawlTimeout: Maximum duration for each crawl iteration
// - crawlInterval: The duration between each crawl iteration
// - maxConcurrentPings: Limits parallel ping operations to avoid overwhelming the network
// - peerFilter: Determines which discovered peers should be tracked
// - scorer: Calculates peer quality scores for sorting results
//
// Returns an error if any required parameter is nil or invalid.
func NewGossipPeerCrawler(
ctx context.Context,
p2pSvc *Service,
dv5 ListenerRebooter,
crawlTimeout time.Duration,
crawlInterval time.Duration,
maxConcurrentPings int64,
peerFilter gossipcrawler.PeerFilterFunc,
scorer PeerScoreFunc,
) (*GossipPeerCrawler, error) {
if p2pSvc == nil {
return nil, errors.New("p2pSvc is nil")
}
if dv5 == nil {
return nil, errors.New("dv5 is nil")
}
if crawlTimeout <= 0 {
return nil, errors.New("crawl timeout must be greater than 0")
}
if crawlInterval <= 0 {
return nil, errors.New("crawl interval must be greater than 0")
}
if maxConcurrentPings <= 0 {
return nil, errors.New("max concurrent pings must be greater than 0")
}
if peerFilter == nil {
return nil, errors.New("peer filter is nil")
}
if scorer == nil {
return nil, errors.New("peer scorer is nil")
}
g := &GossipPeerCrawler{
ctx: ctx,
crawlInterval: crawlInterval,
crawlTimeout: crawlTimeout,
p2pSvc: p2pSvc,
dv5: dv5,
peerFilter: peerFilter,
scorer: scorer,
}
g.pingCh = make(chan enode.Node, pingBufferSizeScaleFactor*maxConcurrentPings)
g.pingSemaphore = semaphore.NewWeighted(maxConcurrentPings)
g.triggerCrawlCh = make(chan struct{}, 1)
g.crawledPeers = &crawledPeers{
peerNodeByEnode: make(map[enode.ID]*peerNode),
peerNodeByPid: make(map[peer.ID]*peerNode),
peersByTopic: make(map[string]map[*peerNode]struct{}),
}
return g, nil
}
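// newCrawlerExample is an illustrative construction sketch. The concrete values
// (ten-second crawl timeout, one-minute crawl interval, sixteen concurrent pings)
// and the no-op topic extractor are hypothetical; only the parameter order and
// types mirror NewGossipPeerCrawler above.
func newCrawlerExample(ctx context.Context, svc *Service, dv5 ListenerRebooter) (*GossipPeerCrawler, error) {
acceptAll := gossipcrawler.PeerFilterFunc(func(*enode.Node) bool { return true }) // track every discovered peer
flatScore := func(peer.ID) float64 { return 0 } // no preference between peers
crawler, err := NewGossipPeerCrawler(ctx, svc, dv5, 10*time.Second, time.Minute, 16, acceptAll, flatScore)
if err != nil {
return nil, err
}
// A real extractor would decode the node's ENR into the gossip topics it serves.
extractTopics := func(ctx context.Context, n *enode.Node) ([]string, error) { return nil, nil }
if err := crawler.Start(extractTopics); err != nil {
return nil, err
}
return crawler, nil
}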
// PeersForTopic returns a list of enode records for peers subscribed to the given topic.
// Only peers that have been successfully pinged (verified as reachable) and pass the
// configured peer filter are included. Results are sorted in descending order by peer
// score, so higher-quality peers appear first. Returns nil if no peers are found for
// the topic. The returned slice should not be modified as it contains pointers to
// internal enode records.
func (g *GossipPeerCrawler) PeersForTopic(topic string) []*enode.Node {
peerNodes := g.crawledPeers.peersForTopic(topic, g.peerFilter)
slices.SortFunc(peerNodes, func(a, b peerNode) int {
scoreA := g.scorer(a.peerID)
scoreB := g.scorer(b.peerID)
if scoreA > scoreB {
return -1
}
if scoreA < scoreB {
return 1
}
return 0
})
nodes := make([]*enode.Node, 0, len(peerNodes))
for _, pn := range peerNodes {
nodes = append(nodes, pn.node)
}
return nodes
}
// RemovePeerByPeerId removes a peer from the crawler's registry by their libp2p peer ID.
// This also removes the peer from all topic subscriptions they were associated with.
// If the peer is not found, this operation is a no-op.
func (g *GossipPeerCrawler) RemovePeerByPeerId(peerID peer.ID) {
g.crawledPeers.removePeerByPeerId(peerID)
}
// RemoveTopic removes a topic and all its peer associations from the crawler.
// Peers that were only subscribed to this topic are completely removed from the registry.
// Peers subscribed to other topics remain tracked for those topics.
// If the topic does not exist, this operation is a no-op.
func (g *GossipPeerCrawler) RemoveTopic(topic string) {
g.crawledPeers.removeTopic(topic)
}
// Start begins the crawler's background operations. It launches four goroutines:
// a crawl loop that periodically discovers new peers via discv5, a ping loop that
// verifies peer reachability, a cleanup loop that removes stale or filtered peers,
// and a loop that periodically logs per-topic peer counts.
// The provided TopicExtractor is used to determine which gossipsub topics each
// discovered peer subscribes to. Start is idempotent; subsequent calls after the
// first are no-ops. Returns an error if the topic extractor is nil.
func (g *GossipPeerCrawler) Start(te gossipcrawler.TopicExtractor) error {
if te == nil {
return errors.New("topic extractor is nil")
}
g.once.Do(func() {
g.topicExtractor = te
go g.crawlLoop()
go g.pingLoop()
go g.cleanupLoop()
go g.logPeerCountsLoop()
})
return nil
}
func (g *GossipPeerCrawler) pingLoop() {
for {
select {
case node := <-g.pingCh:
if err := g.pingSemaphore.Acquire(g.ctx, semaphoreWeight); err != nil {
return
}
go func(node *enode.Node) {
defer g.pingSemaphore.Release(semaphoreWeight)
if err := g.dv5.Ping(node); err != nil {
log.WithError(err).WithField("node", node.ID()).Debug("Failed to ping node")
g.crawledPeers.removePeerByNodeId(node.ID())
return
}
g.crawledPeers.updateStatusToPinged(node.ID())
}(&node)
case <-g.ctx.Done():
return
}
}
}
func (g *GossipPeerCrawler) crawlLoop() {
for {
g.crawl()
select {
case <-time.After(g.crawlInterval):
case <-g.triggerCrawlCh:
case <-g.ctx.Done():
return
}
}
}
func (g *GossipPeerCrawler) crawl() {
ctx, cancel := context.WithTimeout(g.ctx, g.crawlTimeout)
defer cancel()
iterator := g.dv5.RandomNodes()
// Ensure iterator unblocks on context cancellation or timeout
go func() {
<-ctx.Done()
iterator.Close()
}()
for iterator.Next() {
if ctx.Err() != nil {
return
}
node := iterator.Node()
if node == nil {
continue
}
if !g.peerFilter(node) {
g.crawledPeers.removePeerByNodeId(node.ID())
continue
}
topics, err := g.topicExtractor(ctx, node)
if err != nil {
log.WithError(err).WithField("node", node.ID()).Debug("Failed to extract topics, skipping")
continue
}
shouldPing, err := g.crawledPeers.updatePeer(node, topics)
if err != nil {
log.WithError(err).WithField("node", node.ID()).Error("Failed to update crawled peers")
}
if !shouldPing {
continue
}
select {
case g.pingCh <- *node:
case <-g.ctx.Done():
return
}
}
}
// cleanupLoop periodically removes peers that the filter rejects or that
// have no topics of interest. It uses the same context lifecycle as other
// background loops.
func (g *GossipPeerCrawler) cleanupLoop() {
ticker := time.NewTicker(cleanupInterval)
defer ticker.Stop()
// Initial cleanup to catch any leftovers from startup state
g.cleanup()
for {
select {
case <-ticker.C:
g.cleanup()
case <-g.ctx.Done():
return
}
}
}
// cleanup scans the crawled peer set and removes entries that either fail
// the current peer filter or have no topics of interest remaining.
func (g *GossipPeerCrawler) cleanup() {
cp := g.crawledPeers
// Snapshot current peers to evaluate without holding the lock during
// filter and topic extraction.
cp.mu.RLock()
peers := make([]*peerNode, 0, len(cp.peerNodeByPid))
for _, p := range cp.peerNodeByPid {
peers = append(peers, p)
}
cp.mu.RUnlock()
for _, p := range peers {
// Remove peers that no longer pass the filter
if !g.peerFilter(p.node) {
cp.removePeerByNodeId(p.node.ID())
continue
}
// Re-extract topics; if the extractor errors or yields none, drop the peer.
topics, err := g.topicExtractor(g.ctx, p.node)
if err != nil || len(topics) == 0 {
cp.removePeerByNodeId(p.node.ID())
}
}
}
func (g *GossipPeerCrawler) logPeerCountsLoop() {
ticker := time.NewTicker(crawlerLogInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
g.crawledPeers.logPeerCounts()
case <-g.ctx.Done():
return
}
}
}
// TriggerCrawl requests an immediate crawl. If a crawl trigger is already
// pending, this call is ignored (non-blocking). This allows external systems
// to request a crawl without waiting for the regular interval.
func (g *GossipPeerCrawler) TriggerCrawl() {
select {
case g.triggerCrawlCh <- struct{}{}:
log.Info("Triggering crawl")
default:
// Channel full, crawl already triggered
}
}
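// onNewSubscriptionExample is an illustrative sketch (the subscribe callback is
// hypothetical): after subscribing to a new topic, requesting an immediate crawl
// shortens the time until peers for that topic are indexed.
func onNewSubscriptionExample(g *GossipPeerCrawler, subscribe func(topic string) error, topic string) error {
if err := subscribe(topic); err != nil {
return err
}
g.TriggerCrawl() // non-blocking; coalesced if a crawl is already pending
return nil
}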
// enodeToPeerID converts an enode record to a peer ID.
func enodeToPeerID(n *enode.Node) (peer.ID, error) {
info, _, err := convertToAddrInfo(n)
if err != nil {
return "", fmt.Errorf("converting enode to addr info: %w", err)
}
if info == nil {
return "", errors.New("peer info is nil")
}
return info.ID, nil
}


@@ -0,0 +1,787 @@
package p2p
import (
"context"
"net"
"testing"
"time"
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/require"
)
// Helpers for crawledPeers tests
func newTestCrawledPeers() *crawledPeers {
return &crawledPeers{
peerNodeByEnode: make(map[enode.ID]*peerNode),
peerNodeByPid: make(map[peer.ID]*peerNode),
peersByTopic: make(map[string]map[*peerNode]struct{}),
}
}
func addPeerWithTopics(t *testing.T, cp *crawledPeers, node *enode.Node, topics []string, pinged bool) *peerNode {
t.Helper()
pid, err := enodeToPeerID(node)
require.NoError(t, err)
p := &peerNode{
isPinged: pinged,
node: node,
peerID: pid,
topics: make(map[string]struct{}),
}
cp.mu.Lock()
cp.peerNodeByEnode[p.node.ID()] = p
cp.peerNodeByPid[p.peerID] = p
cp.updateTopicsUnlocked(p, topics)
cp.mu.Unlock()
return p
}
func TestUpdateStatusToPinged(t *testing.T) {
localNode := createTestNodeRandom(t)
node1 := localNode.Node()
localNode2 := createTestNodeRandom(t)
node2 := localNode2.Node()
cases := []struct {
name string
prep func(*crawledPeers)
target *enode.Node
expectPinged map[enode.ID]bool
}{
{
name: "sets pinged for existing peer",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
},
target: node1,
expectPinged: map[enode.ID]bool{
node1.ID(): true,
},
},
{
name: "idempotent when already pinged",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"a"}, true)
},
target: node1,
expectPinged: map[enode.ID]bool{
node1.ID(): true,
},
},
{
name: "no change when peer missing",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
},
target: node2,
expectPinged: map[enode.ID]bool{
node1.ID(): false,
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cp := newTestCrawledPeers()
tc.prep(cp)
cp.updateStatusToPinged(tc.target.ID())
cp.mu.RLock()
defer cp.mu.RUnlock()
for id, exp := range tc.expectPinged {
if p := cp.peerNodeByEnode[id]; p != nil {
require.Equal(t, exp, p.isPinged)
}
}
})
}
}
func TestRemoveTopic(t *testing.T) {
localNode := createTestNodeRandom(t)
node1 := localNode.Node()
localNode2 := createTestNodeRandom(t)
node2 := localNode2.Node()
topic1 := "t1"
topic2 := "t2"
cases := []struct {
name string
prep func(*crawledPeers)
topic string
check func(*testing.T, *crawledPeers)
}{
{
name: "removes topic from all peers and index",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1", "t2"}, true)
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
},
topic: topic1,
check: func(t *testing.T, cp *crawledPeers) {
_, ok := cp.peersByTopic[topic1]
require.False(t, ok)
for _, p := range cp.peerNodeByPid {
_, has := p.topics[topic1]
require.False(t, has)
}
// Ensure other topics remain
_, ok = cp.peersByTopic[topic2]
require.True(t, ok)
},
},
{
name: "no-op when topic missing",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t2"}, true)
},
topic: topic1,
check: func(t *testing.T, cp *crawledPeers) {
_, ok := cp.peersByTopic[topic2]
require.True(t, ok)
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cp := newTestCrawledPeers()
tc.prep(cp)
cp.removeTopic(tc.topic)
tc.check(t, cp)
})
}
}
func TestRemovePeer(t *testing.T) {
localNode := createTestNodeRandom(t)
node1 := localNode.Node()
localNode2 := createTestNodeRandom(t)
node2 := localNode2.Node()
cases := []struct {
name string
prep func(*crawledPeers)
target enode.ID
wantTopics int
}{
{
name: "removes existing peer and prunes empty topic",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
},
target: node1.ID(),
wantTopics: 0,
},
{
name: "removes only targeted peer; keeps topic for other",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
},
target: node1.ID(),
wantTopics: 1, // peersByTopic should still have t1 with one peer
},
{
name: "no-op when peer missing",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
},
target: node2.ID(),
wantTopics: 1,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cp := newTestCrawledPeers()
tc.prep(cp)
cp.removePeerByNodeId(tc.target)
cp.mu.RLock()
defer cp.mu.RUnlock()
require.Len(t, cp.peersByTopic, tc.wantTopics)
})
}
}
func TestRemovePeerId(t *testing.T) {
localNode := createTestNodeRandom(t)
node1 := localNode.Node()
localNode2 := createTestNodeRandom(t)
node2 := localNode2.Node()
pid1, err := enodeToPeerID(node1)
require.NoError(t, err)
pid2, err := enodeToPeerID(node2)
require.NoError(t, err)
cases := []struct {
name string
prep func(*crawledPeers)
target peer.ID
wantTopics int
wantPeers int
}{
{
name: "removes existing peer by id and prunes topic",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
},
target: pid1,
wantTopics: 0,
wantPeers: 0,
},
{
name: "removes only targeted peer id; keeps topic for other",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
},
target: pid1,
wantTopics: 1,
wantPeers: 1,
},
{
name: "no-op when peer id missing",
prep: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
},
target: pid2,
wantTopics: 1,
wantPeers: 1,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cp := newTestCrawledPeers()
tc.prep(cp)
cp.removePeerByPeerId(tc.target)
cp.mu.RLock()
defer cp.mu.RUnlock()
require.Len(t, cp.peersByTopic, tc.wantTopics)
require.Len(t, cp.peerNodeByPid, tc.wantPeers)
})
}
}
func TestUpdateCrawledIfNewer(t *testing.T) {
newCrawler := func() (*crawledPeers, *GossipPeerCrawler, func()) {
ctx, cancel := context.WithCancel(context.Background())
g := &GossipPeerCrawler{
ctx: ctx,
pingCh: make(chan enode.Node, 8),
}
cp := newTestCrawledPeers()
return cp, g, cancel
}
// Helper: local node that will cause enodeToPeerID to fail (no TCP/UDP multiaddrs)
newNodeNoPorts := func(t *testing.T) *enode.Node {
_, privKey := createAddrAndPrivKey(t)
db, err := enode.OpenDB("")
require.NoError(t, err)
t.Cleanup(func() { db.Close() })
ln := enode.NewLocalNode(db, privKey)
// Do not set TCP/UDP; keep only IP
ln.SetStaticIP(net.ParseIP("127.0.0.1"))
return ln.Node()
}
// Ensure both A nodes have the same enode.ID but differing seq
ln := createTestNodeRandom(t)
nodeA1 := ln.Node()
setNodeSeq(ln, nodeA1.Seq()+1)
nodeA2 := ln.Node()
tests := []struct {
name string
arrange func(*crawledPeers)
invokeNode *enode.Node
invokeTopics []string
expectedShouldPing bool
expectErr bool
assert func(*testing.T, *crawledPeers, <-chan enode.Node)
}{
{
name: "new peer with topics adds peer and pings",
arrange: func(cp *crawledPeers) {},
invokeNode: nodeA1,
invokeTopics: []string{"a"},
expectedShouldPing: true,
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Len(t, cp.peerNodeByEnode, 1)
require.Len(t, cp.peerNodeByPid, 1)
require.Contains(t, cp.peersByTopic, "a")
cp.mu.RUnlock()
},
},
{
name: "new peer with empty topics is removed",
arrange: func(cp *crawledPeers) {},
invokeNode: nodeA1,
invokeTopics: nil,
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Empty(t, cp.peerNodeByEnode)
require.Empty(t, cp.peerNodeByPid)
require.Empty(t, cp.peersByTopic)
cp.mu.RUnlock()
},
},
{
name: "existing peer lower seq is ignored",
arrange: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, nodeA2, []string{"x"}, false) // higher seq exists
},
invokeNode: nodeA1, // lower seq
invokeTopics: []string{"a", "b"},
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Contains(t, cp.peersByTopic, "x")
require.NotContains(t, cp.peersByTopic, "a")
cp.mu.RUnlock()
},
},
{
name: "existing peer equal seq is ignored",
arrange: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
},
invokeNode: nodeA1,
invokeTopics: []string{"a"},
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Contains(t, cp.peersByTopic, "x")
require.NotContains(t, cp.peersByTopic, "a")
cp.mu.RUnlock()
},
},
{
name: "existing peer higher seq updates topics and pings",
arrange: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
},
invokeNode: nodeA2,
invokeTopics: []string{"a"},
expectedShouldPing: true,
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.NotContains(t, cp.peersByTopic, "x")
require.Contains(t, cp.peersByTopic, "a")
cp.mu.RUnlock()
},
},
{
name: "existing peer higher seq but empty topics removes peer",
arrange: func(cp *crawledPeers) {
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
},
invokeNode: nodeA2,
invokeTopics: nil,
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Empty(t, cp.peerNodeByEnode)
require.Empty(t, cp.peerNodeByPid)
cp.mu.RUnlock()
},
},
{
name: "corrupted existing entry with nil node is ignored",
arrange: func(cp *crawledPeers) {
pid, _ := enodeToPeerID(nodeA1)
cp.mu.Lock()
pn := &peerNode{node: nil, peerID: pid, topics: map[string]struct{}{"x": {}}}
cp.peerNodeByEnode[nodeA1.ID()] = pn
cp.peerNodeByPid[pid] = pn
cp.peersByTopic["x"] = map[*peerNode]struct{}{pn: {}}
cp.mu.Unlock()
},
expectErr: true,
invokeNode: nodeA2,
invokeTopics: []string{"a"},
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Contains(t, cp.peersByTopic, "x")
cp.mu.RUnlock()
},
},
{
name: "new peer with no ports causes enodeToPeerID error; no add",
arrange: func(cp *crawledPeers) {},
invokeNode: newNodeNoPorts(t),
invokeTopics: []string{"a"},
expectErr: true,
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
cp.mu.RLock()
require.Empty(t, cp.peerNodeByEnode)
require.Empty(t, cp.peerNodeByPid)
require.Empty(t, cp.peersByTopic)
cp.mu.RUnlock()
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
cp, g, cancel := newCrawler()
defer cancel()
tc.arrange(cp)
shouldPing, err := cp.updatePeer(tc.invokeNode, tc.invokeTopics)
if tc.expectErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, tc.expectedShouldPing, shouldPing)
tc.assert(t, cp, g.pingCh)
})
}
}
func TestPeersForTopic(t *testing.T) {
t.Parallel()
newCrawler := func(filter gossipcrawler.PeerFilterFunc) (*GossipPeerCrawler, *crawledPeers) {
g := &GossipPeerCrawler{
peerFilter: filter,
scorer: func(peer.ID) float64 { return 0 },
crawledPeers: newTestCrawledPeers(),
}
return g, g.crawledPeers
}
// Prepare nodes
ln1 := createTestNodeRandom(t)
ln2 := createTestNodeRandom(t)
ln3 := createTestNodeRandom(t)
n1, n2, n3 := ln1.Node(), ln2.Node(), ln3.Node()
topic := "top"
cases := []struct {
name string
filter gossipcrawler.PeerFilterFunc
setup func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers)
wantIDs []enode.ID
}{
{
name: "no peers for topic returns empty",
filter: func(*enode.Node) bool { return true },
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {},
wantIDs: nil,
},
{
name: "excludes unpinged peers",
filter: func(*enode.Node) bool { return true },
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
// Add one pinged and one not pinged on same topic
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
addPeerWithTopics(t, cp, n2, []string{string(topic)}, false)
},
wantIDs: []enode.ID{n1.ID()},
},
{
name: "applies peer filter to exclude",
filter: func(n *enode.Node) bool { return n.ID() != n2.ID() },
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
},
wantIDs: []enode.ID{n1.ID()},
},
{
name: "ignores peerNode with nil node",
filter: func(*enode.Node) bool { return true },
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
// Add n2 then set its node to nil to simulate corrupted entry
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
cp.mu.Lock()
p2.node = nil
cp.mu.Unlock()
},
wantIDs: []enode.ID{n1.ID()},
},
{
name: "sorted by score descending",
filter: func(*enode.Node) bool { return true },
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
// Add three pinged peers
p1 := addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
p3 := addPeerWithTopics(t, cp, n3, []string{string(topic)}, true)
// Provide a deterministic scoring function
scores := map[peer.ID]float64{
p1.peerID: 3.0,
p2.peerID: 2.0,
p3.peerID: 1.0,
}
g.scorer = func(id peer.ID) float64 { return scores[id] }
},
wantIDs: []enode.ID{n1.ID(), n2.ID(), n3.ID()},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
g, cp := newCrawler(tc.filter)
tc.setup(t, g, cp)
got := g.PeersForTopic(topic)
var gotIDs []enode.ID
for _, n := range got {
gotIDs = append(gotIDs, n.ID())
}
if tc.wantIDs == nil {
require.Empty(t, gotIDs)
return
}
require.Equal(t, tc.wantIDs, gotIDs)
})
}
}
func TestCrawler_AddsAndPingsPeer(t *testing.T) {
// Create a test node with valid ENR entries (IP/TCP/UDP)
localNode := createTestNodeRandom(t)
node := localNode.Node()
// Prepare a mock iterator returning our single node
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
// Prepare a mock listener with successful Ping
mockListener := p2ptest.NewMockListener(localNode, iterator)
mockListener.PingFunc = func(*enode.Node) error { return nil }
// Inject a permissive peer filter
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
// Create crawler with small intervals
scorer := func(peer.ID) float64 { return 0 }
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 4, filter, scorer)
require.NoError(t, err)
// Assign a simple topic extractor
topic := "test/topic"
topicExtractor := func(ctx context.Context, n *enode.Node) ([]string, error) {
return []string{topic}, nil
}
// Start the crawler's background loops (crawl, ping, cleanup, logging)
require.NoError(t, g.Start(topicExtractor))
// Verify that the peer has been indexed under the topic and marked as pinged
require.Eventually(t, func() bool {
g.crawledPeers.mu.RLock()
defer g.crawledPeers.mu.RUnlock()
peers := g.crawledPeers.peersByTopic[topic]
if len(peers) == 0 {
return false
}
// Fetch the single peerNode and check status
for pn := range peers {
if pn == nil {
return false
}
return pn.isPinged
}
return false
}, 2*time.Second, 10*time.Millisecond)
}
func TestCrawler_SkipsPeer_WhenFilterRejects(t *testing.T) {
t.Parallel()
localNode := createTestNodeRandom(t)
node := localNode.Node()
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
mockListener := p2ptest.NewMockListener(localNode, iterator)
mockListener.PingFunc = func(*enode.Node) error { return nil }
// Reject all peers via injected filter
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return false })
scorer := func(peer.ID) float64 { return 0 }
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
if err != nil {
t.Fatalf("NewGossipPeerCrawler error: %v", err)
}
topic := "test/topic"
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic}, nil }
g.crawl()
// Verify no peers are indexed, because filter rejected the node
g.crawledPeers.mu.RLock()
defer g.crawledPeers.mu.RUnlock()
if len(g.crawledPeers.peerNodeByEnode) != 0 || len(g.crawledPeers.peerNodeByPid) != 0 || len(g.crawledPeers.peersByTopic) != 0 {
t.Fatalf("expected no peers indexed, got byEnode=%d byPeerId=%d byTopic=%d",
len(g.crawledPeers.peerNodeByEnode), len(g.crawledPeers.peerNodeByPid), len(g.crawledPeers.peersByTopic))
}
}
func TestCrawler_RemoveTopic_RemovesTopicFromIndexes(t *testing.T) {
t.Parallel()
localNode := createTestNodeRandom(t)
node := localNode.Node()
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
mockListener := p2ptest.NewMockListener(localNode, iterator)
mockListener.PingFunc = func(*enode.Node) error { return nil }
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
scorer := func(peer.ID) float64 { return 0 }
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
if err != nil {
t.Fatalf("NewGossipPeerCrawler error: %v", err)
}
topic1 := "test/topic1"
topic2 := "test/topic2"
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic1, topic2}, nil }
// Single crawl to index topics
g.crawl()
// Remove one topic and assert it is pruned from all indexes
g.RemoveTopic(topic1)
g.crawledPeers.mu.RLock()
defer g.crawledPeers.mu.RUnlock()
if _, ok := g.crawledPeers.peersByTopic[topic1]; ok {
t.Fatalf("expected topic1 to be removed from byTopic")
}
// Ensure peer still exists and retains topic2
for _, pn := range g.crawledPeers.peerNodeByEnode {
if _, has1 := pn.topics[topic1]; has1 {
t.Fatalf("expected topic1 to be removed from peer topics")
}
if _, has2 := pn.topics[topic2]; !has2 {
t.Fatalf("expected topic2 to remain for peer")
}
}
}
func TestCrawledPeersMetrics(t *testing.T) {
localNode1 := createTestNodeRandom(t)
node1 := localNode1.Node()
localNode2 := createTestNodeRandom(t)
node2 := localNode2.Node()
pid1, err := enodeToPeerID(node1)
require.NoError(t, err)
t.Run("updatePeer records metrics", func(t *testing.T) {
cp := newTestCrawledPeers()
// Add first peer with two topics
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
require.NoError(t, err)
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
// Add second peer with one overlapping topic
_, err = cp.updatePeer(node2, []string{"topic1", "topic3"})
require.NoError(t, err)
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(3), testutil.ToFloat64(gossipCrawlerTopicsCount))
})
t.Run("removePeerByPeerId records metrics", func(t *testing.T) {
cp := newTestCrawledPeers()
// Add two peers
_, err := cp.updatePeer(node1, []string{"topic1"})
require.NoError(t, err)
_, err = cp.updatePeer(node2, []string{"topic1", "topic2"})
require.NoError(t, err)
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
// Remove first peer by peer ID
cp.removePeerByPeerId(pid1)
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
})
t.Run("removePeerByNodeId records metrics", func(t *testing.T) {
cp := newTestCrawledPeers()
// Add two peers
_, err := cp.updatePeer(node1, []string{"topic1"})
require.NoError(t, err)
_, err = cp.updatePeer(node2, []string{"topic2"})
require.NoError(t, err)
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
// Remove first peer by enode ID
cp.removePeerByNodeId(node1.ID())
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
})
t.Run("removeTopic records metrics", func(t *testing.T) {
cp := newTestCrawledPeers()
// Add two peers with overlapping topics
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
require.NoError(t, err)
_, err = cp.updatePeer(node2, []string{"topic1"})
require.NoError(t, err)
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
// Remove topic1 - this should also remove node2 which only had topic1
cp.removeTopic("topic1")
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
})
t.Run("updatePeer with empty topics removes peer and records metrics", func(t *testing.T) {
cp := newTestCrawledPeers()
// Add peer with topics
_, err := cp.updatePeer(node1, []string{"topic1"})
require.NoError(t, err)
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
// Increment sequence number to ensure update is processed
setNodeSeq(localNode1, node1.Seq()+1)
node1Updated := localNode1.Node()
// Update with empty topics - should remove the peer
_, err = cp.updatePeer(node1Updated, nil)
require.NoError(t, err)
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerTopicsCount))
})
}


@@ -52,9 +52,6 @@ const (
// lightClientFinalityUpdateWeight specifies the scoring weight that we apply to
// our light client finality update topic.
lightClientFinalityUpdateWeight = 0.05
// executionProofWeight specifies the scoring weight that we apply to
// our execution proof topic.
executionProofWeight = 0.05
// maxInMeshScore describes the max score a peer can attain from being in the mesh.
maxInMeshScore = 10
@@ -148,8 +145,6 @@ func (s *Service) topicScoreParams(topic string) (*pubsub.TopicScoreParams, erro
return defaultLightClientOptimisticUpdateTopicParams(), nil
case strings.Contains(topic, GossipLightClientFinalityUpdateMessage):
return defaultLightClientFinalityUpdateTopicParams(), nil
case strings.Contains(topic, GossipExecutionProofMessage):
return defaultExecutionProofTopicParams(), nil
default:
return nil, errors.Errorf("unrecognized topic provided for parameter registration: %s", topic)
}
@@ -515,28 +510,6 @@ func defaultBlsToExecutionChangeTopicParams() *pubsub.TopicScoreParams {
}
}
func defaultExecutionProofTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: executionProofWeight,
TimeInMeshWeight: maxInMeshScore / inMeshCap(),
TimeInMeshQuantum: inMeshTime(),
TimeInMeshCap: inMeshCap(),
FirstMessageDeliveriesWeight: 2,
FirstMessageDeliveriesDecay: scoreDecay(oneHundredEpochs),
FirstMessageDeliveriesCap: 5,
MeshMessageDeliveriesWeight: 0,
MeshMessageDeliveriesDecay: 0,
MeshMessageDeliveriesCap: 0,
MeshMessageDeliveriesThreshold: 0,
MeshMessageDeliveriesWindow: 0,
MeshMessageDeliveriesActivation: 0,
MeshFailurePenaltyWeight: 0,
MeshFailurePenaltyDecay: 0,
InvalidMessageDeliveriesWeight: -2000,
InvalidMessageDeliveriesDecay: scoreDecay(invalidDecayPeriod),
}
}
func defaultLightClientOptimisticUpdateTopicParams() *pubsub.TopicScoreParams {
return &pubsub.TopicScoreParams{
TopicWeight: lightClientOptimisticUpdateWeight,


@@ -25,7 +25,6 @@ var gossipTopicMappings = map[string]func() proto.Message{
LightClientOptimisticUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientOptimisticUpdateAltair{} },
LightClientFinalityUpdateTopicFormat: func() proto.Message { return &ethpb.LightClientFinalityUpdateAltair{} },
DataColumnSubnetTopicFormat: func() proto.Message { return &ethpb.DataColumnSidecar{} },
ExecutionProofSubnetTopicFormat: func() proto.Message { return &ethpb.ExecutionProof{} },
}
// GossipTopicMappings is a function to return the assigned data type
