mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-30 15:48:00 -05:00

Compare commits: e2e-debugg ... feat/gossi
108 Commits
46b300c6ca
04d45b6194
3e7cd8c2f1
ea1962bf17
6072e9c335
7d92cc5c32
4b8973289a
51c64e75c0
d1ceff6621
e63c1bebfe
25becdcd33
a2459aa365
80641dc3ae
59bb4a8301
08f117f04f
0b6365781f
0c994445ea
77c32203af
743e6bab07
451d2a8bc5
6508bdfa9a
d2bf512f36
e62fe66b0a
08fff3dec4
75a3d45470
bbd2d4da0f
378468c1ec
25d375dbe0
2d5f3112c8
1d49a2a88d
b119290584
9b0cffcdea
979466a3d7
08ba8dd487
a235a581e1
45b88de7f2
f69e017f6a
19e5684875
24fc76b7fb
0a4aad543b
ab8584d138
9b6cd96012
bba1358637
e6af417c62
3509622323
80d7bd6084
8d3c3fd40b
1caf7074a7
92bd155e3f
8360b0f882
bc0d60138c
32f377a665
2ab792b0fe
2ae8de05dd
cff96129b5
8abd1db7c1
a4d726184f
85acf242f6
ba6d1d0c6b
69d3453f97
a69cf6f5d4
0a46c2d16d
3c1b8859bc
9f645ae0a4
5eeeb9ed15
2061fc8a2f
eb93350583
affdab7776
eb556adab5
09d886c676
11c5c6fb8b
10804bbb56
e6f3b636ac
e95676dd91
86b65e0912
e9dac06037
893cf60921
41c9f160a2
707abe6112
76975a134d
672de432a2
4a6d88d9fb
1ff836e549
08f038fe80
63279bcadf
61628efd44
9b07f13cd3
1397a79b4c
929115639d
14dca40786
fa7596bceb
5161f087fc
71050ab076
614367ddcf
3f7371445b
a15a1ade17
798376b1d7
93271050bf
8dfbabc691
af2522e5f0
452d42bd10
3e985377ce
ab2e836d3f
14158bea9c
e14590636f
ce3660d2e7
7853cb9db0
8cfeda1473
.gitignore (vendored): 1 change

@@ -38,6 +38,7 @@ metaData
# execution API authentication
jwt.hex
execution/

# manual testing
tmp
BUILD.bazel: 10 changes

@@ -1,4 +1,5 @@
load("@bazel_gazelle//:def.bzl", "gazelle")
load("@com_github_atlassian_bazel_tools//gometalinter:def.bzl", "gometalinter")
load("@com_github_atlassian_bazel_tools//goimports:def.bzl", "goimports")
load("@io_kubernetes_build//defs:run_in_workspace.bzl", "workspace_binary")
load("@io_bazel_rules_go//go:def.bzl", "nogo")

@@ -54,6 +55,15 @@ alias(
    visibility = ["//visibility:public"],
)

gometalinter(
    name = "gometalinter",
    config = "//:.gometalinter.json",
    paths = [
        "./...",
    ],
    prefix = prefix,
)

goimports(
    name = "goimports",
    display_diffs = True,
CHANGELOG.md: 19 changes

@@ -4,25 +4,6 @@ All notable changes to this project will be documented in this file.

The format is based on Keep a Changelog, and this project adheres to Semantic Versioning.

## [v7.1.2](https://github.com/prysmaticlabs/prysm/compare/v7.1.1...v7.1.2) - 2026-01-07

Happy new year! This patch release is very small. The main improvement is better management of pending attestation aggregation via [PR 16153](https://github.com/OffchainLabs/prysm/pull/16153).

### Added

- `primitives.BuilderIndex`: SSZ `uint64` wrapper for builder registry indices (sketched below). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16169)
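A minimal sketch of what such an SSZ `uint64` wrapper typically looks like, modeled on how `primitives.ValidatorIndex` is defined in Prysm; the method shown here is an assumption for illustration, not the verbatim PR 16169 code:

```go
package primitives

import "encoding/binary"

// BuilderIndex identifies a builder in the builder registry.
// Hypothetical sketch: an SSZ uint64 wrapper mirroring ValidatorIndex.
type BuilderIndex uint64

// HashTreeRoot returns the SSZ hash tree root of the index: the value
// encoded as a little-endian uint64, zero-padded to 32 bytes.
func (b BuilderIndex) HashTreeRoot() ([32]byte, error) {
	var root [32]byte
	binary.LittleEndian.PutUint64(root[:8], uint64(b))
	return root, nil
}
```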
### Changed

- The `/eth/v2/beacon/pool/attestations` and `/eth/v1/beacon/pool/sync_committees` endpoints now return a 503 error if the node is still syncing; the REST API also now broadcasts immediately, mirroring the gRPC behavior. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16152)
- `validateDataColumn`: Remove error logs. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16157)
- Pending aggregates: When multiple aggregated attestations that differ only by the aggregator index are in the pending queue, only process one of them (see the sketch below). [[PR]](https://github.com/prysmaticlabs/prysm/pull/16153)
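The deduplication idea behind the pending-aggregates change, as a hedged sketch (the type and function names here are hypothetical, not the actual PR 16153 code): key each pending aggregate on its attestation data and aggregation bits while ignoring the aggregator index, and process only the first occurrence of each key.

```go
package sync

import (
	"crypto/sha256"

	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
)

// pendingKey deliberately omits the aggregator index: two aggregates that
// differ only by that index carry the same attestation payload.
type pendingKey struct {
	dataRoot [32]byte // hash tree root of the AttestationData
	bitsHash [32]byte // hash of the aggregation bitfield
}

// dedupePendingAggregates keeps one representative per (data, bits) pair.
func dedupePendingAggregates(aggs []*ethpb.SignedAggregateAttestationAndProof) []*ethpb.SignedAggregateAttestationAndProof {
	seen := make(map[pendingKey]bool, len(aggs))
	out := make([]*ethpb.SignedAggregateAttestationAndProof, 0, len(aggs))
	for _, a := range aggs {
		att := a.Message.Aggregate
		dataRoot, err := att.Data.HashTreeRoot()
		if err != nil {
			continue // skip malformed entries in this sketch
		}
		k := pendingKey{dataRoot: dataRoot, bitsHash: sha256.Sum256(att.AggregationBits)}
		if seen[k] {
			continue // same aggregate under a different aggregator index
		}
		seen[k] = true
		out = append(out, a)
	}
	return out
}
```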
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16151)
- Do not process slots and copy states for next epoch proposers after Fulu. [[PR]](https://github.com/prysmaticlabs/prysm/pull/16168)

## [v7.1.1](https://github.com/prysmaticlabs/prysm/compare/v7.1.0...v7.1.1) - 2025-12-18

Release highlights:
@@ -72,7 +72,7 @@ Do NOT add new `go_repository` to the WORKSPACE file. All dependencies should li

To enable conditional compilation and custom configuration for tests (where compiled code has more
debug info, while not being completely optimized), we rely on Go's build tags/constraints mechanism
(see official docs on [build constraints](https://pkg.go.dev/go/build#hdr-Build_Constraints)).
(see official docs on [build constraints](https://golang.org/pkg/go/build/#hdr-Build_Constraints)).
Therefore, whenever using `go test`, do not forget to pass in the extra build tag, e.g.:

```bash
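# Illustrative completion: the original example command is truncated in this
# capture, and the `develop` tag name is an assumption, not confirmed here.
go test -tags=develop ./beacon-chain/core/...
```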
@@ -9,7 +9,7 @@ This README details how to setup Prysm for interop testing for usage with other

## Installation & Setup

1. Install [Bazel](https://bazel.build/install) **(Recommended)**
1. Install [Bazel](https://docs.bazel.build/versions/master/install.html) **(Recommended)**
2. `git clone https://github.com/OffchainLabs/prysm && cd prysm`
3. `bazel build //cmd/...`
@@ -15,7 +15,7 @@

## 📖 Overview

This is the core repository for Prysm, a [Golang](https://go.dev/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the [Ethereum Consensus](https://ethereum.org/en/developers/docs/consensus-mechanisms/#proof-of-stake) [specification](https://github.com/ethereum/consensus-specs), developed by [Offchain Labs](https://www.offchainlabs.com).

See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details of the latest releases and upcoming breaking changes.

@@ -23,7 +23,7 @@ See the [Changelog](https://github.com/OffchainLabs/prysm/releases) for details

## 🚀 Getting Started

A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://prysm.offchainlabs.com/docs/)**.
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the **[official documentation portal](https://docs.prylabs.network)**.

💬 **Need help?** Join our **[Discord Community](https://discord.gg/prysm)** for support.

@@ -51,7 +51,7 @@ Prysm maintains two permanent branches:

### 🛠 Contribution Guide

Want to get involved? Check out our **[Contribution Guide](https://prysm.offchainlabs.com/docs/contribute/contribution-guidelines/)** to learn more!
Want to get involved? Check out our **[Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/)** to learn more!

---
WORKSPACE: 14 changes

@@ -273,16 +273,16 @@ filegroup(
    url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.7.0-alpha.1"
consensus_spec_version = "v1.6.0"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
    name = "consensus_spec_tests",
    flavors = {
        "general": "sha256-j5R3jA7Oo4OSDMTvpMuD+8RomaCByeFSwtfkq6fL0Zg=",
        "minimal": "sha256-tdTqByoyswOS4r6OxFmo70y2BP7w1TgEok+gf4cbxB0=",
        "mainnet": "sha256-5gB4dt6SnSDKzdBc06VedId3NkgvSYyv9n9FRxWKwYI=",
        "general": "sha256-54hTaUNF9nLg+hRr3oHoq0yjZpW3MNiiUUuCQu6Rajk=",
        "minimal": "sha256-1JHIGg3gVMjvcGYRHR5cwdDgOvX47oR/MWp6gyAeZfA=",
        "mainnet": "sha256-292h3W2Ffts0YExgDTyxYe9Os7R0bZIXuAaMO8P6kl4=",
    },
    version = consensus_spec_version,
)

@@ -298,7 +298,7 @@ filegroup(
    visibility = ["//visibility:public"],
)
""",
    integrity = "sha256-J+43DrK1pF658kTXTwMS6zGf4KDjvas++m8w2a8swpg=",
    integrity = "sha256-VzBgrEokvYSMIIXVnSA5XS9I3m9oxpvToQGxC1N5lzw=",
    strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
    url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -423,6 +423,10 @@ load("@prysm//testing/endtoend:deps.bzl", "e2e_deps")

e2e_deps()

load("@com_github_atlassian_bazel_tools//gometalinter:deps.bzl", "gometalinter_dependencies")

gometalinter_dependencies()

load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")

gazelle_dependencies(go_sdk = "go_sdk")
@@ -503,77 +503,3 @@ func (s *SignedBlindedBeaconBlockFulu) MessageRawJson() ([]byte, error) {
func (s *SignedBlindedBeaconBlockFulu) SigString() string {
	return s.Signature
}

// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------

type ExecutionPayloadBid struct {
	ParentBlockHash        string `json:"parent_block_hash"`
	ParentBlockRoot        string `json:"parent_block_root"`
	BlockHash              string `json:"block_hash"`
	PrevRandao             string `json:"prev_randao"`
	FeeRecipient           string `json:"fee_recipient"`
	GasLimit               string `json:"gas_limit"`
	BuilderIndex           string `json:"builder_index"`
	Slot                   string `json:"slot"`
	Value                  string `json:"value"`
	ExecutionPayment       string `json:"execution_payment"`
	BlobKzgCommitmentsRoot string `json:"blob_kzg_commitments_root"`
}

type SignedExecutionPayloadBid struct {
	Message   *ExecutionPayloadBid `json:"message"`
	Signature string               `json:"signature"`
}

type PayloadAttestationData struct {
	BeaconBlockRoot   string `json:"beacon_block_root"`
	Slot              string `json:"slot"`
	PayloadPresent    bool   `json:"payload_present"`
	BlobDataAvailable bool   `json:"blob_data_available"`
}

type PayloadAttestation struct {
	AggregationBits string                  `json:"aggregation_bits"`
	Data            *PayloadAttestationData `json:"data"`
	Signature       string                  `json:"signature"`
}

type BeaconBlockBodyGloas struct {
	RandaoReveal              string                        `json:"randao_reveal"`
	Eth1Data                  *Eth1Data                     `json:"eth1_data"`
	Graffiti                  string                        `json:"graffiti"`
	ProposerSlashings         []*ProposerSlashing           `json:"proposer_slashings"`
	AttesterSlashings         []*AttesterSlashingElectra    `json:"attester_slashings"`
	Attestations              []*AttestationElectra         `json:"attestations"`
	Deposits                  []*Deposit                    `json:"deposits"`
	VoluntaryExits            []*SignedVoluntaryExit        `json:"voluntary_exits"`
	SyncAggregate             *SyncAggregate                `json:"sync_aggregate"`
	BLSToExecutionChanges     []*SignedBLSToExecutionChange `json:"bls_to_execution_changes"`
	SignedExecutionPayloadBid *SignedExecutionPayloadBid    `json:"signed_execution_payload_bid"`
	PayloadAttestations       []*PayloadAttestation         `json:"payload_attestations"`
}

type BeaconBlockGloas struct {
	Slot          string                `json:"slot"`
	ProposerIndex string                `json:"proposer_index"`
	ParentRoot    string                `json:"parent_root"`
	StateRoot     string                `json:"state_root"`
	Body          *BeaconBlockBodyGloas `json:"body"`
}

type SignedBeaconBlockGloas struct {
	Message   *BeaconBlockGloas `json:"message"`
	Signature string            `json:"signature"`
}

var _ SignedMessageJsoner = &SignedBeaconBlockGloas{}

func (s *SignedBeaconBlockGloas) MessageRawJson() ([]byte, error) {
	return json.Marshal(s.Message)
}

func (s *SignedBeaconBlockGloas) SigString() string {
	return s.Signature
}
@@ -268,8 +268,6 @@ func SignedBeaconBlockMessageJsoner(block interfaces.ReadOnlySignedBeaconBlock)
		return SignedBlindedBeaconBlockFuluFromConsensus(pbStruct)
	case *eth.SignedBeaconBlockFulu:
		return SignedBeaconBlockFuluFromConsensus(pbStruct)
	case *eth.SignedBeaconBlockGloas:
		return SignedBeaconBlockGloasFromConsensus(pbStruct)
	default:
		return nil, ErrUnsupportedConversion
	}
@@ -2887,379 +2885,3 @@ func SignedBeaconBlockFuluFromConsensus(b *eth.SignedBeaconBlockFulu) (*SignedBe
		Signature: hexutil.Encode(b.Signature),
	}, nil
}

// ----------------------------------------------------------------------------
// Gloas
// ----------------------------------------------------------------------------

func SignedBeaconBlockGloasFromConsensus(b *eth.SignedBeaconBlockGloas) (*SignedBeaconBlockGloas, error) {
	block, err := BeaconBlockGloasFromConsensus(b.Block)
	if err != nil {
		return nil, err
	}
	return &SignedBeaconBlockGloas{
		Message:   block,
		Signature: hexutil.Encode(b.Signature),
	}, nil
}

func BeaconBlockGloasFromConsensus(b *eth.BeaconBlockGloas) (*BeaconBlockGloas, error) {
	payloadAttestations := make([]*PayloadAttestation, len(b.Body.PayloadAttestations))
	for i, pa := range b.Body.PayloadAttestations {
		payloadAttestations[i] = PayloadAttestationFromConsensus(pa)
	}

	return &BeaconBlockGloas{
		Slot:          fmt.Sprintf("%d", b.Slot),
		ProposerIndex: fmt.Sprintf("%d", b.ProposerIndex),
		ParentRoot:    hexutil.Encode(b.ParentRoot),
		StateRoot:     hexutil.Encode(b.StateRoot),
		Body: &BeaconBlockBodyGloas{
			RandaoReveal:              hexutil.Encode(b.Body.RandaoReveal),
			Eth1Data:                  Eth1DataFromConsensus(b.Body.Eth1Data),
			Graffiti:                  hexutil.Encode(b.Body.Graffiti),
			ProposerSlashings:         ProposerSlashingsFromConsensus(b.Body.ProposerSlashings),
			AttesterSlashings:         AttesterSlashingsElectraFromConsensus(b.Body.AttesterSlashings),
			Attestations:              AttsElectraFromConsensus(b.Body.Attestations),
			Deposits:                  DepositsFromConsensus(b.Body.Deposits),
			VoluntaryExits:            SignedExitsFromConsensus(b.Body.VoluntaryExits),
			SyncAggregate:             SyncAggregateFromConsensus(b.Body.SyncAggregate),
			BLSToExecutionChanges:     SignedBLSChangesFromConsensus(b.Body.BlsToExecutionChanges),
			SignedExecutionPayloadBid: SignedExecutionPayloadBidFromConsensus(b.Body.SignedExecutionPayloadBid),
			PayloadAttestations:       payloadAttestations,
		},
	}, nil
}

func SignedExecutionPayloadBidFromConsensus(b *eth.SignedExecutionPayloadBid) *SignedExecutionPayloadBid {
	return &SignedExecutionPayloadBid{
		Message:   ExecutionPayloadBidFromConsensus(b.Message),
		Signature: hexutil.Encode(b.Signature),
	}
}

func ExecutionPayloadBidFromConsensus(b *eth.ExecutionPayloadBid) *ExecutionPayloadBid {
	return &ExecutionPayloadBid{
		ParentBlockHash:        hexutil.Encode(b.ParentBlockHash),
		ParentBlockRoot:        hexutil.Encode(b.ParentBlockRoot),
		BlockHash:              hexutil.Encode(b.BlockHash),
		PrevRandao:             hexutil.Encode(b.PrevRandao),
		FeeRecipient:           hexutil.Encode(b.FeeRecipient),
		GasLimit:               fmt.Sprintf("%d", b.GasLimit),
		BuilderIndex:           fmt.Sprintf("%d", b.BuilderIndex),
		Slot:                   fmt.Sprintf("%d", b.Slot),
		Value:                  fmt.Sprintf("%d", b.Value),
		ExecutionPayment:       fmt.Sprintf("%d", b.ExecutionPayment),
		BlobKzgCommitmentsRoot: hexutil.Encode(b.BlobKzgCommitmentsRoot),
	}
}

func PayloadAttestationFromConsensus(pa *eth.PayloadAttestation) *PayloadAttestation {
	return &PayloadAttestation{
		AggregationBits: hexutil.Encode(pa.AggregationBits),
		Data:            PayloadAttestationDataFromConsensus(pa.Data),
		Signature:       hexutil.Encode(pa.Signature),
	}
}

func PayloadAttestationDataFromConsensus(d *eth.PayloadAttestationData) *PayloadAttestationData {
	return &PayloadAttestationData{
		BeaconBlockRoot:   hexutil.Encode(d.BeaconBlockRoot),
		Slot:              fmt.Sprintf("%d", d.Slot),
		PayloadPresent:    d.PayloadPresent,
		BlobDataAvailable: d.BlobDataAvailable,
	}
}

func (b *SignedBeaconBlockGloas) ToConsensus() (*eth.SignedBeaconBlockGloas, error) {
	if b == nil {
		return nil, errNilValue
	}

	sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
	block, err := b.Message.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Message")
	}
	return &eth.SignedBeaconBlockGloas{
		Block:     block,
		Signature: sig,
	}, nil
}

func (b *BeaconBlockGloas) ToConsensus() (*eth.BeaconBlockGloas, error) {
	if b == nil {
		return nil, errNilValue
	}
	if b.Body == nil {
		return nil, server.NewDecodeError(errNilValue, "Body")
	}
	if b.Body.Eth1Data == nil {
		return nil, server.NewDecodeError(errNilValue, "Body.Eth1Data")
	}
	if b.Body.SyncAggregate == nil {
		return nil, server.NewDecodeError(errNilValue, "Body.SyncAggregate")
	}
	if b.Body.SignedExecutionPayloadBid == nil {
		return nil, server.NewDecodeError(errNilValue, "Body.SignedExecutionPayloadBid")
	}

	slot, err := strconv.ParseUint(b.Slot, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Slot")
	}
	proposerIndex, err := strconv.ParseUint(b.ProposerIndex, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "ProposerIndex")
	}
	parentRoot, err := bytesutil.DecodeHexWithLength(b.ParentRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "ParentRoot")
	}
	stateRoot, err := bytesutil.DecodeHexWithLength(b.StateRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "StateRoot")
	}
	body, err := b.Body.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Body")
	}
	return &eth.BeaconBlockGloas{
		Slot:          primitives.Slot(slot),
		ProposerIndex: primitives.ValidatorIndex(proposerIndex),
		ParentRoot:    parentRoot,
		StateRoot:     stateRoot,
		Body:          body,
	}, nil
}

func (b *BeaconBlockBodyGloas) ToConsensus() (*eth.BeaconBlockBodyGloas, error) {
	if b == nil {
		return nil, errNilValue
	}

	randaoReveal, err := bytesutil.DecodeHexWithLength(b.RandaoReveal, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "RandaoReveal")
	}
	depositRoot, err := bytesutil.DecodeHexWithLength(b.Eth1Data.DepositRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Eth1Data.DepositRoot")
	}
	depositCount, err := strconv.ParseUint(b.Eth1Data.DepositCount, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Eth1Data.DepositCount")
	}
	blockHash, err := bytesutil.DecodeHexWithLength(b.Eth1Data.BlockHash, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Eth1Data.BlockHash")
	}
	graffiti, err := bytesutil.DecodeHexWithLength(b.Graffiti, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Graffiti")
	}
	proposerSlashings, err := ProposerSlashingsToConsensus(b.ProposerSlashings)
	if err != nil {
		return nil, server.NewDecodeError(err, "ProposerSlashings")
	}
	attesterSlashings, err := AttesterSlashingsElectraToConsensus(b.AttesterSlashings)
	if err != nil {
		return nil, server.NewDecodeError(err, "AttesterSlashings")
	}
	atts, err := AttsElectraToConsensus(b.Attestations)
	if err != nil {
		return nil, server.NewDecodeError(err, "Attestations")
	}
	deposits, err := DepositsToConsensus(b.Deposits)
	if err != nil {
		return nil, server.NewDecodeError(err, "Deposits")
	}
	exits, err := SignedExitsToConsensus(b.VoluntaryExits)
	if err != nil {
		return nil, server.NewDecodeError(err, "VoluntaryExits")
	}
	syncCommitteeBits, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeBits, fieldparams.SyncAggregateSyncCommitteeBytesLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeBits")
	}
	syncCommitteeSig, err := bytesutil.DecodeHexWithLength(b.SyncAggregate.SyncCommitteeSignature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "SyncAggregate.SyncCommitteeSignature")
	}
	blsChanges, err := SignedBLSChangesToConsensus(b.BLSToExecutionChanges)
	if err != nil {
		return nil, server.NewDecodeError(err, "BLSToExecutionChanges")
	}
	signedBid, err := b.SignedExecutionPayloadBid.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "SignedExecutionPayloadBid")
	}
	payloadAttestations, err := PayloadAttestationsToConsensus(b.PayloadAttestations)
	if err != nil {
		return nil, server.NewDecodeError(err, "PayloadAttestations")
	}

	return &eth.BeaconBlockBodyGloas{
		RandaoReveal: randaoReveal,
		Eth1Data: &eth.Eth1Data{
			DepositRoot:  depositRoot,
			DepositCount: depositCount,
			BlockHash:    blockHash,
		},
		Graffiti:          graffiti,
		ProposerSlashings: proposerSlashings,
		AttesterSlashings: attesterSlashings,
		Attestations:      atts,
		Deposits:          deposits,
		VoluntaryExits:    exits,
		SyncAggregate: &eth.SyncAggregate{
			SyncCommitteeBits:      syncCommitteeBits,
			SyncCommitteeSignature: syncCommitteeSig,
		},
		BlsToExecutionChanges:     blsChanges,
		SignedExecutionPayloadBid: signedBid,
		PayloadAttestations:       payloadAttestations,
	}, nil
}

func (b *SignedExecutionPayloadBid) ToConsensus() (*eth.SignedExecutionPayloadBid, error) {
	if b == nil {
		return nil, errNilValue
	}
	sig, err := bytesutil.DecodeHexWithLength(b.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
	message, err := b.Message.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Message")
	}
	return &eth.SignedExecutionPayloadBid{
		Message:   message,
		Signature: sig,
	}, nil
}

func (b *ExecutionPayloadBid) ToConsensus() (*eth.ExecutionPayloadBid, error) {
	if b == nil {
		return nil, errNilValue
	}
	parentBlockHash, err := bytesutil.DecodeHexWithLength(b.ParentBlockHash, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "ParentBlockHash")
	}
	parentBlockRoot, err := bytesutil.DecodeHexWithLength(b.ParentBlockRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "ParentBlockRoot")
	}
	blockHash, err := bytesutil.DecodeHexWithLength(b.BlockHash, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "BlockHash")
	}
	prevRandao, err := bytesutil.DecodeHexWithLength(b.PrevRandao, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "PrevRandao")
	}
	feeRecipient, err := bytesutil.DecodeHexWithLength(b.FeeRecipient, fieldparams.FeeRecipientLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "FeeRecipient")
	}
	gasLimit, err := strconv.ParseUint(b.GasLimit, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "GasLimit")
	}
	builderIndex, err := strconv.ParseUint(b.BuilderIndex, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "BuilderIndex")
	}
	slot, err := strconv.ParseUint(b.Slot, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Slot")
	}
	value, err := strconv.ParseUint(b.Value, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Value")
	}
	executionPayment, err := strconv.ParseUint(b.ExecutionPayment, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "ExecutionPayment")
	}
	blobKzgCommitmentsRoot, err := bytesutil.DecodeHexWithLength(b.BlobKzgCommitmentsRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "BlobKzgCommitmentsRoot")
	}
	return &eth.ExecutionPayloadBid{
		ParentBlockHash:        parentBlockHash,
		ParentBlockRoot:        parentBlockRoot,
		BlockHash:              blockHash,
		PrevRandao:             prevRandao,
		FeeRecipient:           feeRecipient,
		GasLimit:               gasLimit,
		BuilderIndex:           primitives.BuilderIndex(builderIndex),
		Slot:                   primitives.Slot(slot),
		Value:                  primitives.Gwei(value),
		ExecutionPayment:       primitives.Gwei(executionPayment),
		BlobKzgCommitmentsRoot: blobKzgCommitmentsRoot,
	}, nil
}

func PayloadAttestationsToConsensus(pa []*PayloadAttestation) ([]*eth.PayloadAttestation, error) {
	if pa == nil {
		return nil, errNilValue
	}
	result := make([]*eth.PayloadAttestation, len(pa))
	for i, p := range pa {
		converted, err := p.ToConsensus()
		if err != nil {
			return nil, server.NewDecodeError(err, fmt.Sprintf("[%d]", i))
		}
		result[i] = converted
	}
	return result, nil
}

func (p *PayloadAttestation) ToConsensus() (*eth.PayloadAttestation, error) {
	if p == nil {
		return nil, errNilValue
	}
	aggregationBits, err := hexutil.Decode(p.AggregationBits)
	if err != nil {
		return nil, server.NewDecodeError(err, "AggregationBits")
	}
	data, err := p.Data.ToConsensus()
	if err != nil {
		return nil, server.NewDecodeError(err, "Data")
	}
	sig, err := bytesutil.DecodeHexWithLength(p.Signature, fieldparams.BLSSignatureLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "Signature")
	}
	return &eth.PayloadAttestation{
		AggregationBits: aggregationBits,
		Data:            data,
		Signature:       sig,
	}, nil
}

func (d *PayloadAttestationData) ToConsensus() (*eth.PayloadAttestationData, error) {
	if d == nil {
		return nil, errNilValue
	}
	beaconBlockRoot, err := bytesutil.DecodeHexWithLength(d.BeaconBlockRoot, fieldparams.RootLength)
	if err != nil {
		return nil, server.NewDecodeError(err, "BeaconBlockRoot")
	}
	slot, err := strconv.ParseUint(d.Slot, 10, 64)
	if err != nil {
		return nil, server.NewDecodeError(err, "Slot")
	}
	return &eth.PayloadAttestationData{
		BeaconBlockRoot:   beaconBlockRoot,
		Slot:              primitives.Slot(slot),
		PayloadPresent:    d.PayloadPresent,
		BlobDataAvailable: d.BlobDataAvailable,
	}, nil
}
@@ -74,6 +74,7 @@ go_library(
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -173,6 +174,7 @@ go_test(
        "//beacon-chain/state/state-native:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//beacon-chain/verification:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",
        "//config/features:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
@@ -13,7 +13,7 @@ go_library(
    deps = [
        "//consensus-types/blocks:go_default_library",
        "@com_github_crate_crypto_go_kzg_4844//:go_default_library",
        "@com_github_ethereum_c_kzg_4844_v2//bindings/go:go_default_library",
        "@com_github_ethereum_c_kzg_4844//bindings/go:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//crypto/kzg4844:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -221,19 +221,6 @@ var (
			Buckets: []float64{1, 2, 4, 8, 16, 32},
		},
	)
	commitmentCount = promauto.NewHistogram(
		prometheus.HistogramOpts{
			Name:    "commitment_count_max_21",
			Help:    "The number of blob KZG commitments per block.",
			Buckets: []float64{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21},
		},
	)
	maxBlobsPerBlock = promauto.NewGauge(
		prometheus.GaugeOpts{
			Name: "max_blobs_per_block",
			Help: "The maximum number of blobs allowed in a block.",
		},
	)
)

// reportSlotMetrics reports slot related metrics.
@@ -94,7 +94,6 @@ func (s *Service) spawnProcessAttestationsRoutine() {
	for {
		select {
		case <-s.ctx.Done():
			ticker.Done()
			return
		case slotInterval := <-ticker.C():
			if slotInterval.Interval > 0 {
@@ -16,7 +16,6 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/slasher/types"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/features"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

@@ -259,55 +258,32 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityChecker, blo
}

func (s *Service) reportPostBlockProcessing(
	signedBlock interfaces.SignedBeaconBlock,
	block interfaces.SignedBeaconBlock,
	blockRoot [32]byte,
	receivedTime time.Time,
	daWaitedTime time.Duration,
) {
	block := signedBlock.Block()
	if block == nil {
		log.WithField("blockRoot", blockRoot).Error("Nil block")
		return
	}

	// Reports on block and fork choice metrics.
	cp := s.cfg.ForkChoiceStore.FinalizedCheckpoint()
	finalized := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	reportSlotMetrics(block.Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)
	reportSlotMetrics(block.Block().Slot(), s.HeadSlot(), s.CurrentSlot(), finalized)

	// Log block sync status.
	cp = s.cfg.ForkChoiceStore.JustifiedCheckpoint()
	justified := &ethpb.Checkpoint{Epoch: cp.Epoch, Root: bytesutil.SafeCopyBytes(cp.Root[:])}
	if err := logBlockSyncStatus(block, blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
	if err := logBlockSyncStatus(block.Block(), blockRoot, justified, finalized, receivedTime, s.genesisTime, daWaitedTime); err != nil {
		log.WithError(err).Error("Unable to log block sync status")
	}

	// Log payload data
	if err := logPayload(block); err != nil {
	if err := logPayload(block.Block()); err != nil {
		log.WithError(err).Error("Unable to log debug block payload data")
	}

	// Log state transition data.
	if err := logStateTransitionData(block); err != nil {
	if err := logStateTransitionData(block.Block()); err != nil {
		log.WithError(err).Error("Unable to log state transition data")
	}

	timeWithoutDaWait := time.Since(receivedTime) - daWaitedTime
	chainServiceProcessingTime.Observe(float64(timeWithoutDaWait.Milliseconds()))

	body := block.Body()
	if body == nil {
		log.WithField("blockRoot", blockRoot).Error("Nil block body")
		return
	}

	commitments, err := body.BlobKzgCommitments()
	if err != nil {
		log.WithError(err).Error("Unable to get blob KZG commitments")
	}

	commitmentCount.Observe(float64(len(commitments)))
	maxBlobsPerBlock.Set(float64(params.BeaconConfig().MaxBlobsPerBlock(block.Slot())))
}

func (s *Service) executePostFinalizationTasks(ctx context.Context, finalizedState state.BeaconState) {
@@ -17,7 +17,6 @@ import (
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ethpbv1 "github.com/OffchainLabs/prysm/v7/proto/eth/v1"

@@ -131,10 +130,12 @@ func TestService_ReceiveBlock(t *testing.T) {
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
				require.Eventually(t, func() bool {
					return len(notifier.ReceivedEvents()) >= 1
				}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
				// Hacky sleep, should use a better way to be able to resolve the race
				// between event being sent out and processed.
				time.Sleep(100 * time.Millisecond)
				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
					t.Errorf("Received %d state notifications, expected at least 1", recvd)
				}
			},
		},
		{

@@ -221,10 +222,10 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) {
		require.NoError(t, s.ReceiveBlock(ctx, wsb, root, nil))
	})
	wg.Wait()
	notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
	require.Eventually(t, func() bool {
		return len(notifier.ReceivedEvents()) >= 1
	}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
	time.Sleep(100 * time.Millisecond)
	if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
		t.Errorf("Received %d state notifications, expected at least 1", recvd)
	}
	// Verify fork choice has processed the block. (Genesis block and the new block)
	assert.Equal(t, 2, s.cfg.ForkChoiceStore.NodeCount())
}

@@ -264,10 +265,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) {
				block: genFullBlock(t, util.DefaultBlockGenConfig(), 1 /*slot*/),
			},
			check: func(t *testing.T, s *Service) {
				notifier := s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier)
				require.Eventually(t, func() bool {
					return len(notifier.ReceivedEvents()) >= 1
				}, 2*time.Second, 10*time.Millisecond, "Expected at least 1 state notification")
				time.Sleep(100 * time.Millisecond)
				if recvd := len(s.cfg.StateNotifier.(*blockchainTesting.MockStateNotifier).ReceivedEvents()); recvd < 1 {
					t.Errorf("Received %d state notifications, expected at least 1", recvd)
				}
			},
		},
	}

@@ -511,9 +512,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
	s.cfg.StateNotifier = notifier
	s.executePostFinalizationTasks(s.ctx, headState)

	require.Eventually(t, func() bool {
		return len(notifier.ReceivedEvents()) == 1
	}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
	time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
	require.Equal(t, 1, len(notifier.ReceivedEvents()))
	e := notifier.ReceivedEvents()[0]
	assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
	fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)

@@ -552,9 +552,8 @@ func Test_executePostFinalizationTasks(t *testing.T) {
	s.cfg.StateNotifier = notifier
	s.executePostFinalizationTasks(s.ctx, headState)

	require.Eventually(t, func() bool {
		return len(notifier.ReceivedEvents()) == 1
	}, 5*time.Second, 50*time.Millisecond, "Expected exactly 1 state notification")
	time.Sleep(1 * time.Second) // sleep for a second because event is in a separate go routine
	require.Equal(t, 1, len(notifier.ReceivedEvents()))
	e := notifier.ReceivedEvents()[0]
	assert.Equal(t, statefeed.FinalizedCheckpoint, int(e.Type))
	fc, ok := e.Data.(*ethpbv1.EventFinalizedCheckpoint)

@@ -597,13 +596,13 @@ func TestProcessLightClientBootstrap(t *testing.T) {

	s.executePostFinalizationTasks(s.ctx, l.AttestedState)

	// Wait for the light client bootstrap to be saved (runs in goroutine)
	var b interfaces.LightClientBootstrap
	require.Eventually(t, func() bool {
		var err error
		b, err = s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
		return err == nil && b != nil
	}, 5*time.Second, 50*time.Millisecond, "Light client bootstrap was not saved within timeout")
	// wait for the goroutine to finish processing
	time.Sleep(1 * time.Second)

	// Check that the light client bootstrap is saved
	b, err := s.lcStore.LightClientBootstrap(ctx, [32]byte(cp.Root))
	require.NoError(t, err)
	require.NotNil(t, b)

	btst, err := lightClient.NewLightClientBootstrapFromBeaconState(ctx, l.FinalizedState.Slot(), l.FinalizedState, l.FinalizedBlock)
	require.NoError(t, err)
@@ -14,6 +14,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
	coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/db"

@@ -29,6 +30,7 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"

@@ -289,6 +291,19 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
		return errors.Wrap(err, "failed to initialize blockchain service")
	}

	if !params.FuluEnabled() {
		return nil
	}

	earliestAvailableSlot, custodySubnetCount, err := s.updateCustodyInfoInDB(saved.Slot())
	if err != nil {
		return errors.Wrap(err, "could not get and save custody group count")
	}

	if _, _, err := s.cfg.P2P.UpdateCustodyInfo(earliestAvailableSlot, custodySubnetCount); err != nil {
		return errors.Wrap(err, "update custody info")
	}

	return nil
}

@@ -453,6 +468,73 @@ func (s *Service) removeStartupState() {
	s.cfg.FinalizedStateAtStartUp = nil
}

// updateCustodyInfoInDB updates the custody information in the database.
// It returns the earliest available slot and the (potentially updated) custody group count.
func (s *Service) updateCustodyInfoInDB(slot primitives.Slot) (primitives.Slot, uint64, error) {
	isSupernode := flags.Get().Supernode
	isSemiSupernode := flags.Get().SemiSupernode

	cfg := params.BeaconConfig()
	custodyRequirement := cfg.CustodyRequirement

	// Check if the node was previously subscribed to all data subnets, and if so,
	// store the new status accordingly.
	wasSupernode, err := s.cfg.BeaconDB.UpdateSubscribedToAllDataSubnets(s.ctx, isSupernode)
	if err != nil {
		return 0, 0, errors.Wrap(err, "update subscribed to all data subnets")
	}

	// Compute the target custody group count based on the current flag configuration.
	targetCustodyGroupCount := custodyRequirement

	// Supernode: custody all groups (either currently set or previously enabled).
	if isSupernode {
		targetCustodyGroupCount = cfg.NumberOfCustodyGroups
	}

	// Semi-supernode: custody the minimum needed for reconstruction, or the custody requirement if higher.
	if isSemiSupernode {
		semiSupernodeCustody, err := peerdas.MinimumCustodyGroupCountToReconstruct()
		if err != nil {
			return 0, 0, errors.Wrap(err, "minimum custody group count")
		}

		targetCustodyGroupCount = max(custodyRequirement, semiSupernodeCustody)
	}

	// Safely compute the Fulu fork slot.
	fuluForkSlot, err := fuluForkSlot()
	if err != nil {
		return 0, 0, errors.Wrap(err, "fulu fork slot")
	}

	// If slot is before the Fulu fork slot, then use the earliest stored slot as the reference slot.
	if slot < fuluForkSlot {
		slot, err = s.cfg.BeaconDB.EarliestSlot(s.ctx)
		if err != nil {
			return 0, 0, errors.Wrap(err, "earliest slot")
		}
	}

	earliestAvailableSlot, actualCustodyGroupCount, err := s.cfg.BeaconDB.UpdateCustodyInfo(s.ctx, slot, targetCustodyGroupCount)
	if err != nil {
		return 0, 0, errors.Wrap(err, "update custody info")
	}

	if isSupernode {
		log.WithFields(logrus.Fields{
			"current": actualCustodyGroupCount,
			"target":  cfg.NumberOfCustodyGroups,
		}).Info("Supernode mode enabled. Will custody all data columns going forward.")
	}

	if wasSupernode && !isSupernode {
		log.Warningf("Because the `--%s` flag was previously used, the node will continue to act as a super node.", flags.Supernode.Name)
	}

	return earliestAvailableSlot, actualCustodyGroupCount, nil
}

func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db db.HeadAccessDatabase) {
	currentTime := prysmTime.Now()
	if currentTime.After(genesisTime) {

@@ -469,3 +551,19 @@ func spawnCountdownIfPreGenesis(ctx context.Context, genesisTime time.Time, db d
	}
	go slots.CountdownToGenesis(ctx, genesisTime, uint64(gState.NumValidators()), gRoot)
}

func fuluForkSlot() (primitives.Slot, error) {
	cfg := params.BeaconConfig()

	fuluForkEpoch := cfg.FuluForkEpoch
	if fuluForkEpoch == cfg.FarFutureEpoch {
		return cfg.FarFutureSlot, nil
	}

	forkFuluSlot, err := slots.EpochStart(fuluForkEpoch)
	if err != nil {
		return 0, errors.Wrap(err, "epoch start")
	}

	return forkFuluSlot, nil
}
@@ -23,9 +23,11 @@ import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
	"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
	"github.com/OffchainLabs/prysm/v7/config/features"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	consensusblocks "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

@@ -594,3 +596,218 @@ func TestNotifyIndex(t *testing.T) {
		t.Errorf("Notifier channel did not receive the index")
	}
}

func TestUpdateCustodyInfoInDB(t *testing.T) {
	const (
		fuluForkEpoch         = 10
		custodyRequirement    = uint64(4)
		earliestStoredSlot    = primitives.Slot(12)
		numberOfCustodyGroups = uint64(64)
	)

	params.SetupTestConfigCleanup(t)
	cfg := params.BeaconConfig()
	cfg.FuluForkEpoch = fuluForkEpoch
	cfg.CustodyRequirement = custodyRequirement
	cfg.NumberOfCustodyGroups = numberOfCustodyGroups
	params.OverrideBeaconConfig(cfg)

	ctx := t.Context()
	pbBlock := util.NewBeaconBlock()
	pbBlock.Block.Slot = 12
	signedBeaconBlock, err := blocks.NewSignedBeaconBlock(pbBlock)
	require.NoError(t, err)

	roBlock, err := blocks.NewROBlock(signedBeaconBlock)
	require.NoError(t, err)

	t.Run("CGC increases before fulu", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Before Fulu
		// -----------
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(19)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		// After Fulu
		// ----------
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)
	})

	t.Run("CGC increases after fulu", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Before Fulu
		// -----------
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(15)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(17)
		require.NoError(t, err)
		require.Equal(t, earliestStoredSlot, actualEas)
		require.Equal(t, custodyRequirement, actualCgc)

		// After Fulu
		// ----------
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)
	})

	t.Run("Supernode downgrade prevented", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.Supernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc)

		// Try to downgrade by removing flag
		gFlags.Supernode = false
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Should still be supernode
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, numberOfCustodyGroups, actualCgc) // Still 64, not downgraded
	})

	t.Run("Semi-supernode downgrade prevented", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // 32 with this test config
		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the groups

		// Try to downgrade by removing flag
		gFlags.SemiSupernode = false
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// UpdateCustodyInfo should prevent downgrade - custody count should remain at the semi-supernode level
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(slot + 2)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		require.Equal(t, semiSupernodeCustody, actualCgc) // Unchanged due to downgrade prevention by UpdateCustodyInfo
	})

	t.Run("Semi-supernode to supernode upgrade allowed", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Start with semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)

		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // 32 with this test config
		require.Equal(t, semiSupernodeCustody, actualCgc) // Semi-supernode custodies half of the groups

		// Upgrade to full supernode
		gFlags.SemiSupernode = false
		gFlags.Supernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Should upgrade to full supernode
		upgradeSlot := slot + 2
		actualEas, actualCgc, err = service.updateCustodyInfoInDB(upgradeSlot)
		require.NoError(t, err)
		require.Equal(t, upgradeSlot, actualEas)           // Earliest slot updates when upgrading
		require.Equal(t, numberOfCustodyGroups, actualCgc) // Upgraded to the full custody group count
	})

	t.Run("Semi-supernode with high validator requirements uses higher custody", func(t *testing.T) {
		service, requirements := minimalTestService(t)
		err = requirements.db.SaveBlock(ctx, roBlock)
		require.NoError(t, err)

		// Enable semi-supernode
		resetFlags := flags.Get()
		gFlags := new(flags.GlobalFlags)
		gFlags.SemiSupernode = true
		flags.Init(gFlags)
		defer flags.Init(resetFlags)

		// Mock a high custody requirement (simulating many validators)
		// We need to override the custody requirement calculation
		// For this test, we'll verify the logic by checking if custodyRequirement > the reconstruction minimum
		// Since custodyRequirement in minimalTestService is 4, we can't test the high case here
		// This would require a different test setup with actual validators
		slot := fuluForkEpoch*primitives.Slot(cfg.SlotsPerEpoch) + 1
		actualEas, actualCgc, err := service.updateCustodyInfoInDB(slot)
		require.NoError(t, err)
		require.Equal(t, slot, actualEas)
		semiSupernodeCustody := numberOfCustodyGroups / 2 // 32 with this test config
		// With low validator requirements (4), should use the semi-supernode reconstruction minimum
		require.Equal(t, semiSupernodeCustody, actualCgc)
	})
}
@@ -3,6 +3,7 @@ package altair

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"

@@ -23,7 +24,7 @@ func ProcessPreGenesisDeposits(
	if err != nil {
		return nil, errors.Wrap(err, "could not process deposit")
	}
	beaconState, err = helpers.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
	beaconState, err = blocks.ActivateValidatorWithEffectiveBalance(beaconState, deposits)
	if err != nil {
		return nil, err
	}

@@ -36,7 +37,7 @@ func ProcessDeposits(
	beaconState state.BeaconState,
	deposits []*ethpb.Deposit,
) (state.BeaconState, error) {
	allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
	allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
	if err != nil {
		return nil, err
	}

@@ -81,7 +82,7 @@ func ProcessDeposits(
//	signature=deposit.data.signature,
//	)
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
	if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
	if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
		if deposit == nil || deposit.Data == nil {
			return nil, err
		}

@@ -121,7 +122,7 @@ func ApplyDeposit(beaconState state.BeaconState, data *ethpb.Deposit_Data, allSi
	index, ok := beaconState.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
	if !ok {
		if !allSignaturesVerified {
			valid, err := helpers.IsValidDepositSignature(data)
			valid, err := blocks.IsValidDepositSignature(data)
			if err != nil {
				return nil, err
			}
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "attestation.go",
        "attester_slashing.go",
        "deposit.go",
        "error.go",
        "eth1_data.go",
        "exit.go",

@@ -20,7 +21,6 @@ go_library(
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/gloas:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",

@@ -33,6 +33,8 @@ go_library(
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//container/trie:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",

@@ -59,6 +61,7 @@ go_test(
        "attester_slashing_test.go",
        "block_operations_fuzz_test.go",
        "block_regression_test.go",
        "deposit_test.go",
        "eth1_data_test.go",
        "exit_test.go",
        "exports_test.go",

@@ -87,6 +90,7 @@ go_test(
        "//consensus-types/blocks:go_default_library",
        "//consensus-types/interfaces:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//crypto/hash:go_default_library",
@@ -3,7 +3,6 @@ package blocks
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -319,7 +318,7 @@ func TestFuzzverifyDeposit_10000(t *testing.T) {
|
||||
fuzzer.Fuzz(deposit)
|
||||
s, err := state_native.InitializeFromProtoUnsafePhase0(state)
|
||||
require.NoError(t, err)
|
||||
err = helpers.VerifyDeposit(s, deposit)
|
||||
err = VerifyDeposit(s, deposit)
|
||||
_ = err
|
||||
fuzz.FreeMemory(i)
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package helpers
|
||||
package blocks
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -1,9 +1,9 @@
|
||||
package helpers_test
|
||||
package blocks_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
@@ -45,7 +45,7 @@ func TestBatchVerifyDepositsSignatures_Ok(t *testing.T) {
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
@@ -68,7 +68,7 @@ func TestBatchVerifyDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
|
||||
deposit.Proof = proof
|
||||
require.NoError(t, err)
|
||||
verified, err := helpers.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
verified, err := blocks.BatchVerifyDepositsSignatures(t.Context(), []*ethpb.Deposit{deposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
@@ -99,7 +99,7 @@ func TestVerifyDeposit_MerkleBranchFailsVerification(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
want := "deposit root did not verify"
|
||||
err = helpers.VerifyDeposit(beaconState, deposit)
|
||||
err = blocks.VerifyDeposit(beaconState, deposit)
|
||||
require.ErrorContains(t, want, err)
|
||||
}
|
||||
|
||||
@@ -123,7 +123,7 @@ func TestIsValidDepositSignature_Ok(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
sig := sk.Sign(sr[:])
|
||||
depositData.Signature = sig.Marshal()
|
||||
valid, err := helpers.IsValidDepositSignature(depositData)
|
||||
valid, err := blocks.IsValidDepositSignature(depositData)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, valid)
|
||||
}
|
||||
@@ -163,7 +163,7 @@ func TestBatchVerifyPendingDepositsSignatures_Ok(t *testing.T) {
|
||||
sig2 := sk2.Sign(sr2[:])
|
||||
pendingDeposit2.Signature = sig2.Marshal()
|
||||
|
||||
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit, pendingDeposit2})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified)
|
||||
}
|
||||
@@ -174,7 +174,7 @@ func TestBatchVerifyPendingDepositsSignatures_InvalidSignature(t *testing.T) {
|
||||
WithdrawalCredentials: make([]byte, 32),
|
||||
Signature: make([]byte, 96),
|
||||
}
|
||||
verified, err := helpers.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
|
||||
verified, err := blocks.BatchVerifyPendingDepositsSignatures(t.Context(), []*ethpb.PendingDeposit{pendingDeposit})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, verified)
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
|
||||
@@ -12,7 +11,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
@@ -128,16 +126,7 @@ func processProposerSlashing(
|
||||
if exitInfo == nil {
|
||||
return nil, errors.New("exit info is required to process proposer slashing")
|
||||
}
|
||||
|
||||
var err error
|
||||
// [New in Gloas:EIP7732]: remove the BuilderPendingPayment corresponding to the slashed proposer within 2 epoch window
|
||||
if beaconState.Version() >= version.Gloas {
|
||||
err = gloas.RemoveBuilderPendingPayment(beaconState, slashing.Header_1.Header)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
|
||||
|
||||
@@ -12,13 +12,16 @@ go_library(
|
||||
"log.go",
|
||||
"registry_updates.go",
|
||||
"transition.go",
|
||||
"transition_no_verify_sig.go",
|
||||
"upgrade.go",
|
||||
"validator.go",
|
||||
"withdrawals.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/altair:go_default_library",
|
||||
"//beacon-chain/core/blocks:go_default_library",
|
||||
"//beacon-chain/core/epoch:go_default_library",
|
||||
"//beacon-chain/core/epoch/precompute:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
@@ -41,6 +44,8 @@ go_library(
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/math:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
],
|
||||
@@ -57,14 +62,17 @@ go_test(
|
||||
"error_test.go",
|
||||
"export_test.go",
|
||||
"registry_updates_test.go",
|
||||
"transition_no_verify_sig_test.go",
|
||||
"transition_test.go",
|
||||
"upgrade_test.go",
|
||||
"validator_test.go",
|
||||
"withdrawals_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
@@ -77,12 +85,16 @@ go_test(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/fuzz:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,14 +3,19 @@ package electra
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -90,6 +95,217 @@ func ProcessPendingConsolidations(ctx context.Context, st state.BeaconState) err
|
||||
return nil
|
||||
}
|
||||
|
||||
// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
|
||||
// calls to the beacon state.
|
||||
//
|
||||
// def process_consolidation_request(
|
||||
// state: BeaconState,
|
||||
// consolidation_request: ConsolidationRequest
|
||||
// ) -> None:
|
||||
// if is_valid_switch_to_compounding_request(state, consolidation_request):
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// request_source_pubkey = consolidation_request.source_pubkey
|
||||
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
|
||||
// switch_to_compounding_validator(state, source_index)
|
||||
// return
|
||||
//
|
||||
// # Verify that source != target, so a consolidation cannot be used as an exit.
|
||||
// if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
|
||||
// return
|
||||
// # If the pending consolidations queue is full, consolidation requests are ignored
|
||||
// if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
|
||||
// return
|
||||
// # If there is too little available consolidation churn limit, consolidation requests are ignored
|
||||
// if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
|
||||
// return
|
||||
//
|
||||
// validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
// # Verify pubkeys exists
|
||||
// request_source_pubkey = consolidation_request.source_pubkey
|
||||
// request_target_pubkey = consolidation_request.target_pubkey
|
||||
// if request_source_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// if request_target_pubkey not in validator_pubkeys:
|
||||
// return
|
||||
// source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
|
||||
// target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
|
||||
// source_validator = state.validators[source_index]
|
||||
// target_validator = state.validators[target_index]
|
||||
//
|
||||
// # Verify source withdrawal credentials
|
||||
// has_correct_credential = has_execution_withdrawal_credential(source_validator)
|
||||
// is_correct_source_address = (
|
||||
// source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
// )
|
||||
// if not (has_correct_credential and is_correct_source_address):
|
||||
// return
|
||||
//
|
||||
// # Verify that target has compounding withdrawal credentials
|
||||
// if not has_compounding_withdrawal_credential(target_validator):
|
||||
// return
|
||||
//
|
||||
// # Verify the source and the target are active
|
||||
// current_epoch = get_current_epoch(state)
|
||||
// if not is_active_validator(source_validator, current_epoch):
|
||||
// return
|
||||
// if not is_active_validator(target_validator, current_epoch):
|
||||
// return
|
||||
// # Verify exits for source and target have not been initiated
|
||||
// if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
// if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||
// return
|
||||
//
|
||||
// # Verify the source has been active long enough
|
||||
// if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
|
||||
// return
|
||||
//
|
||||
// # Verify the source has no pending withdrawals in the queue
|
||||
// if get_pending_balance_to_withdraw(state, source_index) > 0:
|
||||
// return
|
||||
// # Initiate source validator exit and append pending consolidation
|
||||
// source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||
// state, source_validator.effective_balance
|
||||
// )
|
||||
// source_validator.withdrawable_epoch = Epoch(
|
||||
// source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
// )
|
||||
// state.pending_consolidations.append(PendingConsolidation(
|
||||
// source_index=source_index,
|
||||
// target_index=target_index
|
||||
// ))
|
||||
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
|
||||
if len(reqs) == 0 || st == nil {
|
||||
return nil
|
||||
}
|
||||
curEpoch := slots.ToEpoch(st.Slot())
|
||||
ffe := params.BeaconConfig().FarFutureEpoch
|
||||
minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
|
||||
pcLimit := params.BeaconConfig().PendingConsolidationsLimit
|
||||
|
||||
for _, cr := range reqs {
|
||||
if cr == nil {
|
||||
return errors.New("nil consolidation request")
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
|
||||
}
|
||||
if IsValidSwitchToCompoundingRequest(st, cr) {
|
||||
srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
|
||||
if !ok {
|
||||
log.Error("Failed to find source validator index")
|
||||
continue
|
||||
}
|
||||
if err := SwitchToCompoundingValidator(st, srcIdx); err != nil {
|
||||
log.WithError(err).Error("Failed to switch to compounding validator")
|
||||
}
|
||||
continue
|
||||
}
|
||||
sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
|
||||
targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
|
||||
if sourcePubkey == targetPubkey {
|
||||
continue
|
||||
}
|
||||
|
||||
if npc, err := st.NumPendingConsolidations(); err != nil {
|
||||
return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
|
||||
} else if npc >= pcLimit {
|
||||
continue
|
||||
}
|
||||
|
||||
activeBal, err := helpers.TotalActiveBalance(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
|
||||
if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
|
||||
continue
|
||||
}
|
||||
|
||||
srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
srcV, err := st.ValidatorAtIndex(srcIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
roSrcV, err := state_native.NewValidator(srcV)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
// Verify source withdrawal credentials
|
||||
if !roSrcV.HasExecutionWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||
if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Target validator must have their withdrawal credentials set appropriately.
|
||||
if !tgtV.HasCompoundingWithdrawalCredentials() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Both validators must be active.
|
||||
if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
|
||||
continue
|
||||
}
|
||||
// Neither validator is exiting.
|
||||
if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
|
||||
continue
|
||||
}
|
||||
|
||||
e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
|
||||
if overflow {
|
||||
log.Error("Overflow when adding activation epoch and shard committee period")
|
||||
continue
|
||||
}
|
||||
if uint64(curEpoch) < e {
|
||||
continue
|
||||
}
|
||||
hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to fetch pending balance to withdraw")
|
||||
continue
|
||||
}
|
||||
if hasBal {
|
||||
continue
|
||||
}
|
||||
|
||||
// Initiate the exit of the source validator.
|
||||
exitEpoch, err := ComputeConsolidationEpochAndUpdateChurn(ctx, st, primitives.Gwei(srcV.EffectiveBalance))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to compute consolidation epoch")
|
||||
continue
|
||||
}
|
||||
srcV.ExitEpoch = exitEpoch
|
||||
srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
|
||||
if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
|
||||
return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
|
||||
}
|
||||
|
||||
if err := st.AppendPendingConsolidation(ð.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
|
||||
return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
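Note the error policy in the function above: per-request rule violations (bad credentials, inactive or exiting validators, a full queue) are silently skipped with continue, while failures to read or write state abort the whole batch. A caller therefore treats any returned error as fatal, roughly as in this illustrative snippet (requests and st assumed in scope):

// Illustrative call site only.
if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
    return errors.Wrap(err, "could not process consolidation requests")
}
// A skipped request leaves no trace other than the absence of a new
// PendingConsolidation entry on the state.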

// IsValidSwitchToCompoundingRequest returns true if the given consolidation request is valid for switching to compounding.
//
// Spec code:

@@ -8,6 +8,8 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
@@ -201,6 +203,275 @@ func TestProcessPendingConsolidations(t *testing.T) {
    }
}

func TestProcessConsolidationRequests(t *testing.T) {
    tests := []struct {
        name     string
        state    state.BeaconState
        reqs     []*enginev1.ConsolidationRequest
        validate func(*testing.T, state.BeaconState)
        wantErr  bool
    }{
        {
            name: "nil request",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{}
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{nil},
            validate: func(t *testing.T, st state.BeaconState) {
                require.DeepEqual(t, st, st)
            },
            wantErr: true,
        },
        {
            name: "one valid request",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                }
                // Validator scenario setup. See comments in reqs section.
                st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
                st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
                st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
                st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
                st.Validators[13].ExitEpoch = 10
                st.Validators[16].ExitEpoch = 10
                st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
                    {
                        Index:  17,
                        Amount: 100,
                    },
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                // Source doesn't have withdrawal credentials.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_4"),
                },
                // Source withdrawal credentials don't match the consolidation address.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
                    SourcePubkey:  []byte("val_5"),
                    TargetPubkey:  []byte("val_6"),
                },
                // Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
                    SourcePubkey:  []byte("val_7"),
                    TargetPubkey:  []byte("val_8"),
                },
                // Source is inactive.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
                    SourcePubkey:  []byte("val_9"),
                    TargetPubkey:  []byte("val_10"),
                },
                // Target is inactive.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
                    SourcePubkey:  []byte("val_11"),
                    TargetPubkey:  []byte("val_12"),
                },
                // Source is exiting.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
                    SourcePubkey:  []byte("val_13"),
                    TargetPubkey:  []byte("val_14"),
                },
                // Target is exiting.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
                    SourcePubkey:  []byte("val_15"),
                    TargetPubkey:  []byte("val_16"),
                },
                // Source doesn't exist
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("INVALID"),
                    TargetPubkey:  []byte("val_0"),
                },
                // Target doesn't exist
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_0"),
                    TargetPubkey:  []byte("INVALID"),
                },
                // Source == target
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_0"),
                    TargetPubkey:  []byte("val_0"),
                },
                // Has pending partial withdrawal
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
                    SourcePubkey:  []byte("val_17"),
                    TargetPubkey:  []byte("val_1"),
                },
                // Valid consolidation request. This should be last to ensure invalid requests do
                // not end the processing early.
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, uint64(1), numPC)
                pcs, err := st.PendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
                require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)

                // Verify the source validator is exiting.
                src, err := st.ValidatorAtIndex(1)
                require.NoError(t, err)
                require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
                require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
            },
        },
        {
            name: "pending consolidations limit reached",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify no pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // Verify the source validator is not exiting.
                src, err := st.ValidatorAtIndex(1)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
            },
        },
        {
            name: "pending consolidations limit reached during processing",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
                }
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_4"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // The first consolidation was appended.
                pcs, err := st.PendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
                require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)

                // Verify the second source validator is not exiting.
                src, err := st.ValidatorAtIndex(3)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
                require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
            },
        },
        {
            name: "pending consolidations limit reached and compounded consolidation after",
            state: func() state.BeaconState {
                st := &eth.BeaconStateElectra{
                    Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
                    Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
                    PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
                }
                // To allow compounding consolidation requests.
                st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
                s, err := state_native.InitializeFromProtoElectra(st)
                require.NoError(t, err)
                return s
            }(),
            reqs: []*enginev1.ConsolidationRequest{
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
                    SourcePubkey:  []byte("val_1"),
                    TargetPubkey:  []byte("val_2"),
                },
                {
                    SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
                    SourcePubkey:  []byte("val_3"),
                    TargetPubkey:  []byte("val_3"),
                },
            },
            validate: func(t *testing.T, st state.BeaconState) {
                // Verify a pending consolidation is created.
                numPC, err := st.NumPendingConsolidations()
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

                // Verify that the last consolidation was included
                src, err := st.ValidatorAtIndex(3)
                require.NoError(t, err)
                require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := electra.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
            if (err != nil) != tt.wantErr {
                t.Errorf("ProcessConsolidationRequests() error = %v, wantErr %v", err, tt.wantErr)
                return
            }
            if !tt.wantErr {
                require.NoError(t, err)
            }
            if tt.validate != nil {
                tt.validate(t, tt.state)
            }
        })
    }
}
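These cases lean on a createValidatorsWithTotalActiveBalance helper that is referenced but never shown in this diff. A hypothetical sketch of what such a helper plausibly does, inferred from how the tests address validators ("val_N" pubkeys, address byte N as the credential suffix); the real helper may differ:

// Hypothetical reconstruction, for illustration only.
func createValidatorsWithTotalActiveBalance(totalGwei uint64) []*eth.Validator {
    num := totalGwei / params.BeaconConfig().MinActivationBalance
    vals := make([]*eth.Validator, num)
    for i := range vals {
        wc := make([]byte, 32)
        wc[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
        wc[31] = byte(i) // last 20 bytes act as the execution address, matching the PadTo(nil, 19)+byte(i) requests
        vals[i] = &eth.Validator{
            PublicKey:             []byte(fmt.Sprintf("val_%d", i)),
            WithdrawalCredentials: wc,
            EffectiveBalance:      params.BeaconConfig().MinActivationBalance,
            ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
            WithdrawableEpoch:     params.BeaconConfig().FarFutureEpoch,
        }
    }
    return vals
}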

func TestIsValidSwitchToCompoundingRequest(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, 1)
    t.Run("nil source pubkey", func(t *testing.T) {

@@ -3,6 +3,7 @@ package electra
import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
@@ -12,6 +13,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/contracts/deposit"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    "github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/time/slots"
@@ -35,7 +37,7 @@ func ProcessDeposits(
    defer span.End()
    // Attempt to verify all deposit signatures at once, if this fails then fall back to processing
    // individual deposits with signature verification enabled.
    allSignaturesVerified, err := helpers.BatchVerifyDepositsSignatures(ctx, deposits)
    allSignaturesVerified, err := blocks.BatchVerifyDepositsSignatures(ctx, deposits)
    if err != nil {
        return nil, errors.Wrap(err, "could not verify deposit signatures in batch")
    }
@@ -80,7 +82,7 @@ func ProcessDeposits(
//	signature=deposit.data.signature,
// )
func ProcessDeposit(beaconState state.BeaconState, deposit *ethpb.Deposit, allSignaturesVerified bool) (state.BeaconState, error) {
    if err := helpers.VerifyDeposit(beaconState, deposit); err != nil {
    if err := blocks.VerifyDeposit(beaconState, deposit); err != nil {
        if deposit == nil || deposit.Data == nil {
            return nil, err
        }
@@ -375,7 +377,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        return nil
    }

    allSignaturesVerified, err := helpers.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
    allSignaturesVerified, err := blocks.BatchVerifyPendingDepositsSignatures(ctx, pendingDeposits)
    if err != nil {
        return errors.Wrap(err, "batch signature verification failed")
    }
@@ -384,7 +386,7 @@ func batchProcessNewPendingDeposits(ctx context.Context, state state.BeaconState
        validSig := allSignaturesVerified

        if !allSignaturesVerified {
            validSig, err = helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
            validSig, err = blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
                PublicKey:             bytesutil.SafeCopyBytes(pd.PublicKey),
                WithdrawalCredentials: bytesutil.SafeCopyBytes(pd.WithdrawalCredentials),
                Amount:                pd.Amount,
@@ -439,7 +441,7 @@ func ApplyPendingDeposit(ctx context.Context, st state.BeaconState, deposit *eth
    defer span.End()
    index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(deposit.PublicKey))
    if !ok {
        verified, err := helpers.IsValidDepositSignature(&ethpb.Deposit_Data{
        verified, err := blocks.IsValidDepositSignature(&ethpb.Deposit_Data{
            PublicKey:             bytesutil.SafeCopyBytes(deposit.PublicKey),
            WithdrawalCredentials: bytesutil.SafeCopyBytes(deposit.WithdrawalCredentials),
            Amount:                deposit.Amount,
@@ -535,3 +537,62 @@ func GetValidatorFromDeposit(pubKey []byte, withdrawalCredentials []byte, amount
    validator.EffectiveBalance = min(amount-(amount%params.BeaconConfig().EffectiveBalanceIncrement), maxEffectiveBalance)
    return validator, nil
}

// ProcessDepositRequests processes execution-layer deposit requests, introduced in Electra.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, requests []*enginev1.DepositRequest) (state.BeaconState, error) {
    _, span := trace.StartSpan(ctx, "electra.ProcessDepositRequests")
    defer span.End()

    if len(requests) == 0 {
        return beaconState, nil
    }

    var err error
    for _, receipt := range requests {
        beaconState, err = processDepositRequest(beaconState, receipt)
        if err != nil {
            return nil, errors.Wrap(err, "could not apply deposit request")
        }
    }
    return beaconState, nil
}

// processDepositRequest processes the specific deposit request
// def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//
//	# Set deposit request start index
//	if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
//	    state.deposit_requests_start_index = deposit_request.index
//
//	# Create pending deposit
//	state.pending_deposits.append(PendingDeposit(
//	    pubkey=deposit_request.pubkey,
//	    withdrawal_credentials=deposit_request.withdrawal_credentials,
//	    amount=deposit_request.amount,
//	    signature=deposit_request.signature,
//	    slot=state.slot,
//	))
func processDepositRequest(beaconState state.BeaconState, request *enginev1.DepositRequest) (state.BeaconState, error) {
    requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
    if err != nil {
        return nil, errors.Wrap(err, "could not get deposit requests start index")
    }
    if request == nil {
        return nil, errors.New("nil deposit request")
    }
    if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
        if err := beaconState.SetDepositRequestsStartIndex(request.Index); err != nil {
            return nil, errors.Wrap(err, "could not set deposit requests start index")
        }
    }
    if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
        PublicKey:             bytesutil.SafeCopyBytes(request.Pubkey),
        WithdrawalCredentials: bytesutil.SafeCopyBytes(request.WithdrawalCredentials),
        Amount:                request.Amount,
        Signature:             bytesutil.SafeCopyBytes(request.Signature),
        Slot:                  beaconState.Slot(),
    }); err != nil {
        return nil, errors.Wrap(err, "could not append deposit request")
    }
    return beaconState, nil
}
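A worked example of what processing a single request does to the state, under the assumption that deposit_requests_start_index is still unset; the field values are illustrative and pubkey, creds, and sig are assumed in scope:

req := &enginev1.DepositRequest{
    Pubkey:                pubkey[:],      // 48-byte BLS pubkey
    WithdrawalCredentials: creds[:],       // 32 bytes
    Amount:                32_000_000_000, // Gwei
    Signature:             sig[:],         // 96 bytes
    Index:                 42,
}
// After ProcessDepositRequests(ctx, st, []*enginev1.DepositRequest{req}):
//   - state.deposit_requests_start_index == 42 (it was UNSET before)
//   - state.pending_deposits gains one PendingDeposit with the same fields
//     plus slot == state.slot; balances only change later, when the pending
//     deposit is applied by the pending-deposit processing.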

@@ -6,6 +6,7 @@ import (

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    stateTesting "github.com/OffchainLabs/prysm/v7/beacon-chain/state/testing"
@@ -14,6 +15,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/util"
@@ -359,6 +361,60 @@ func TestBatchProcessNewPendingDeposits(t *testing.T) {
    })
}

func TestProcessDepositRequests(t *testing.T) {
    st, _ := util.DeterministicGenesisStateElectra(t, 1)
    sk, err := bls.RandKey()
    require.NoError(t, err)
    require.NoError(t, st.SetDepositRequestsStartIndex(1))

    t.Run("empty requests continues", func(t *testing.T) {
        newSt, err := electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
        require.NoError(t, err)
        require.DeepEqual(t, newSt, st)
    })
    t.Run("nil request errors", func(t *testing.T) {
        _, err = electra.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
        require.ErrorContains(t, "nil deposit request", err)
    })

    vals := st.Validators()
    vals[0].PublicKey = sk.PublicKey().Marshal()
    vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
    require.NoError(t, st.SetValidators(vals))
    bals := st.Balances()
    bals[0] = params.BeaconConfig().MinActivationBalance + 2000
    require.NoError(t, st.SetBalances(bals))
    require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pending deposits, as the deterministic state populates this already
    withdrawalCred := make([]byte, 32)
    withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
    depositMessage := &eth.DepositMessage{
        PublicKey:             sk.PublicKey().Marshal(),
        Amount:                1000,
        WithdrawalCredentials: withdrawalCred,
    }
    domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
    require.NoError(t, err)
    sr, err := signing.ComputeSigningRoot(depositMessage, domain)
    require.NoError(t, err)
    sig := sk.Sign(sr[:])
    requests := []*enginev1.DepositRequest{
        {
            Pubkey:                depositMessage.PublicKey,
            Index:                 0,
            WithdrawalCredentials: depositMessage.WithdrawalCredentials,
            Amount:                depositMessage.Amount,
            Signature:             sig.Marshal(),
        },
    }
    st, err = electra.ProcessDepositRequests(t.Context(), st, requests)
    require.NoError(t, err)

    pbd, err := st.PendingDeposits()
    require.NoError(t, err)
    require.Equal(t, 1, len(pbd))
    require.Equal(t, uint64(1000), pbd[0].Amount)
}

func TestProcessDeposit_Electra_Simple(t *testing.T) {
    deps, _, err := util.DeterministicDepositsAndKeysSameValidator(3)
    require.NoError(t, err)

@@ -6,11 +6,6 @@ type execReqErr struct {
    error
}

// NewExecReqError creates a new execReqErr.
func NewExecReqError(msg string) error {
    return execReqErr{errors.New(msg)}
}

// IsExecutionRequestError returns true if the error wraps an `execReqErr`.
func IsExecutionRequestError(e error) bool {
    if e == nil {
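The body of IsExecutionRequestError is truncated in this diff. A minimal sketch of how such a check conventionally completes, assuming standard errors.As semantics; the actual implementation may differ:

// Illustrative stand-in, not the real body.
func isExecutionRequestError(e error) bool {
    if e == nil {
        return false
    }
    var target execReqErr
    return errors.As(e, &target) // true if any error in the chain is an execReqErr
}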

@@ -1,12 +1,10 @@
package transition
package electra

import (
    "context"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    coreRequests "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
    v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
@@ -49,7 +47,7 @@ var (
// # [New in Electra:EIP7251]
// for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)

func electraOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
func ProcessOperations(ctx context.Context, st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
    var err error

    // 6110 validations are in VerifyOperationLengths
@@ -65,60 +63,59 @@ func electraOperations(ctx context.Context, st state.BeaconState, block interfac
            return nil, errors.Wrap(err, "could not update total active balance cache")
        }
    }
    st, err = blocks.ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
        return nil, errors.Wrap(err, "could not process altair proposer slashing")
    }
    st, err = blocks.ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    st, err = ProcessAttesterSlashings(ctx, st, bb.AttesterSlashings(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
        return nil, errors.Wrap(err, "could not process altair attester slashing")
    }
    st, err = electra.ProcessAttestationsNoVerifySignature(ctx, st, block)
    st, err = ProcessAttestationsNoVerifySignature(ctx, st, block)
    if err != nil {
        return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
        return nil, errors.Wrap(err, "could not process altair attestation")
    }
    if _, err := electra.ProcessDeposits(ctx, st, bb.Deposits()); err != nil {
        return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
    if _, err := ProcessDeposits(ctx, st, bb.Deposits()); err != nil { // new in electra
        return nil, errors.Wrap(err, "could not process altair deposit")
    }
    st, err = blocks.ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    st, err = ProcessVoluntaryExits(ctx, st, bb.VoluntaryExits(), exitInfo)
    if err != nil {
        return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
        return nil, errors.Wrap(err, "could not process voluntary exits")
    }
    st, err = blocks.ProcessBLSToExecutionChanges(st, block)
    st, err = ProcessBLSToExecutionChanges(st, block)
    if err != nil {
        return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
        return nil, errors.Wrap(err, "could not process bls-to-execution changes")
    }
    // new in electra
    requests, err := bb.ExecutionRequests()
    if err != nil {
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not get execution requests").Error())
        return nil, errors.Wrap(err, "could not get execution requests")
    }
    for _, d := range requests.Deposits {
        if d == nil {
            return nil, electra.NewExecReqError("nil deposit request")
            return nil, errors.New("nil deposit request")
        }
    }
    st, err = coreRequests.ProcessDepositRequests(ctx, st, requests.Deposits)
    st, err = ProcessDepositRequests(ctx, st, requests.Deposits)
    if err != nil {
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process deposit requests").Error())
        return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
    }

    for _, w := range requests.Withdrawals {
        if w == nil {
            return nil, electra.NewExecReqError("nil withdrawal request")
            return nil, errors.New("nil withdrawal request")
        }
    }
    st, err = coreRequests.ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
    st, err = ProcessWithdrawalRequests(ctx, st, requests.Withdrawals)
    if err != nil {
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process withdrawal requests").Error())
        return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
    }
    for _, c := range requests.Consolidations {
        if c == nil {
            return nil, electra.NewExecReqError("nil consolidation request")
            return nil, errors.New("nil consolidation request")
        }
    }
    if err := coreRequests.ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, electra.NewExecReqError(errors.Wrap(err, "could not process consolidation requests").Error())
    if err := ProcessConsolidationRequests(ctx, st, requests.Consolidations); err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
    }
    return st, nil
}
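The rewritten ProcessOperations keeps the spec-mandated ordering for Electra execution requests: deposits (EIP-6110), then withdrawals (EIP-7002), then consolidations (EIP-7251), each guarded by a nil check and wrapped in execReqErr on failure. Condensed into one sketch with assumed signatures and the nil checks elided:

// Sketch of the request-processing tail of ProcessOperations above.
func processExecutionRequests(ctx context.Context, st state.BeaconState, reqs *enginev1.ExecutionRequests) (state.BeaconState, error) {
    st, err := ProcessDepositRequests(ctx, st, reqs.Deposits) // EIP-6110
    if err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process deposit requests")}
    }
    st, err = ProcessWithdrawalRequests(ctx, st, reqs.Withdrawals) // EIP-7002
    if err != nil {
        return nil, execReqErr{errors.Wrap(err, "could not process withdrawal requests")}
    }
    if err := ProcessConsolidationRequests(ctx, st, reqs.Consolidations); err != nil { // EIP-7251
        return nil, execReqErr{errors.Wrap(err, "could not process consolidation requests")}
    }
    return st, nil
}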
|
||||
60
beacon-chain/core/electra/transition_no_verify_sig_test.go
Normal file
60
beacon-chain/core/electra/transition_no_verify_sig_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
)
|
||||
|
||||
func TestProcessOperationsWithNilRequests(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
|
||||
errMsg string
|
||||
}{
|
||||
{
|
||||
name: "Nil deposit request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
|
||||
},
|
||||
errMsg: "nil deposit request",
|
||||
},
|
||||
{
|
||||
name: "Nil withdrawal request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
|
||||
},
|
||||
errMsg: "nil withdrawal request",
|
||||
},
|
||||
{
|
||||
name: "Nil consolidation request",
|
||||
modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
|
||||
blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
|
||||
},
|
||||
errMsg: "nil consolidation request",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
st, ks := util.DeterministicGenesisStateElectra(t, 128)
|
||||
blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
tc.modifyBlk(blk)
|
||||
|
||||
b, err := blocks.NewSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, st.SetSlot(1))
|
||||
|
||||
_, err = electra.ProcessOperations(t.Context(), st, b.Block())
|
||||
require.ErrorContains(t, tc.errMsg, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package requests
|
||||
package electra
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -88,7 +88,7 @@ import (
|
||||
// withdrawable_epoch=withdrawable_epoch,
|
||||
// ))
|
||||
func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []*enginev1.WithdrawalRequest) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "requests.ProcessWithdrawalRequests")
|
||||
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
|
||||
defer span.End()
|
||||
currentEpoch := slots.ToEpoch(st.Slot())
|
||||
if len(wrs) == 0 {
|
||||
@@ -1,9 +1,9 @@
|
||||
package requests_test
|
||||
package electra_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -289,7 +289,7 @@ func TestProcessWithdrawRequests(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
|
||||
got, err := requests.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
|
||||
got, err := electra.ProcessWithdrawalRequests(t.Context(), tt.args.st, tt.args.wrs)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("ProcessWithdrawalRequests() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
@@ -1,66 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"bid.go",
|
||||
"payload_attestation.go",
|
||||
"pending_payment.go",
|
||||
"proposer_slashing.go",
|
||||
],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/core/time:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"bid_test.go",
|
||||
"payload_attestation_test.go",
|
||||
"pending_payment_test.go",
|
||||
"proposer_slashing_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/state-native:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//crypto/bls:go_default_library",
|
||||
"//crypto/bls/common:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/engine/v1:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//proto/prysm/v1alpha1/validator-client:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_prysmaticlabs_fastssz//:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,193 +0,0 @@
package gloas

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/crypto/bls/common"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
)

// ProcessExecutionPayloadBid processes a signed execution payload bid in the Gloas fork.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_execution_payload_bid(state: BeaconState, block: BeaconBlock):
//
//    signed_bid = block.body.signed_execution_payload_bid
//    bid = signed_bid.message
//    builder_index = bid.builder_index
//    amount = bid.value
//    if builder_index == BUILDER_INDEX_SELF_BUILD:
//        assert amount == 0
//        assert signed_bid.signature == G2_POINT_AT_INFINITY
//    else:
//        assert is_active_builder(state, builder_index)
//        assert can_builder_cover_bid(state, builder_index, amount)
//        assert verify_execution_payload_bid_signature(state, signed_bid)
//    assert bid.slot == block.slot
//    assert bid.parent_block_hash == state.latest_block_hash
//    assert bid.parent_block_root == block.parent_root
//    assert bid.prev_randao == get_randao_mix(state, get_current_epoch(state))
//    if amount > 0:
//        state.builder_pending_payments[...] = BuilderPendingPayment(weight=0, withdrawal=BuilderPendingWithdrawal(fee_recipient=bid.fee_recipient, amount=amount, builder_index=builder_index))
//    state.latest_execution_payload_bid = bid
func ProcessExecutionPayloadBid(st state.BeaconState, block interfaces.ReadOnlyBeaconBlock) error {
    signedBid, err := block.Body().SignedExecutionPayloadBid()
    if err != nil {
        return errors.Wrap(err, "failed to get signed execution payload bid")
    }

    wrappedBid, err := blocks.WrappedROSignedExecutionPayloadBid(signedBid)
    if err != nil {
        return errors.Wrap(err, "failed to wrap signed bid")
    }

    bid, err := wrappedBid.Bid()
    if err != nil {
        return errors.Wrap(err, "failed to get bid from wrapped bid")
    }

    builderIndex := bid.BuilderIndex()
    amount := bid.Value()

    if builderIndex == params.BeaconConfig().BuilderIndexSelfBuild {
        if amount != 0 {
            return fmt.Errorf("self-build amount must be zero, got %d", amount)
        }
        if wrappedBid.Signature() != common.InfiniteSignature {
            return errors.New("self-build signature must be point at infinity")
        }
    } else {
        ok, err := st.IsActiveBuilder(builderIndex)
        if err != nil {
            return errors.Wrap(err, "builder active check failed")
        }
        if !ok {
            return fmt.Errorf("builder %d is not active", builderIndex)
        }

        ok, err = st.CanBuilderCoverBid(builderIndex, amount)
        if err != nil {
            return errors.Wrap(err, "builder balance check failed")
        }
        if !ok {
            return fmt.Errorf("builder %d cannot cover bid amount %d", builderIndex, amount)
        }

        if err := validatePayloadBidSignature(st, wrappedBid); err != nil {
            return errors.Wrap(err, "bid signature validation failed")
        }
    }

    if err := validateBidConsistency(st, bid, block); err != nil {
        return errors.Wrap(err, "bid consistency validation failed")
    }

    if amount > 0 {
        feeRecipient := bid.FeeRecipient()
        pendingPayment := &ethpb.BuilderPendingPayment{
            Weight: 0,
            Withdrawal: &ethpb.BuilderPendingWithdrawal{
                FeeRecipient: feeRecipient[:],
                Amount:       amount,
                BuilderIndex: builderIndex,
            },
        }
        slotIndex := params.BeaconConfig().SlotsPerEpoch + (bid.Slot() % params.BeaconConfig().SlotsPerEpoch)
        if err := st.SetBuilderPendingPayment(slotIndex, pendingPayment); err != nil {
            return errors.Wrap(err, "failed to set pending payment")
        }
    }

    if err := st.SetExecutionPayloadBid(bid); err != nil {
        return errors.Wrap(err, "failed to cache execution payload bid")
    }

    return nil
}
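
The builder_pending_payments vector holds two epochs of payments: indices [0, SLOTS_PER_EPOCH) for the previous epoch and [SLOTS_PER_EPOCH, 2*SLOTS_PER_EPOCH) for the current one, which is why the write above lands at SlotsPerEpoch + slot % SlotsPerEpoch. A minimal sketch of that index math, assuming mainnet's 32 slots per epoch (the helper name here is illustrative, not part of this diff):

package main

import "fmt"

// pendingPaymentIndex mirrors the slotIndex computation above: payments for
// the current epoch occupy the second half of the two-epoch vector.
func pendingPaymentIndex(slot, slotsPerEpoch uint64) uint64 {
    return slotsPerEpoch + slot%slotsPerEpoch
}

func main() {
    const slotsPerEpoch = 32 // mainnet value (assumption for this sketch)
    for _, slot := range []uint64{0, 8, 31, 32, 40} {
        fmt.Printf("slot %2d -> index %d\n", slot, pendingPaymentIndex(slot, slotsPerEpoch))
    }
    // Slots 0..31 map to indices 32..63; slot 32 wraps back to index 32.
}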

// validateBidConsistency checks that the bid is consistent with the current beacon state.
func validateBidConsistency(st state.BeaconState, bid interfaces.ROExecutionPayloadBid, block interfaces.ReadOnlyBeaconBlock) error {
    if bid.Slot() != block.Slot() {
        return fmt.Errorf("bid slot %d does not match block slot %d", bid.Slot(), block.Slot())
    }

    latestBlockHash, err := st.LatestBlockHash()
    if err != nil {
        return errors.Wrap(err, "failed to get latest block hash")
    }
    if bid.ParentBlockHash() != latestBlockHash {
        return fmt.Errorf("bid parent block hash mismatch: got %x, expected %x",
            bid.ParentBlockHash(), latestBlockHash)
    }

    if bid.ParentBlockRoot() != block.ParentRoot() {
        return fmt.Errorf("bid parent block root mismatch: got %x, expected %x",
            bid.ParentBlockRoot(), block.ParentRoot())
    }

    randaoMix, err := helpers.RandaoMix(st, slots.ToEpoch(st.Slot()))
    if err != nil {
        return errors.Wrap(err, "failed to get randao mix")
    }
    if bid.PrevRandao() != [32]byte(randaoMix) {
        return fmt.Errorf("bid prev randao mismatch: got %x, expected %x", bid.PrevRandao(), randaoMix)
    }

    return nil
}

// validatePayloadBidSignature verifies the BLS signature on a signed execution payload bid.
// It validates that the signature was created by the builder specified in the bid,
// using the appropriate domain for the beacon builder.
func validatePayloadBidSignature(st state.ReadOnlyBeaconState, signedBid interfaces.ROSignedExecutionPayloadBid) error {
    bid, err := signedBid.Bid()
    if err != nil {
        return errors.Wrap(err, "failed to get bid")
    }

    pubkey, err := st.BuilderPubkey(bid.BuilderIndex())
    if err != nil {
        return errors.Wrap(err, "failed to get builder pubkey")
    }

    publicKey, err := bls.PublicKeyFromBytes(pubkey[:])
    if err != nil {
        return errors.Wrap(err, "invalid builder public key")
    }

    signatureBytes := signedBid.Signature()
    signature, err := bls.SignatureFromBytes(signatureBytes[:])
    if err != nil {
        return errors.Wrap(err, "invalid signature format")
    }

    currentEpoch := slots.ToEpoch(bid.Slot())
    domain, err := signing.Domain(
        st.Fork(),
        currentEpoch,
        params.BeaconConfig().DomainBeaconBuilder,
        st.GenesisValidatorsRoot(),
    )
    if err != nil {
        return errors.Wrap(err, "failed to compute signing domain")
    }

    signingRoot, err := signedBid.SigningRoot(domain)
    if err != nil {
        return errors.Wrap(err, "failed to compute signing root")
    }

    if !signature.Verify(publicKey, signingRoot[:]) {
        return signing.ErrSigFailedToVerify
    }

    return nil
}
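
For the self-build branch earlier in this file, the expected signature is the compressed G2 point at infinity, which is what common.InfiniteSignature holds. A quick sketch of that encoding under standard BLS12-381 serialization (compression and infinity flag bits set in the first byte, everything else zero); the helper below is illustrative, not Prysm's:

package main

import (
    "bytes"
    "fmt"
)

// infiniteSignature builds the 96-byte compressed encoding of the BLS12-381
// G2 point at infinity: compression and infinity flags set, all else zero.
func infiniteSignature() [96]byte {
    var sig [96]byte
    sig[0] = 0xc0 // 0b1100_0000: compressed + infinity
    return sig
}

func main() {
    sig := infiniteSignature()
    fmt.Printf("first byte: %#x, rest zero: %v\n", sig[0], bytes.Equal(sig[1:], make([]byte, 95)))
}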

@@ -1,633 +0,0 @@
package gloas

import (
    "bytes"
    "testing"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/crypto/bls/common"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    validatorpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1/validator-client"
    "github.com/OffchainLabs/prysm/v7/runtime/version"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    fastssz "github.com/prysmaticlabs/fastssz"
    "google.golang.org/protobuf/proto"
)

type stubBlockBody struct {
    signedBid *ethpb.SignedExecutionPayloadBid
}

func (s stubBlockBody) Version() int                                 { return version.Gloas }
func (s stubBlockBody) RandaoReveal() [96]byte                       { return [96]byte{} }
func (s stubBlockBody) Eth1Data() *ethpb.Eth1Data                    { return nil }
func (s stubBlockBody) Graffiti() [32]byte                           { return [32]byte{} }
func (s stubBlockBody) ProposerSlashings() []*ethpb.ProposerSlashing { return nil }
func (s stubBlockBody) AttesterSlashings() []ethpb.AttSlashing       { return nil }
func (s stubBlockBody) Attestations() []ethpb.Att                    { return nil }
func (s stubBlockBody) Deposits() []*ethpb.Deposit                   { return nil }
func (s stubBlockBody) VoluntaryExits() []*ethpb.SignedVoluntaryExit { return nil }
func (s stubBlockBody) SyncAggregate() (*ethpb.SyncAggregate, error) { return nil, nil }
func (s stubBlockBody) IsNil() bool                                  { return s.signedBid == nil }
func (s stubBlockBody) HashTreeRoot() ([32]byte, error)              { return [32]byte{}, nil }
func (s stubBlockBody) Proto() (proto.Message, error)                { return nil, nil }
func (s stubBlockBody) Execution() (interfaces.ExecutionData, error) { return nil, nil }
func (s stubBlockBody) BLSToExecutionChanges() ([]*ethpb.SignedBLSToExecutionChange, error) {
    return nil, nil
}
func (s stubBlockBody) BlobKzgCommitments() ([][]byte, error) { return nil, nil }
func (s stubBlockBody) ExecutionRequests() (*enginev1.ExecutionRequests, error) {
    return nil, nil
}
func (s stubBlockBody) PayloadAttestations() ([]*ethpb.PayloadAttestation, error) {
    return nil, nil
}
func (s stubBlockBody) SignedExecutionPayloadBid() (*ethpb.SignedExecutionPayloadBid, error) {
    return s.signedBid, nil
}
func (s stubBlockBody) MarshalSSZ() ([]byte, error)         { return nil, nil }
func (s stubBlockBody) MarshalSSZTo([]byte) ([]byte, error) { return nil, nil }
func (s stubBlockBody) UnmarshalSSZ([]byte) error           { return nil }
func (s stubBlockBody) SizeSSZ() int                        { return 0 }

type stubBlock struct {
    slot       primitives.Slot
    proposer   primitives.ValidatorIndex
    parentRoot [32]byte
    body       stubBlockBody
    v          int
}

var (
    _ interfaces.ReadOnlyBeaconBlockBody = (*stubBlockBody)(nil)
    _ interfaces.ReadOnlyBeaconBlock     = (*stubBlock)(nil)
)

func (s stubBlock) Slot() primitives.Slot                    { return s.slot }
func (s stubBlock) ProposerIndex() primitives.ValidatorIndex { return s.proposer }
func (s stubBlock) ParentRoot() [32]byte                     { return s.parentRoot }
func (s stubBlock) StateRoot() [32]byte                      { return [32]byte{} }
func (s stubBlock) Body() interfaces.ReadOnlyBeaconBlockBody { return s.body }
func (s stubBlock) IsNil() bool                              { return false }
func (s stubBlock) IsBlinded() bool                          { return false }
func (s stubBlock) HashTreeRoot() ([32]byte, error)          { return [32]byte{}, nil }
func (s stubBlock) Proto() (proto.Message, error)            { return nil, nil }
func (s stubBlock) MarshalSSZ() ([]byte, error)              { return nil, nil }
func (s stubBlock) MarshalSSZTo([]byte) ([]byte, error)      { return nil, nil }
func (s stubBlock) UnmarshalSSZ([]byte) error                { return nil }
func (s stubBlock) SizeSSZ() int                             { return 0 }
func (s stubBlock) Version() int                             { return s.v }
func (s stubBlock) AsSignRequestObject() (validatorpb.SignRequestObject, error) {
    return nil, nil
}
func (s stubBlock) HashTreeRootWith(*fastssz.Hasher) error { return nil }

func buildGloasState(t *testing.T, slot primitives.Slot, proposerIdx primitives.ValidatorIndex, builderIdx primitives.BuilderIndex, balance uint64, randao [32]byte, latestHash [32]byte, builderPubkey [48]byte) *state_native.BeaconState {
    t.Helper()

    cfg := params.BeaconConfig()
    blockRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
    stateRoots := make([][]byte, cfg.SlotsPerHistoricalRoot)
    for i := range blockRoots {
        blockRoots[i] = bytes.Repeat([]byte{0xAA}, 32)
        stateRoots[i] = bytes.Repeat([]byte{0xBB}, 32)
    }
    randaoMixes := make([][]byte, cfg.EpochsPerHistoricalVector)
    for i := range randaoMixes {
        randaoMixes[i] = randao[:]
    }

    withdrawalCreds := make([]byte, 32)
    withdrawalCreds[0] = cfg.BuilderWithdrawalPrefixByte

    validatorCount := int(proposerIdx) + 1
    validators := make([]*ethpb.Validator, validatorCount)
    balances := make([]uint64, validatorCount)
    for i := range validatorCount {
        validators[i] = &ethpb.Validator{
            PublicKey:                  builderPubkey[:],
            WithdrawalCredentials:      withdrawalCreds,
            EffectiveBalance:           balance,
            Slashed:                    false,
            ActivationEligibilityEpoch: 0,
            ActivationEpoch:            0,
            ExitEpoch:                  cfg.FarFutureEpoch,
            WithdrawableEpoch:          cfg.FarFutureEpoch,
        }
        balances[i] = balance
    }

    payments := make([]*ethpb.BuilderPendingPayment, cfg.SlotsPerEpoch*2)
    for i := range payments {
        payments[i] = &ethpb.BuilderPendingPayment{Withdrawal: &ethpb.BuilderPendingWithdrawal{}}
    }

    var builders []*ethpb.Builder
    if builderIdx != params.BeaconConfig().BuilderIndexSelfBuild {
        builderCount := int(builderIdx) + 1
        builders = make([]*ethpb.Builder, builderCount)
        builders[builderCount-1] = &ethpb.Builder{
            Pubkey:            builderPubkey[:],
            Version:           []byte{0},
            ExecutionAddress:  bytes.Repeat([]byte{0x01}, 20),
            Balance:           primitives.Gwei(balance),
            DepositEpoch:      0,
            WithdrawableEpoch: cfg.FarFutureEpoch,
        }
    }

    stProto := &ethpb.BeaconStateGloas{
        Slot:                  slot,
        GenesisValidatorsRoot: bytes.Repeat([]byte{0x11}, 32),
        Fork: &ethpb.Fork{
            CurrentVersion:  bytes.Repeat([]byte{0x22}, 4),
            PreviousVersion: bytes.Repeat([]byte{0x22}, 4),
            Epoch:           0,
        },
        BlockRoots:                blockRoots,
        StateRoots:                stateRoots,
        RandaoMixes:               randaoMixes,
        Validators:                validators,
        Balances:                  balances,
        LatestBlockHash:           latestHash[:],
        BuilderPendingPayments:    payments,
        BuilderPendingWithdrawals: []*ethpb.BuilderPendingWithdrawal{},
        Builders:                  builders,
        FinalizedCheckpoint: &ethpb.Checkpoint{
            Epoch: 1,
        },
    }

    st, err := state_native.InitializeFromProtoGloas(stProto)
    require.NoError(t, err)
    return st.(*state_native.BeaconState)
}

func signBid(t *testing.T, sk common.SecretKey, bid *ethpb.ExecutionPayloadBid, fork *ethpb.Fork, genesisRoot [32]byte) [96]byte {
    t.Helper()
    epoch := slots.ToEpoch(primitives.Slot(bid.Slot))
    domain, err := signing.Domain(fork, epoch, params.BeaconConfig().DomainBeaconBuilder, genesisRoot[:])
    require.NoError(t, err)
    root, err := signing.ComputeSigningRoot(bid, domain)
    require.NoError(t, err)
    sig := sk.Sign(root[:]).Marshal()
    var out [96]byte
    copy(out[:], sig)
    return out
}

func TestProcessExecutionPayloadBid_SelfBuildSuccess(t *testing.T) {
    slot := primitives.Slot(12)
    proposerIdx := primitives.ValidatorIndex(0)
    builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))
    pubKey := [48]byte{}
    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xCC}, 32),
        BlockHash:              bytes.Repeat([]byte{0xDD}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  0,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
    }
    signed := &ethpb.SignedExecutionPayloadBid{
        Message:   bid,
        Signature: common.InfiniteSignature[:],
    }

    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    require.NoError(t, ProcessExecutionPayloadBid(state, block))

    stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
    require.Equal(t, true, ok)
    slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
    require.Equal(t, primitives.Gwei(0), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)
}

func TestProcessExecutionPayloadBid_SelfBuildNonZeroAmountFails(t *testing.T) {
    slot := primitives.Slot(2)
    proposerIdx := primitives.ValidatorIndex(0)
    builderIdx := params.BeaconConfig().BuilderIndexSelfBuild
    randao := [32]byte{}
    latestHash := [32]byte{1}
    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinActivationBalance+1000, randao, latestHash, [48]byte{})

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xAA}, 32),
        BlockHash:              bytes.Repeat([]byte{0xBB}, 32),
        PrevRandao:             randao[:],
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  10,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xDD}, 20),
    }
    signed := &ethpb.SignedExecutionPayloadBid{
        Message:   bid,
        Signature: common.InfiniteSignature[:],
    }
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err := ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "self-build amount must be zero", err)
}

func TestProcessExecutionPayloadBid_PendingPaymentAndCacheBid(t *testing.T) {
    slot := primitives.Slot(8)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    pub := sk.PublicKey().Marshal()
    var pubKey [48]byte
    copy(pubKey[:], pub)

    balance := params.BeaconConfig().MinActivationBalance + 1_000_000
    state := buildGloasState(t, slot, proposerIdx, builderIdx, balance, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xCC}, 32),
        BlockHash:              bytes.Repeat([]byte{0xDD}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  500_000,
        ExecutionPayment:       1,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
    }

    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{
        Message:   bid,
        Signature: sig[:],
    }

    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx, // not self-build
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    require.NoError(t, ProcessExecutionPayloadBid(state, block))

    stateProto, ok := state.ToProto().(*ethpb.BeaconStateGloas)
    require.Equal(t, true, ok)
    slotIndex := params.BeaconConfig().SlotsPerEpoch + (slot % params.BeaconConfig().SlotsPerEpoch)
    require.Equal(t, primitives.Gwei(500_000), stateProto.BuilderPendingPayments[slotIndex].Withdrawal.Amount)

    require.NotNil(t, stateProto.LatestExecutionPayloadBid)
    require.Equal(t, primitives.BuilderIndex(1), stateProto.LatestExecutionPayloadBid.BuilderIndex)
    require.Equal(t, primitives.Gwei(500_000), stateProto.LatestExecutionPayloadBid.Value)
}

func TestProcessExecutionPayloadBid_BuilderNotActive(t *testing.T) {
    slot := primitives.Slot(4)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0x01}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0x02}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)
    // Make builder inactive by setting withdrawable_epoch.
    stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
    stateProto.Builders[int(builderIdx)].WithdrawableEpoch = 0
    stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
    require.NoError(t, err)
    state = stateIface.(*state_native.BeaconState)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0x03}, 32),
        BlockHash:              bytes.Repeat([]byte{0x04}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  10,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x05}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x06}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "is not active", err)
}

func TestProcessExecutionPayloadBid_CannotCoverBid(t *testing.T) {
    slot := primitives.Slot(5)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0x0A}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0x0B}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+10, randao, latestHash, pubKey)
    stateProto := state.ToProto().(*ethpb.BeaconStateGloas)
    // Add pending balances to push below required balance.
    stateProto.BuilderPendingWithdrawals = []*ethpb.BuilderPendingWithdrawal{
        {Amount: 15, BuilderIndex: builderIdx},
    }
    stateProto.BuilderPendingPayments = []*ethpb.BuilderPendingPayment{
        {Withdrawal: &ethpb.BuilderPendingWithdrawal{Amount: 20, BuilderIndex: builderIdx}},
    }
    stateIface, err := state_native.InitializeFromProtoGloas(stateProto)
    require.NoError(t, err)
    state = stateIface.(*state_native.BeaconState)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xCC}, 32),
        BlockHash:              bytes.Repeat([]byte{0xDD}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  25,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "cannot cover bid amount", err)
}

func TestProcessExecutionPayloadBid_InvalidSignature(t *testing.T) {
    slot := primitives.Slot(6)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xCC}, 32),
        BlockHash:              bytes.Repeat([]byte{0xDD}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  10,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xEE}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xFF}, 20),
    }
    // Use an invalid signature.
    invalidSig := [96]byte{1}
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: invalidSig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "bid signature validation failed", err)
}

func TestProcessExecutionPayloadBid_SlotMismatch(t *testing.T) {
    slot := primitives.Slot(10)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0xAA}, 32),
        BlockHash:              bytes.Repeat([]byte{0xBB}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot + 1, // mismatch
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0xCC}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0xDD}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "bid slot", err)
}

func TestProcessExecutionPayloadBid_ParentHashMismatch(t *testing.T) {
    slot := primitives.Slot(11)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        bytes.Repeat([]byte{0x11}, 32), // mismatch
        ParentBlockRoot:        bytes.Repeat([]byte{0x22}, 32),
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "parent block hash mismatch", err)
}

func TestProcessExecutionPayloadBid_ParentRootMismatch(t *testing.T) {
    slot := primitives.Slot(12)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    parentRoot := bytes.Repeat([]byte{0x22}, 32)
    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        parentRoot,
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             randao[:],
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bytes.Repeat([]byte{0x99}, 32)), // mismatch
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "parent block root mismatch", err)
}

func TestProcessExecutionPayloadBid_PrevRandaoMismatch(t *testing.T) {
    slot := primitives.Slot(13)
    builderIdx := primitives.BuilderIndex(1)
    proposerIdx := primitives.ValidatorIndex(2)
    randao := [32]byte(bytes.Repeat([]byte{0xAA}, 32))
    latestHash := [32]byte(bytes.Repeat([]byte{0xBB}, 32))

    sk, err := bls.RandKey()
    require.NoError(t, err)
    var pubKey [48]byte
    copy(pubKey[:], sk.PublicKey().Marshal())

    state := buildGloasState(t, slot, proposerIdx, builderIdx, params.BeaconConfig().MinDepositAmount+1000, randao, latestHash, pubKey)

    bid := &ethpb.ExecutionPayloadBid{
        ParentBlockHash:        latestHash[:],
        ParentBlockRoot:        bytes.Repeat([]byte{0x22}, 32),
        BlockHash:              bytes.Repeat([]byte{0x33}, 32),
        PrevRandao:             bytes.Repeat([]byte{0x01}, 32), // mismatch
        GasLimit:               1,
        BuilderIndex:           builderIdx,
        Slot:                   slot,
        Value:                  1,
        ExecutionPayment:       0,
        BlobKzgCommitmentsRoot: bytes.Repeat([]byte{0x44}, 32),
        FeeRecipient:           bytes.Repeat([]byte{0x55}, 20),
    }
    genesis := bytesutil.ToBytes32(state.GenesisValidatorsRoot())
    sig := signBid(t, sk, bid, state.Fork(), genesis)
    signed := &ethpb.SignedExecutionPayloadBid{Message: bid, Signature: sig[:]}
    block := stubBlock{
        slot:       slot,
        proposer:   proposerIdx,
        parentRoot: bytesutil.ToBytes32(bid.ParentBlockRoot),
        body:       stubBlockBody{signedBid: signed},
        v:          version.Gloas,
    }

    err = ProcessExecutionPayloadBid(state, block)
    require.ErrorContains(t, "prev randao mismatch", err)
}

@@ -1,253 +0,0 @@
package gloas

import (
    "bytes"
    "context"
    "encoding/binary"
    "fmt"
    "slices"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    consensus_types "github.com/OffchainLabs/prysm/v7/consensus-types"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/crypto/hash"
    "github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/pkg/errors"
)

// ProcessPayloadAttestations validates payload attestations in a block body.
// Spec v1.7.0-alpha.0 (pseudocode):
// process_payload_attestation(state: BeaconState, payload_attestation: PayloadAttestation):
//
//    data = payload_attestation.data
//    assert data.beacon_block_root == state.latest_block_header.parent_root
//    assert data.slot + 1 == state.slot
//    indexed = get_indexed_payload_attestation(state, data.slot, payload_attestation)
//    assert is_valid_indexed_payload_attestation(state, indexed)
func ProcessPayloadAttestations(ctx context.Context, st state.BeaconState, body interfaces.ReadOnlyBeaconBlockBody) error {
    atts, err := body.PayloadAttestations()
    if err != nil {
        return errors.Wrap(err, "failed to get payload attestations from block body")
    }
    if len(atts) == 0 {
        return nil
    }

    header := st.LatestBlockHeader()

    for i, att := range atts {
        data := att.Data
        if !bytes.Equal(data.BeaconBlockRoot, header.ParentRoot) {
            return fmt.Errorf("payload attestation %d has wrong parent: got %x want %x", i, data.BeaconBlockRoot, header.ParentRoot)
        }

        dataSlot, err := data.Slot.SafeAdd(1)
        if err != nil {
            return errors.Wrapf(err, "payload attestation %d has invalid slot addition", i)
        }
        if dataSlot != st.Slot() {
            return fmt.Errorf("payload attestation %d has wrong slot: got %d want %d", i, data.Slot+1, st.Slot())
        }

        indexed, err := indexedPayloadAttestation(ctx, st, att)
        if err != nil {
            return errors.Wrapf(err, "payload attestation %d failed to convert to indexed form", i)
        }
        if err := validIndexedPayloadAttestation(st, indexed); err != nil {
            return errors.Wrapf(err, "payload attestation %d failed to verify indexed form", i)
        }
    }
    return nil
}
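
The SafeAdd call above guards the data.slot + 1 == state.slot check against wraparound on a hostile slot value of 2^64 - 1. A minimal overflow-checked add mirroring what primitives.Slot.SafeAdd is expected to do (illustrative sketch; Prysm's own implementation lives in consensus-types):

package main

import (
    "errors"
    "fmt"
    "math"
)

// safeAdd returns a+b, or an error if the sum would wrap around uint64.
func safeAdd(a, b uint64) (uint64, error) {
    if a > math.MaxUint64-b {
        return 0, errors.New("addition overflows uint64")
    }
    return a + b, nil
}

func main() {
    fmt.Println(safeAdd(41, 1))             // 42 <nil>
    fmt.Println(safeAdd(math.MaxUint64, 1)) // 0 addition overflows uint64
}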

// indexedPayloadAttestation converts a payload attestation into its indexed form.
func indexedPayloadAttestation(ctx context.Context, st state.ReadOnlyBeaconState, att *eth.PayloadAttestation) (*consensus_types.IndexedPayloadAttestation, error) {
    committee, err := payloadCommittee(ctx, st, att.Data.Slot)
    if err != nil {
        return nil, err
    }
    indices := make([]primitives.ValidatorIndex, 0, len(committee))
    for i, idx := range committee {
        if att.AggregationBits.BitAt(uint64(i)) {
            indices = append(indices, idx)
        }
    }
    slices.Sort(indices)

    return &consensus_types.IndexedPayloadAttestation{
        AttestingIndices: indices,
        Data:             att.Data,
        Signature:        att.Signature,
    }, nil
}

// payloadCommittee returns the payload timeliness committee for a given slot of the given state.
// Spec v1.7.0-alpha.0 (pseudocode):
// get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
//
//    epoch = compute_epoch_at_slot(slot)
//    seed = hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
//    indices = []
//    committees_per_slot = get_committee_count_per_slot(state, epoch)
//    for i in range(committees_per_slot):
//        committee = get_beacon_committee(state, slot, CommitteeIndex(i))
//        indices.extend(committee)
//    return compute_balance_weighted_selection(state, indices, seed, size=PTC_SIZE, shuffle_indices=False)
func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot primitives.Slot) ([]primitives.ValidatorIndex, error) {
    epoch := slots.ToEpoch(slot)
    seed, err := ptcSeed(st, epoch, slot)
    if err != nil {
        return nil, err
    }

    activeCount, err := helpers.ActiveValidatorCount(ctx, st, epoch)
    if err != nil {
        return nil, err
    }

    committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
    out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))

    for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
        committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
        }
        out = append(out, committee...)
    }

    return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
}

// ptcSeed computes the seed for the payload timeliness committee.
func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitives.Slot) ([32]byte, error) {
    seed, err := helpers.Seed(st, epoch, params.BeaconConfig().DomainPTCAttester)
    if err != nil {
        return [32]byte{}, err
    }
    return hash.Hash(append(seed[:], bytesutil.Bytes8(uint64(slot))...)), nil
}
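
The per-slot seed above is just the epoch seed with the slot number appended before hashing, so each slot in an epoch gets an independent committee draw. A standalone sketch under the assumption that sha256 stands in for Prysm's hash.Hash and that bytesutil.Bytes8 is a little-endian uint64 encoding:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

// slotSeed mirrors ptcSeed: hash(epoch_seed || uint64(slot) little-endian).
func slotSeed(epochSeed [32]byte, slot uint64) [32]byte {
    buf := make([]byte, 0, 40)
    buf = append(buf, epochSeed[:]...)
    buf = binary.LittleEndian.AppendUint64(buf, slot)
    return sha256.Sum256(buf)
}

func main() {
    base := sha256.Sum256([]byte("epoch seed"))
    fmt.Printf("slot 0: %x\n", slotSeed(base, 0))
    fmt.Printf("slot 1: %x\n", slotSeed(base, 1)) // differs per slot
}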

// selectByBalance selects a balance-weighted subset of input candidates.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_selection(state, indices, seed, size, shuffle_indices):
// Note: shuffle_indices is false for PTC.
//
//    total = len(indices); selected = []; i = 0
//    while len(selected) < size:
//        next = i % total
//        if shuffle_indices: next = compute_shuffled_index(next, total, seed)
//        if compute_balance_weighted_acceptance(state, indices[next], seed, i):
//            selected.append(indices[next])
//        i += 1
func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
    if len(candidates) == 0 {
        return nil, errors.New("no candidates for balance weighted selection")
    }

    hashFunc := hash.CustomSHA256Hasher()
    // Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
    var buf [40]byte
    copy(buf[:], seed[:])
    maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra

    selected := make([]primitives.ValidatorIndex, 0, count)
    total := uint64(len(candidates))
    for i := uint64(0); uint64(len(selected)) < count; i++ {
        if ctx.Err() != nil {
            return nil, ctx.Err()
        }
        idx := candidates[i%total]
        ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
        if err != nil {
            return nil, err
        }
        if ok {
            selected = append(selected, idx)
        }
    }
    return selected, nil
}

// acceptByBalance determines if a validator is accepted based on its effective balance.
// Spec v1.7.0-alpha.0 (pseudocode):
// compute_balance_weighted_acceptance(state, index, seed, i):
//
//    MAX_RANDOM_VALUE = 2**16 - 1
//    random_bytes = hash(seed + uint_to_bytes(i // 16))
//    offset = i % 16 * 2
//    random_value = bytes_to_uint64(random_bytes[offset:offset+2])
//    effective_balance = state.validators[index].effective_balance
//    return effective_balance * MAX_RANDOM_VALUE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_value
func acceptByBalance(st state.ReadOnlyBeaconState, idx primitives.ValidatorIndex, seedBuf []byte, hashFunc func([]byte) [32]byte, maxBalance uint64, round uint64) (bool, error) {
    // Reuse the seed buffer by overwriting the last 8 bytes with the round counter.
    binary.LittleEndian.PutUint64(seedBuf[len(seedBuf)-8:], round/16)
    random := hashFunc(seedBuf)
    offset := (round % 16) * 2
    randomValue := uint64(binary.LittleEndian.Uint16(random[offset : offset+2])) // 16-bit draw per spec

    val, err := st.ValidatorAtIndex(idx)
    if err != nil {
        return false, errors.Wrapf(err, "validator %d", idx)
    }

    return val.EffectiveBalance*fieldparams.MaxRandomValueElectra >= maxBalance*randomValue, nil
}
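
The acceptance test above is a fixed-point comparison: a candidate with effective balance b is accepted with probability roughly b / MAX_EFFECTIVE_BALANCE_ELECTRA, since random_value is a uniform 16-bit draw. A self-contained sketch of the same arithmetic, assuming sha256 for the hash and 2048 ETH (in Gwei) as a stand-in for MAX_EFFECTIVE_BALANCE_ELECTRA; constants here are illustrative, not Prysm's:

package main

import (
    "crypto/sha256"
    "encoding/binary"
    "fmt"
)

const (
    maxRandomValue = 1<<16 - 1         // MAX_RANDOM_VALUE = 2**16 - 1
    maxEffective   = 2_048_000_000_000 // assumed MAX_EFFECTIVE_BALANCE_ELECTRA (2048 ETH in Gwei)
)

// accept mirrors compute_balance_weighted_acceptance: one 16-bit draw per
// round, sixteen draws carved out of each hash of seed || round/16.
func accept(seed [32]byte, round uint64, effectiveBalance uint64) bool {
    var buf [40]byte
    copy(buf[:32], seed[:])
    binary.LittleEndian.PutUint64(buf[32:], round/16)
    digest := sha256.Sum256(buf[:])
    offset := (round % 16) * 2
    randomValue := uint64(binary.LittleEndian.Uint16(digest[offset : offset+2]))
    return effectiveBalance*maxRandomValue >= maxEffective*randomValue
}

func main() {
    seed := sha256.Sum256([]byte("example seed"))
    hits := 0
    for round := uint64(0); round < 10000; round++ {
        if accept(seed, round, maxEffective/4) { // 512 ETH => ~25% acceptance
            hits++
        }
    }
    fmt.Printf("accepted %d of 10000 (~2500 expected)\n", hits)
}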

// validIndexedPayloadAttestation verifies the signature of an indexed payload attestation.
// Spec v1.7.0-alpha.0 (pseudocode):
// is_valid_indexed_payload_attestation(state: BeaconState, indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
//
//    indices = indexed_payload_attestation.attesting_indices
//    return len(indices) > 0 and indices == sorted(indices) and bls.FastAggregateVerify(
//        [state.validators[i].pubkey for i in indices],
//        compute_signing_root(indexed_payload_attestation.data, get_domain(state, DOMAIN_PTC_ATTESTER, compute_epoch_at_slot(indexed_payload_attestation.data.slot))),
//        indexed_payload_attestation.signature,
//    )
func validIndexedPayloadAttestation(st state.ReadOnlyBeaconState, att *consensus_types.IndexedPayloadAttestation) error {
    indices := att.AttestingIndices
    if len(indices) == 0 || !slices.IsSorted(indices) {
        return errors.New("attesting indices empty or unsorted")
    }

    pubkeys := make([]bls.PublicKey, len(indices))
    for i, idx := range indices {
        val, err := st.ValidatorAtIndexReadOnly(idx)
        if err != nil {
            return errors.Wrapf(err, "validator %d", idx)
        }
        keyBytes := val.PublicKey()
        key, err := bls.PublicKeyFromBytes(keyBytes[:])
        if err != nil {
            return errors.Wrapf(err, "pubkey %d", idx)
        }
        pubkeys[i] = key
    }

    domain, err := signing.Domain(st.Fork(), slots.ToEpoch(att.Data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
    if err != nil {
        return err
    }
    root, err := signing.ComputeSigningRoot(att.Data, domain)
    if err != nil {
        return err
    }
    sig, err := bls.SignatureFromBytes(att.Signature)
    if err != nil {
        return err
    }

    if !sig.FastAggregateVerify(pubkeys, root) {
        return errors.New("invalid signature")
    }
    return nil
}

@@ -1,305 +0,0 @@
package gloas_test

import (
    "bytes"
    "testing"

    "github.com/OffchainLabs/go-bitfield"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/gloas"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
    "github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/crypto/bls"
    "github.com/OffchainLabs/prysm/v7/crypto/bls/common"
    eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/OffchainLabs/prysm/v7/testing/require"
    testutil "github.com/OffchainLabs/prysm/v7/testing/util"
    "github.com/OffchainLabs/prysm/v7/time/slots"
)

func TestProcessPayloadAttestations_WrongParent(t *testing.T) {
    setupTestConfig(t)

    _, pk := newKey(t)
    st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
    require.NoError(t, st.SetSlot(2))
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    att := &eth.PayloadAttestation{
        Data: &eth.PayloadAttestationData{
            BeaconBlockRoot: bytes.Repeat([]byte{0xbb}, 32),
            Slot:            1,
        },
        AggregationBits: bitfield.NewBitvector512(),
        Signature:       make([]byte, 96),
    }
    body := buildBody(t, att)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.ErrorContains(t, "wrong parent", err)
}

func TestProcessPayloadAttestations_WrongSlot(t *testing.T) {
    setupTestConfig(t)

    _, pk := newKey(t)
    st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
    require.NoError(t, st.SetSlot(3))
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    att := &eth.PayloadAttestation{
        Data: &eth.PayloadAttestationData{
            BeaconBlockRoot: parentRoot,
            Slot:            1,
        },
        AggregationBits: bitfield.NewBitvector512(),
        Signature:       make([]byte, 96),
    }
    body := buildBody(t, att)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.ErrorContains(t, "wrong slot", err)
}

func TestProcessPayloadAttestations_InvalidSignature(t *testing.T) {
    setupTestConfig(t)

    _, pk1 := newKey(t)
    sk2, pk2 := newKey(t)
    vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}
    st := newTestState(t, vals, 2)
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    attData := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            1,
    }
    att := &eth.PayloadAttestation{
        Data:            attData,
        AggregationBits: setBits(bitfield.NewBitvector512(), 0),
        Signature:       signAttestation(t, st, attData, []common.SecretKey{sk2}),
    }
    body := buildBody(t, att)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.ErrorContains(t, "failed to verify indexed form", err)
    require.ErrorContains(t, "invalid signature", err)
}

func TestProcessPayloadAttestations_EmptyAggregationBits(t *testing.T) {
    setupTestConfig(t)

    _, pk := newKey(t)
    st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
    require.NoError(t, st.SetSlot(2))
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    attData := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            1,
    }
    att := &eth.PayloadAttestation{
        Data:            attData,
        AggregationBits: bitfield.NewBitvector512(),
        Signature:       make([]byte, 96),
    }
    body := buildBody(t, att)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.ErrorContains(t, "failed to verify indexed form", err)
    require.ErrorContains(t, "attesting indices empty or unsorted", err)
}

func TestProcessPayloadAttestations_HappyPath(t *testing.T) {
    helpers.ClearCache()
    setupTestConfig(t)

    sk1, pk1 := newKey(t)
    sk2, pk2 := newKey(t)
    vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}

    st := newTestState(t, vals, 2)
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    attData := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            1,
    }
    aggBits := bitfield.NewBitvector512()
    aggBits.SetBitAt(0, true)
    aggBits.SetBitAt(1, true)

    att := &eth.PayloadAttestation{
        Data:            attData,
        AggregationBits: aggBits,
        Signature:       signAttestation(t, st, attData, []common.SecretKey{sk1, sk2}),
    }
    body := buildBody(t, att)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.NoError(t, err)
}

func TestProcessPayloadAttestations_MultipleAttestations(t *testing.T) {
    helpers.ClearCache()
    setupTestConfig(t)

    sk1, pk1 := newKey(t)
    sk2, pk2 := newKey(t)
    vals := []*eth.Validator{activeValidator(pk1), activeValidator(pk2)}

    st := newTestState(t, vals, 2)
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    attData1 := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            1,
    }
    attData2 := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            1,
    }

    att1 := &eth.PayloadAttestation{
        Data:            attData1,
        AggregationBits: setBits(bitfield.NewBitvector512(), 0),
        Signature:       signAttestation(t, st, attData1, []common.SecretKey{sk1}),
    }
    att2 := &eth.PayloadAttestation{
        Data:            attData2,
        AggregationBits: setBits(bitfield.NewBitvector512(), 1),
        Signature:       signAttestation(t, st, attData2, []common.SecretKey{sk2}),
    }

    body := buildBody(t, att1, att2)

    err := gloas.ProcessPayloadAttestations(t.Context(), st, body)
    require.NoError(t, err)
}

func TestProcessPayloadAttestations_IndexedVerificationError(t *testing.T) {
    setupTestConfig(t)

    _, pk := newKey(t)
    st := newTestState(t, []*eth.Validator{activeValidator(pk)}, 1)
    parentRoot := bytes.Repeat([]byte{0xaa}, 32)
    require.NoError(t, st.SetLatestBlockHeader(&eth.BeaconBlockHeader{ParentRoot: parentRoot}))

    attData := &eth.PayloadAttestationData{
        BeaconBlockRoot: parentRoot,
        Slot:            0,
    }
    att := &eth.PayloadAttestation{
        Data:            attData,
        AggregationBits: setBits(bitfield.NewBitvector512(), 0),
        Signature:       make([]byte, 96),
    }
    body := buildBody(t, att)

    errState := &validatorLookupErrState{
        BeaconState: st,
        errIndex:    0,
    }
    err := gloas.ProcessPayloadAttestations(t.Context(), errState, body)
    require.ErrorContains(t, "failed to verify indexed form", err)
    require.ErrorContains(t, "validator 0", err)
}

func newTestState(t *testing.T, vals []*eth.Validator, slot primitives.Slot) state.BeaconState {
    st, err := testutil.NewBeaconState()
    require.NoError(t, err)
    for _, v := range vals {
        require.NoError(t, st.AppendValidator(v))
        require.NoError(t, st.AppendBalance(v.EffectiveBalance))
    }
    require.NoError(t, st.SetSlot(slot))
    require.NoError(t, helpers.UpdateCommitteeCache(t.Context(), st, slots.ToEpoch(slot)))
    return st
}

func setupTestConfig(t *testing.T) {
    params.SetupTestConfigCleanup(t)
    cfg := params.BeaconConfig().Copy()
    cfg.SlotsPerEpoch = 1
    cfg.MaxEffectiveBalanceElectra = cfg.MaxEffectiveBalance
    params.OverrideBeaconConfig(cfg)
}

func buildBody(t *testing.T, atts ...*eth.PayloadAttestation) interfaces.ReadOnlyBeaconBlockBody {
    body := &eth.BeaconBlockBodyGloas{
        PayloadAttestations:   atts,
        RandaoReveal:          make([]byte, 96),
        Eth1Data:              &eth.Eth1Data{},
        Graffiti:              make([]byte, 32),
        ProposerSlashings:     []*eth.ProposerSlashing{},
        AttesterSlashings:     []*eth.AttesterSlashingElectra{},
        Attestations:          []*eth.AttestationElectra{},
        Deposits:              []*eth.Deposit{},
        VoluntaryExits:        []*eth.SignedVoluntaryExit{},
        SyncAggregate:         &eth.SyncAggregate{},
        BlsToExecutionChanges: []*eth.SignedBLSToExecutionChange{},
    }
    wrapped, err := blocks.NewBeaconBlockBody(body)
    require.NoError(t, err)
    return wrapped
}

func setBits(bits bitfield.Bitvector512, idx uint64) bitfield.Bitvector512 {
    bits.SetBitAt(idx, true)
    return bits
}

func activeValidator(pub []byte) *eth.Validator {
    return &eth.Validator{
        PublicKey:                  pub,
        EffectiveBalance:           params.BeaconConfig().MaxEffectiveBalance,
        WithdrawalCredentials:      make([]byte, 32),
        ActivationEligibilityEpoch: 0,
        ActivationEpoch:            0,
        ExitEpoch:                  params.BeaconConfig().FarFutureEpoch,
        WithdrawableEpoch:          params.BeaconConfig().FarFutureEpoch,
    }
}

func newKey(t *testing.T) (common.SecretKey, []byte) {
    sk, err := bls.RandKey()
    require.NoError(t, err)
    return sk, sk.PublicKey().Marshal()
}

func signAttestation(t *testing.T, st state.ReadOnlyBeaconState, data *eth.PayloadAttestationData, sks []common.SecretKey) []byte {
    domain, err := signing.Domain(st.Fork(), slots.ToEpoch(data.Slot), params.BeaconConfig().DomainPTCAttester, st.GenesisValidatorsRoot())
    require.NoError(t, err)
    root, err := signing.ComputeSigningRoot(data, domain)
    require.NoError(t, err)

    sigs := make([]common.Signature, len(sks))
    for i, sk := range sks {
        sigs[i] = sk.Sign(root[:])
    }
    agg := bls.AggregateSignatures(sigs)
    return agg.Marshal()
}

type validatorLookupErrState struct {
    state.BeaconState
    errIndex primitives.ValidatorIndex
}

// ValidatorAtIndexReadOnly is overridden to simulate a missing validator lookup.
func (s *validatorLookupErrState) ValidatorAtIndexReadOnly(idx primitives.ValidatorIndex) (state.ReadOnlyValidator, error) {
    if idx == s.errIndex {
        return nil, state.ErrNilValidatorsInState
    }
    return s.BeaconState.ValidatorAtIndexReadOnly(idx)
}

@@ -1,76 +0,0 @@
package gloas

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
    "github.com/pkg/errors"
)

// ProcessBuilderPendingPayments processes the builder pending payments from the previous epoch.
// Spec v1.7.0-alpha.0 (pseudocode):
// def process_builder_pending_payments(state: BeaconState) -> None:
//
//    quorum = get_builder_payment_quorum_threshold(state)
//    for payment in state.builder_pending_payments[:SLOTS_PER_EPOCH]:
//        if payment.weight >= quorum:
//            state.builder_pending_withdrawals.append(payment.withdrawal)
//
//    old_payments = state.builder_pending_payments[SLOTS_PER_EPOCH:]
//    new_payments = [BuilderPendingPayment() for _ in range(SLOTS_PER_EPOCH)]
//    state.builder_pending_payments = old_payments + new_payments
func ProcessBuilderPendingPayments(state state.BeaconState) error {
    quorum, err := builderQuorumThreshold(state)
    if err != nil {
        return errors.Wrap(err, "could not compute builder payment quorum threshold")
    }

    payments, err := state.BuilderPendingPayments()
    if err != nil {
        return errors.Wrap(err, "could not get builder pending payments")
    }

    slotsPerEpoch := uint64(params.BeaconConfig().SlotsPerEpoch)
    var withdrawals []*ethpb.BuilderPendingWithdrawal
    for _, payment := range payments[:slotsPerEpoch] {
        if quorum > payment.Weight {
            continue
        }
        withdrawals = append(withdrawals, payment.Withdrawal)
    }

    if err := state.AppendBuilderPendingWithdrawals(withdrawals); err != nil {
        return errors.Wrap(err, "could not append builder pending withdrawals")
    }

    if err := state.RotateBuilderPendingPayments(); err != nil {
        return errors.Wrap(err, "could not rotate builder pending payments")
    }

    return nil
}
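
The rotation step keeps a two-epoch window: after processing, the newer epoch's payments slide into the first half and a zeroed second half is appended. A minimal sketch of that rotation over a plain slice (illustrative only; Prysm performs this inside RotateBuilderPendingPayments on the state object):

package main

import "fmt"

type payment struct{ weight uint64 }

// rotate mirrors: builder_pending_payments = old[SLOTS_PER_EPOCH:] + zeroed epoch.
func rotate(payments []payment, slotsPerEpoch int) []payment {
    out := make([]payment, 0, 2*slotsPerEpoch)
    out = append(out, payments[slotsPerEpoch:]...)       // keep the newer epoch
    out = append(out, make([]payment, slotsPerEpoch)...) // fresh zeroed epoch
    return out
}

func main() {
    p := []payment{{1}, {2}, {3}, {4}} // slotsPerEpoch = 2: [prev epoch | current epoch]
    fmt.Println(rotate(p, 2))          // [{3} {4} {0} {0}]
}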

// builderQuorumThreshold calculates the quorum threshold for builder payments.
// Spec v1.7.0-alpha.0 (pseudocode):
// def get_builder_payment_quorum_threshold(state: BeaconState) -> uint64:
//
//    per_slot_balance = get_total_active_balance(state) // SLOTS_PER_EPOCH
//    quorum = per_slot_balance * BUILDER_PAYMENT_THRESHOLD_NUMERATOR
//    return uint64(quorum // BUILDER_PAYMENT_THRESHOLD_DENOMINATOR)
func builderQuorumThreshold(state state.ReadOnlyBeaconState) (primitives.Gwei, error) {
    activeBalance, err := helpers.TotalActiveBalance(state)
    if err != nil {
        return 0, errors.Wrap(err, "could not get total active balance")
    }

    cfg := params.BeaconConfig()
    slotsPerEpoch := uint64(cfg.SlotsPerEpoch)
    numerator := cfg.BuilderPaymentThresholdNumerator
    denominator := cfg.BuilderPaymentThresholdDenominator

    activeBalancePerSlot := activeBalance / slotsPerEpoch
    quorum := (activeBalancePerSlot * numerator) / denominator
    return primitives.Gwei(quorum), nil
}
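
As a worked example of the threshold arithmetic: with 640 ETH of total active balance, 32 slots per epoch, and an assumed 6/10 threshold ratio (the numerator and denominator are config values; these numbers are illustrative), the per-slot balance is 20 ETH and the quorum is 12 ETH. A sketch:

package main

import "fmt"

// quorum mirrors get_builder_payment_quorum_threshold with illustrative values.
func quorum(totalActiveGwei, slotsPerEpoch, num, den uint64) uint64 {
    perSlot := totalActiveGwei / slotsPerEpoch
    return perSlot * num / den
}

func main() {
    const gweiPerEth = 1_000_000_000
    // 640 ETH active, 32 slots/epoch, assumed 6/10 threshold ratio.
    fmt.Println(quorum(640*gweiPerEth, 32, 6, 10)) // 12000000000 (12 ETH)
}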
|
||||
@@ -1,119 +0,0 @@
package gloas

import (
	"slices"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestBuilderQuorumThreshold(t *testing.T) {
	helpers.ClearCache()
	cfg := params.BeaconConfig()

	validators := []*ethpb.Validator{
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
	}
	st, err := state_native.InitializeFromProtoUnsafeGloas(&ethpb.BeaconStateGloas{Validators: validators})
	require.NoError(t, err)

	got, err := builderQuorumThreshold(st)
	require.NoError(t, err)

	total := uint64(len(validators)) * cfg.MaxEffectiveBalance
	perSlot := total / uint64(cfg.SlotsPerEpoch)
	want := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
	require.Equal(t, primitives.Gwei(want), got)
}

func TestProcessBuilderPendingPayments(t *testing.T) {
	helpers.ClearCache()
	cfg := params.BeaconConfig()

	buildPayments := func(weights ...primitives.Gwei) []*ethpb.BuilderPendingPayment {
		p := make([]*ethpb.BuilderPendingPayment, 2*int(cfg.SlotsPerEpoch))
		for i := range p {
			p[i] = &ethpb.BuilderPendingPayment{
				Withdrawal: &ethpb.BuilderPendingWithdrawal{FeeRecipient: make([]byte, 20)},
			}
		}
		for i, w := range weights {
			p[i].Weight = w
			p[i].Withdrawal.Amount = 1
		}
		return p
	}

	validators := []*ethpb.Validator{
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
		{EffectiveBalance: cfg.MaxEffectiveBalance, ActivationEpoch: 0, ExitEpoch: 1},
	}
	pbSt, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{Validators: validators})
	require.NoError(t, err)

	total := uint64(len(validators)) * cfg.MaxEffectiveBalance
	perSlot := total / uint64(cfg.SlotsPerEpoch)
	quorum := (perSlot * cfg.BuilderPaymentThresholdNumerator) / cfg.BuilderPaymentThresholdDenominator
	slotsPerEpoch := int(cfg.SlotsPerEpoch)

	t.Run("append qualifying withdrawals", func(t *testing.T) {
		payments := buildPayments(primitives.Gwei(quorum+1), primitives.Gwei(quorum+2))
		st := &testProcessState{BeaconState: pbSt, payments: payments}

		require.NoError(t, ProcessBuilderPendingPayments(st))
		require.Equal(t, 2, len(st.withdrawals))
		require.Equal(t, payments[0].Withdrawal, st.withdrawals[0])
		require.Equal(t, payments[1].Withdrawal, st.withdrawals[1])

		require.Equal(t, 2*slotsPerEpoch, len(st.payments))
		for i := slotsPerEpoch; i < 2*slotsPerEpoch; i++ {
			require.Equal(t, primitives.Gwei(0), st.payments[i].Weight)
			require.Equal(t, primitives.Gwei(0), st.payments[i].Withdrawal.Amount)
			require.Equal(t, 20, len(st.payments[i].Withdrawal.FeeRecipient))
		}
	})

	t.Run("no withdrawals when below quorum", func(t *testing.T) {
		payments := buildPayments(primitives.Gwei(quorum - 1))
		st := &testProcessState{BeaconState: pbSt, payments: payments}

		require.NoError(t, ProcessBuilderPendingPayments(st))
		require.Equal(t, 0, len(st.withdrawals))
	})
}

type testProcessState struct {
	state.BeaconState
	payments    []*ethpb.BuilderPendingPayment
	withdrawals []*ethpb.BuilderPendingWithdrawal
}

func (t *testProcessState) BuilderPendingPayments() ([]*ethpb.BuilderPendingPayment, error) {
	return t.payments, nil
}

func (t *testProcessState) AppendBuilderPendingWithdrawals(withdrawals []*ethpb.BuilderPendingWithdrawal) error {
	t.withdrawals = append(t.withdrawals, withdrawals...)
	return nil
}

func (t *testProcessState) RotateBuilderPendingPayments() error {
	slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
	rotated := slices.Clone(t.payments[slotsPerEpoch:])
	for range slotsPerEpoch {
		rotated = append(rotated, &ethpb.BuilderPendingPayment{
			Withdrawal: &ethpb.BuilderPendingWithdrawal{
				FeeRecipient: make([]byte, 20),
			},
		})
	}
	t.payments = rotated
	return nil
}
@@ -1,43 +0,0 @@
package gloas

import (
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/pkg/errors"
)

// RemoveBuilderPendingPayment removes the pending builder payment for the proposal slot.
// Spec v1.7.0 (pseudocode):
//
//	slot = header_1.slot
//	proposal_epoch = compute_epoch_at_slot(slot)
//	if proposal_epoch == get_current_epoch(state):
//	    payment_index = SLOTS_PER_EPOCH + slot % SLOTS_PER_EPOCH
//	    state.builder_pending_payments[payment_index] = BuilderPendingPayment()
//	elif proposal_epoch == get_previous_epoch(state):
//	    payment_index = slot % SLOTS_PER_EPOCH
//	    state.builder_pending_payments[payment_index] = BuilderPendingPayment()
func RemoveBuilderPendingPayment(st state.BeaconState, header *eth.BeaconBlockHeader) error {
	proposalEpoch := slots.ToEpoch(header.Slot)
	currentEpoch := time.CurrentEpoch(st)
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch

	var paymentIndex primitives.Slot
	if proposalEpoch == currentEpoch {
		paymentIndex = slotsPerEpoch + header.Slot%slotsPerEpoch
	} else if proposalEpoch+1 == currentEpoch {
		paymentIndex = header.Slot % slotsPerEpoch
	} else {
		return nil
	}

	if err := st.ClearBuilderPendingPayment(paymentIndex); err != nil {
		return errors.Wrap(err, "could not clear builder pending payment")
	}

	return nil
}
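A standalone sketch (simplified, for illustration only) of the payment-index arithmetic used above: slots in the current epoch map to the second half of the two-epoch payment buffer, slots in the previous epoch map to the first half, and anything older is left untouched. Epochs are computed with plain integer division here instead of the slots/time helpers.

package main

import "fmt"

func paymentIndex(headerSlot, stateSlot, slotsPerEpoch uint64) (uint64, bool) {
	proposalEpoch := headerSlot / slotsPerEpoch
	currentEpoch := stateSlot / slotsPerEpoch
	switch {
	case proposalEpoch == currentEpoch:
		return slotsPerEpoch + headerSlot%slotsPerEpoch, true
	case proposalEpoch+1 == currentEpoch:
		return headerSlot % slotsPerEpoch, true
	default:
		return 0, false // too old: no payment to clear
	}
}

func main() {
	const spe = 32
	fmt.Println(paymentIndex(64, 65, spe)) // current epoch  -> 32 + 64%32 = 32, true
	fmt.Println(paymentIndex(39, 65, spe)) // previous epoch -> 39%32 = 7, true
	fmt.Println(paymentIndex(10, 65, spe)) // two epochs back -> 0, false
}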
@@ -1,112 +0,0 @@
package gloas

import (
	"bytes"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestRemoveBuilderPendingPayment_CurrentEpoch(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*2 + 1
	headerSlot := slotsPerEpoch * 2

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(slotsPerEpoch + headerSlot%slotsPerEpoch)

	setPendingPayment(t, st, paymentIndex, 123)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	got := getPendingPayment(t, st, paymentIndex)
	require.NotNil(t, got.Withdrawal)
	require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
	require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_PreviousEpoch(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*2 + 1
	headerSlot := slotsPerEpoch + 7

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(headerSlot % slotsPerEpoch)

	setPendingPayment(t, st, paymentIndex, 456)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	got := getPendingPayment(t, st, paymentIndex)
	require.NotNil(t, got.Withdrawal)
	require.DeepEqual(t, make([]byte, 20), got.Withdrawal.FeeRecipient)
	require.Equal(t, uint64(0), uint64(got.Withdrawal.Amount))
}

func TestRemoveBuilderPendingPayment_OlderThanTwoEpochs(t *testing.T) {
	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	stateSlot := slotsPerEpoch*4 + 1 // current epoch far ahead
	headerSlot := slotsPerEpoch * 2  // two epochs behind

	st := newGloasStateWithPayments(t, stateSlot)
	paymentIndex := int(headerSlot % slotsPerEpoch)

	original := getPendingPayment(t, st, paymentIndex)

	err := RemoveBuilderPendingPayment(st, &eth.BeaconBlockHeader{Slot: headerSlot})
	require.NoError(t, err)

	after := getPendingPayment(t, st, paymentIndex)
	require.DeepEqual(t, original.Withdrawal.FeeRecipient, after.Withdrawal.FeeRecipient)
	require.Equal(t, original.Withdrawal.Amount, after.Withdrawal.Amount)
}

func newGloasStateWithPayments(t *testing.T, slot primitives.Slot) state.BeaconState {
	t.Helper()

	slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
	paymentCount := int(slotsPerEpoch * 2)
	payments := make([]*eth.BuilderPendingPayment, paymentCount)
	for i := range payments {
		payments[i] = &eth.BuilderPendingPayment{
			Withdrawal: &eth.BuilderPendingWithdrawal{
				FeeRecipient: bytes.Repeat([]byte{0x01}, 20),
				Amount:       1,
			},
		}
	}

	st, err := state_native.InitializeFromProtoUnsafeGloas(&eth.BeaconStateGloas{
		Slot:                   slot,
		BuilderPendingPayments: payments,
	})
	require.NoError(t, err)
	return st
}

func setPendingPayment(t *testing.T, st state.BeaconState, index int, amount uint64) {
	t.Helper()

	payment := &eth.BuilderPendingPayment{
		Withdrawal: &eth.BuilderPendingWithdrawal{
			FeeRecipient: bytes.Repeat([]byte{0x02}, 20),
			Amount:       primitives.Gwei(amount),
		},
	}
	require.NoError(t, st.SetBuilderPendingPayment(primitives.Slot(index), payment))
}

func getPendingPayment(t *testing.T, st state.BeaconState, index int) *eth.BuilderPendingPayment {
	t.Helper()

	stateProto := st.ToProtoUnsafe().(*eth.BeaconStateGloas)

	return stateProto.BuilderPendingPayments[index]
}
@@ -6,7 +6,6 @@ go_library(
        "attestation.go",
        "beacon_committee.go",
        "block.go",
        "deposit.go",
        "genesis.go",
        "legacy.go",
        "log.go",
@@ -24,7 +23,6 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -33,7 +31,6 @@ go_library(
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//container/trie:go_default_library",
        "//contracts/deposit:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
@@ -57,7 +54,6 @@ go_test(
        "attestation_test.go",
        "beacon_committee_test.go",
        "block_test.go",
        "deposit_test.go",
        "legacy_test.go",
        "private_access_fuzz_noop_test.go",  # keep
        "private_access_test.go",
@@ -76,7 +72,6 @@ go_test(
    tags = ["CI_race_detection"],
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
@@ -85,8 +80,6 @@ go_test(
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",
        "//container/trie:go_default_library",
        "//crypto/bls:go_default_library",
        "//crypto/hash:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -1,60 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "consolidations.go",
        "deposits.go",
        "log.go",
        "withdrawals.go",
    ],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests",
    visibility = ["//visibility:public"],
    deps = [
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls/common:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//math:go_default_library",
        "//monitoring/tracing/trace:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//runtime/version:go_default_library",
        "//time/slots:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_ethereum_go_ethereum//common/math:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "consolidations_test.go",
        "deposits_test.go",
        "withdrawals_test.go",
    ],
    deps = [
        ":go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//crypto/bls:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/engine/v1:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
        "//testing/assert:go_default_library",
        "//testing/require:go_default_library",
        "//testing/util:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
    ],
)
@@ -1,365 +0,0 @@
package requests

import (
	"bytes"
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/crypto/bls/common"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	prysmMath "github.com/OffchainLabs/prysm/v7/math"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/pkg/errors"
)

// ProcessConsolidationRequests implements the spec definition below. This method makes mutating
// calls to the beacon state.
//
//	def process_consolidation_request(
//	    state: BeaconState,
//	    consolidation_request: ConsolidationRequest
//	) -> None:
//	    if is_valid_switch_to_compounding_request(state, consolidation_request):
//	        validator_pubkeys = [v.pubkey for v in state.validators]
//	        request_source_pubkey = consolidation_request.source_pubkey
//	        source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//	        switch_to_compounding_validator(state, source_index)
//	        return
//
//	    # Verify that source != target, so a consolidation cannot be used as an exit.
//	    if consolidation_request.source_pubkey == consolidation_request.target_pubkey:
//	        return
//	    # If the pending consolidations queue is full, consolidation requests are ignored
//	    if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
//	        return
//	    # If there is too little available consolidation churn limit, consolidation requests are ignored
//	    if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
//	        return
//
//	    validator_pubkeys = [v.pubkey for v in state.validators]
//	    # Verify pubkeys exists
//	    request_source_pubkey = consolidation_request.source_pubkey
//	    request_target_pubkey = consolidation_request.target_pubkey
//	    if request_source_pubkey not in validator_pubkeys:
//	        return
//	    if request_target_pubkey not in validator_pubkeys:
//	        return
//	    source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
//	    target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
//	    source_validator = state.validators[source_index]
//	    target_validator = state.validators[target_index]
//
//	    # Verify source withdrawal credentials
//	    has_correct_credential = has_execution_withdrawal_credential(source_validator)
//	    is_correct_source_address = (
//	        source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
//	    )
//	    if not (has_correct_credential and is_correct_source_address):
//	        return
//
//	    # Verify that target has compounding withdrawal credentials
//	    if not has_compounding_withdrawal_credential(target_validator):
//	        return
//
//	    # Verify the source and the target are active
//	    current_epoch = get_current_epoch(state)
//	    if not is_active_validator(source_validator, current_epoch):
//	        return
//	    if not is_active_validator(target_validator, current_epoch):
//	        return
//	    # Verify exits for source and target have not been initiated
//	    if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
//	        return
//	    if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
//	        return
//
//	    # Verify the source has been active long enough
//	    if current_epoch < source_validator.activation_epoch + SHARD_COMMITTEE_PERIOD:
//	        return
//
//	    # Verify the source has no pending withdrawals in the queue
//	    if get_pending_balance_to_withdraw(state, source_index) > 0:
//	        return
//	    # Initiate source validator exit and append pending consolidation
//	    source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
//	        state, source_validator.effective_balance
//	    )
//	    source_validator.withdrawable_epoch = Epoch(
//	        source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
//	    )
//	    state.pending_consolidations.append(PendingConsolidation(
//	        source_index=source_index,
//	        target_index=target_index
//	    ))
func ProcessConsolidationRequests(ctx context.Context, st state.BeaconState, reqs []*enginev1.ConsolidationRequest) error {
	ctx, span := trace.StartSpan(ctx, "requests.ProcessConsolidationRequests")
	defer span.End()

	if len(reqs) == 0 || st == nil {
		return nil
	}
	curEpoch := slots.ToEpoch(st.Slot())
	ffe := params.BeaconConfig().FarFutureEpoch
	minValWithdrawDelay := params.BeaconConfig().MinValidatorWithdrawabilityDelay
	pcLimit := params.BeaconConfig().PendingConsolidationsLimit

	for _, cr := range reqs {
		if cr == nil {
			return errors.New("nil consolidation request")
		}
		if ctx.Err() != nil {
			return fmt.Errorf("cannot process consolidation requests: %w", ctx.Err())
		}

		if isValidSwitchToCompoundingRequest(st, cr) {
			srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(cr.SourcePubkey))
			if !ok {
				log.Error("Failed to find source validator index")
				continue
			}
			if err := switchToCompoundingValidator(st, srcIdx); err != nil {
				log.WithError(err).Error("Failed to switch to compounding validator")
			}
			continue
		}

		sourcePubkey := bytesutil.ToBytes48(cr.SourcePubkey)
		targetPubkey := bytesutil.ToBytes48(cr.TargetPubkey)
		if sourcePubkey == targetPubkey {
			continue
		}

		if npc, err := st.NumPendingConsolidations(); err != nil {
			return fmt.Errorf("failed to fetch number of pending consolidations: %w", err) // This should never happen.
		} else if npc >= pcLimit {
			continue
		}

		activeBal, err := helpers.TotalActiveBalance(st)
		if err != nil {
			return err
		}
		churnLimit := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))
		if churnLimit <= primitives.Gwei(params.BeaconConfig().MinActivationBalance) {
			continue
		}

		srcIdx, ok := st.ValidatorIndexByPubkey(sourcePubkey)
		if !ok {
			continue
		}
		tgtIdx, ok := st.ValidatorIndexByPubkey(targetPubkey)
		if !ok {
			continue
		}

		srcV, err := st.ValidatorAtIndex(srcIdx)
		if err != nil {
			return fmt.Errorf("failed to fetch source validator: %w", err) // This should never happen.
		}

		roSrcV, err := state_native.NewValidator(srcV)
		if err != nil {
			return err
		}

		tgtV, err := st.ValidatorAtIndexReadOnly(tgtIdx)
		if err != nil {
			return fmt.Errorf("failed to fetch target validator: %w", err) // This should never happen.
		}

		// Verify source withdrawal credentials.
		if !roSrcV.HasExecutionWithdrawalCredentials() {
			continue
		}
		// Confirm source_validator.withdrawal_credentials[12:] == consolidation_request.source_address.
		if len(srcV.WithdrawalCredentials) != 32 || len(cr.SourceAddress) != 20 || !bytes.HasSuffix(srcV.WithdrawalCredentials, cr.SourceAddress) {
			continue
		}

		// Target validator must have their withdrawal credentials set appropriately.
		if !tgtV.HasCompoundingWithdrawalCredentials() {
			continue
		}

		// Both validators must be active.
		if !helpers.IsActiveValidator(srcV, curEpoch) || !helpers.IsActiveValidatorUsingTrie(tgtV, curEpoch) {
			continue
		}
		// Neither validator is exiting.
		if srcV.ExitEpoch != ffe || tgtV.ExitEpoch() != ffe {
			continue
		}

		e, overflow := math.SafeAdd(uint64(srcV.ActivationEpoch), uint64(params.BeaconConfig().ShardCommitteePeriod))
		if overflow {
			log.Error("Overflow when adding activation epoch and shard committee period")
			continue
		}
		if uint64(curEpoch) < e {
			continue
		}

		hasBal, err := st.HasPendingBalanceToWithdraw(srcIdx)
		if err != nil {
			log.WithError(err).Error("Failed to fetch pending balance to withdraw")
			continue
		}
		if hasBal {
			continue
		}

		exitEpoch, err := computeConsolidationEpochAndUpdateChurn(st, primitives.Gwei(srcV.EffectiveBalance))
		if err != nil {
			log.WithError(err).Error("Failed to compute consolidation epoch")
			continue
		}
		srcV.ExitEpoch = exitEpoch
		srcV.WithdrawableEpoch = exitEpoch + minValWithdrawDelay
		if err := st.UpdateValidatorAtIndex(srcIdx, srcV); err != nil {
			return fmt.Errorf("failed to update validator: %w", err) // This should never happen.
		}

		if err := st.AppendPendingConsolidation(&eth.PendingConsolidation{SourceIndex: srcIdx, TargetIndex: tgtIdx}); err != nil {
			return fmt.Errorf("failed to append pending consolidation: %w", err) // This should never happen.
		}
	}

	return nil
}

func isValidSwitchToCompoundingRequest(st state.BeaconState, req *enginev1.ConsolidationRequest) bool {
	if req.SourcePubkey == nil || req.TargetPubkey == nil {
		return false
	}

	if !bytes.Equal(req.SourcePubkey, req.TargetPubkey) {
		return false
	}

	srcIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(req.SourcePubkey))
	if !ok {
		return false
	}
	srcV, err := st.ValidatorAtIndexReadOnly(srcIdx)
	if err != nil {
		return false
	}

	sourceAddress := req.SourceAddress
	withdrawalCreds := srcV.GetWithdrawalCredentials()
	if len(withdrawalCreds) != 32 || len(sourceAddress) != 20 || !bytes.HasSuffix(withdrawalCreds, sourceAddress) {
		return false
	}

	if !srcV.HasETH1WithdrawalCredentials() {
		return false
	}

	curEpoch := slots.ToEpoch(st.Slot())
	if !helpers.IsActiveValidatorUsingTrie(srcV, curEpoch) {
		return false
	}

	if srcV.ExitEpoch() != params.BeaconConfig().FarFutureEpoch {
		return false
	}

	return true
}

func switchToCompoundingValidator(st state.BeaconState, idx primitives.ValidatorIndex) error {
	v, err := st.ValidatorAtIndex(idx)
	if err != nil {
		return err
	}
	if len(v.WithdrawalCredentials) == 0 {
		return errors.New("validator has no withdrawal credentials")
	}

	v.WithdrawalCredentials[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
	if err := st.UpdateValidatorAtIndex(idx, v); err != nil {
		return err
	}
	return queueExcessActiveBalance(st, idx)
}

func queueExcessActiveBalance(st state.BeaconState, idx primitives.ValidatorIndex) error {
	bal, err := st.BalanceAtIndex(idx)
	if err != nil {
		return err
	}

	if bal > params.BeaconConfig().MinActivationBalance {
		if err := st.UpdateBalancesAtIndex(idx, params.BeaconConfig().MinActivationBalance); err != nil {
			return err
		}
		excessBalance := bal - params.BeaconConfig().MinActivationBalance
		val, err := st.ValidatorAtIndexReadOnly(idx)
		if err != nil {
			return err
		}
		pk := val.PublicKey()
		return st.AppendPendingDeposit(&eth.PendingDeposit{
			PublicKey:             pk[:],
			WithdrawalCredentials: val.GetWithdrawalCredentials(),
			Amount:                excessBalance,
			Signature:             common.InfiniteSignature[:],
			Slot:                  params.BeaconConfig().GenesisSlot,
		})
	}
	return nil
}

func computeConsolidationEpochAndUpdateChurn(st state.BeaconState, consolidationBalance primitives.Gwei) (primitives.Epoch, error) {
	earliestEpoch, err := st.EarliestConsolidationEpoch()
	if err != nil {
		return 0, err
	}
	earliestConsolidationEpoch := max(earliestEpoch, helpers.ActivationExitEpoch(slots.ToEpoch(st.Slot())))

	activeBal, err := helpers.TotalActiveBalance(st)
	if err != nil {
		return 0, err
	}
	perEpochConsolidationChurn := helpers.ConsolidationChurnLimit(primitives.Gwei(activeBal))

	var consolidationBalanceToConsume primitives.Gwei
	if earliestEpoch < earliestConsolidationEpoch {
		consolidationBalanceToConsume = perEpochConsolidationChurn
	} else {
		consolidationBalanceToConsume, err = st.ConsolidationBalanceToConsume()
		if err != nil {
			return 0, err
		}
	}

	if consolidationBalance > consolidationBalanceToConsume {
		balanceToProcess := consolidationBalance - consolidationBalanceToConsume
		additionalEpochs, err := prysmMath.Div64(uint64(balanceToProcess-1), uint64(perEpochConsolidationChurn))
		if err != nil {
			return 0, err
		}
		additionalEpochs++
		earliestConsolidationEpoch += primitives.Epoch(additionalEpochs)
		consolidationBalanceToConsume += primitives.Gwei(additionalEpochs) * perEpochConsolidationChurn
	}

	if err := st.SetConsolidationBalanceToConsume(consolidationBalanceToConsume - consolidationBalance); err != nil {
		return 0, err
	}
	if err := st.SetEarliestConsolidationEpoch(earliestConsolidationEpoch); err != nil {
		return 0, err
	}

	return earliestConsolidationEpoch, nil
}
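A standalone sketch of the churn accounting in computeConsolidationEpochAndUpdateChurn above: when a consolidation's balance exceeds what is left to consume in the earliest available epoch, the exit is pushed forward by ceil(remainder / per-epoch churn) epochs. All numeric values below are assumptions for illustration.

package main

import "fmt"

func consolidationEpoch(earliest, perEpochChurn, toConsume, balance uint64) (epoch, leftover uint64) {
	if balance > toConsume {
		remainder := balance - toConsume
		additional := (remainder-1)/perEpochChurn + 1 // ceiling division, as in the Div64 call above
		earliest += additional
		toConsume += additional * perEpochChurn
	}
	return earliest, toConsume - balance
}

func main() {
	// 256 Gwei of churn per epoch, 100 Gwei left this epoch,
	// consolidating a 600 Gwei balance (all values assumed).
	epoch, left := consolidationEpoch(10, 256, 100, 600)
	fmt.Println(epoch, left) // ceil(500/256) = 2 extra epochs -> epoch 12, 12 Gwei left over
}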
@@ -1,316 +0,0 @@
package requests_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func createValidatorsWithTotalActiveBalance(totalBal primitives.Gwei) []*eth.Validator {
	num := totalBal / primitives.Gwei(params.BeaconConfig().MinActivationBalance)
	vals := make([]*eth.Validator, num)
	for i := range vals {
		wd := make([]byte, 32)
		wd[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
		wd[31] = byte(i)

		vals[i] = &eth.Validator{
			ActivationEpoch:       primitives.Epoch(0),
			EffectiveBalance:      params.BeaconConfig().MinActivationBalance,
			ExitEpoch:             params.BeaconConfig().FarFutureEpoch,
			PublicKey:             fmt.Appendf(nil, "val_%d", i),
			WithdrawableEpoch:     params.BeaconConfig().FarFutureEpoch,
			WithdrawalCredentials: wd,
		}
	}
	if totalBal%primitives.Gwei(params.BeaconConfig().MinActivationBalance) != 0 {
		vals = append(vals, &eth.Validator{
			ActivationEpoch:  primitives.Epoch(0),
			ExitEpoch:        params.BeaconConfig().FarFutureEpoch,
			EffectiveBalance: uint64(totalBal) % params.BeaconConfig().MinActivationBalance,
		})
	}
	return vals
}

func TestProcessConsolidationRequests(t *testing.T) {
	tests := []struct {
		name     string
		state    state.BeaconState
		reqs     []*enginev1.ConsolidationRequest
		validate func(*testing.T, state.BeaconState)
		wantErr  bool
	}{
		{
			name: "nil request",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{nil},
			validate: func(t *testing.T, st state.BeaconState) {
				require.DeepEqual(t, st, st)
			},
			wantErr: true,
		},
		{
			name: "one valid request",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:       params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators: createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
				}
				// Validator scenario setup. See comments in reqs section.
				st.Validators[3].WithdrawalCredentials = bytesutil.Bytes32(0)
				st.Validators[8].WithdrawalCredentials = bytesutil.Bytes32(1)
				st.Validators[9].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
				st.Validators[12].ActivationEpoch = params.BeaconConfig().FarFutureEpoch
				st.Validators[13].ExitEpoch = 10
				st.Validators[16].ExitEpoch = 10
				st.PendingPartialWithdrawals = []*eth.PendingPartialWithdrawal{
					{
						Index:  17,
						Amount: 100,
					},
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				// Source doesn't have withdrawal credentials.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_4"),
				},
				// Source withdrawal credentials don't match the consolidation address.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)), // Should be 5
					SourcePubkey:  []byte("val_5"),
					TargetPubkey:  []byte("val_6"),
				},
				// Target does not have their withdrawal credentials set appropriately. (Using eth1 address prefix)
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(7)),
					SourcePubkey:  []byte("val_7"),
					TargetPubkey:  []byte("val_8"),
				},
				// Source is inactive.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(9)),
					SourcePubkey:  []byte("val_9"),
					TargetPubkey:  []byte("val_10"),
				},
				// Target is inactive.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(11)),
					SourcePubkey:  []byte("val_11"),
					TargetPubkey:  []byte("val_12"),
				},
				// Source is exiting.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(13)),
					SourcePubkey:  []byte("val_13"),
					TargetPubkey:  []byte("val_14"),
				},
				// Target is exiting.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(15)),
					SourcePubkey:  []byte("val_15"),
					TargetPubkey:  []byte("val_16"),
				},
				// Source doesn't exist.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("INVALID"),
					TargetPubkey:  []byte("val_0"),
				},
				// Target doesn't exist.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_0"),
					TargetPubkey:  []byte("INVALID"),
				},
				// Source == target.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_0"),
					TargetPubkey:  []byte("val_0"),
				},
				// Has pending partial withdrawal.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(0)),
					SourcePubkey:  []byte("val_17"),
					TargetPubkey:  []byte("val_1"),
				},
				// Valid consolidation request. This should be last to ensure invalid requests do
				// not end the processing early.
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, uint64(1), numPC)
				pcs, err := st.PendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, primitives.ValidatorIndex(1), pcs[0].SourceIndex)
				require.Equal(t, primitives.ValidatorIndex(2), pcs[0].TargetIndex)

				// Verify the source validator is exiting.
				src, err := st.ValidatorAtIndex(1)
				require.NoError(t, err)
				require.NotEqual(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch not updated")
				require.Equal(t, params.BeaconConfig().MinValidatorWithdrawabilityDelay, src.WithdrawableEpoch-src.ExitEpoch, "source validator withdrawable epoch not set correctly")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify no pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// Verify the source validator is not exiting.
				src, err := st.ValidatorAtIndex(1)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached during processing",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit-1),
				}
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_4"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// The first consolidation was appended.
				pcs, err := st.PendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, primitives.ValidatorIndex(1), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].SourceIndex)
				require.Equal(t, primitives.ValidatorIndex(2), pcs[params.BeaconConfig().PendingConsolidationsLimit-1].TargetIndex)

				// Verify the second source validator is not exiting.
				src, err := st.ValidatorAtIndex(3)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.ExitEpoch, "source validator exit epoch should not be updated")
				require.Equal(t, params.BeaconConfig().FarFutureEpoch, src.WithdrawableEpoch, "source validator withdrawable epoch should not be updated")
			},
			wantErr: false,
		},
		{
			name: "pending consolidations limit reached and compounded consolidation after",
			state: func() state.BeaconState {
				st := &eth.BeaconStateElectra{
					Slot:                  params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().ShardCommitteePeriod)),
					Validators:            createValidatorsWithTotalActiveBalance(32000000000000000), // 32M ETH
					PendingConsolidations: make([]*eth.PendingConsolidation, params.BeaconConfig().PendingConsolidationsLimit),
				}
				// To allow compounding consolidation requests.
				st.Validators[3].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
				s, err := state_native.InitializeFromProtoElectra(st)
				require.NoError(t, err)
				return s
			}(),
			reqs: []*enginev1.ConsolidationRequest{
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(1)),
					SourcePubkey:  []byte("val_1"),
					TargetPubkey:  []byte("val_2"),
				},
				{
					SourceAddress: append(bytesutil.PadTo(nil, 19), byte(3)),
					SourcePubkey:  []byte("val_3"),
					TargetPubkey:  []byte("val_3"),
				},
			},
			validate: func(t *testing.T, st state.BeaconState) {
				// Verify a pending consolidation is created.
				numPC, err := st.NumPendingConsolidations()
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().PendingConsolidationsLimit, numPC)

				// Verify that the last consolidation was included.
				src, err := st.ValidatorAtIndex(3)
				require.NoError(t, err)
				require.Equal(t, params.BeaconConfig().CompoundingWithdrawalPrefixByte, src.WithdrawalCredentials[0], "source validator was not compounded")
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := requests.ProcessConsolidationRequests(context.TODO(), tt.state, tt.reqs)
			if (err != nil) != tt.wantErr {
				t.Errorf("ProcessConsolidationRequests() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr {
				require.NoError(t, err)
			}
			if tt.validate != nil {
				tt.validate(t, tt.state)
			}
		})
	}
}
@@ -1,73 +0,0 @@
package requests

import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/pkg/errors"
)

// ProcessDepositRequests processes execution layer deposit requests.
func ProcessDepositRequests(ctx context.Context, beaconState state.BeaconState, reqs []*enginev1.DepositRequest) (state.BeaconState, error) {
	_, span := trace.StartSpan(ctx, "requests.ProcessDepositRequests")
	defer span.End()

	if len(reqs) == 0 {
		return beaconState, nil
	}

	var err error
	for _, req := range reqs {
		beaconState, err = processDepositRequest(beaconState, req)
		if err != nil {
			return nil, errors.Wrap(err, "could not apply deposit request")
		}
	}
	return beaconState, nil
}

// processDepositRequest processes a single deposit request.
//
//	def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
//	    # Set deposit request start index
//	    if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
//	        state.deposit_requests_start_index = deposit_request.index
//
//	    # Create pending deposit
//	    state.pending_deposits.append(PendingDeposit(
//	        pubkey=deposit_request.pubkey,
//	        withdrawal_credentials=deposit_request.withdrawal_credentials,
//	        amount=deposit_request.amount,
//	        signature=deposit_request.signature,
//	        slot=state.slot,
//	    ))
func processDepositRequest(beaconState state.BeaconState, req *enginev1.DepositRequest) (state.BeaconState, error) {
	requestsStartIndex, err := beaconState.DepositRequestsStartIndex()
	if err != nil {
		return nil, errors.Wrap(err, "could not get deposit requests start index")
	}
	if req == nil {
		return nil, errors.New("nil deposit request")
	}
	if requestsStartIndex == params.BeaconConfig().UnsetDepositRequestsStartIndex {
		if err := beaconState.SetDepositRequestsStartIndex(req.Index); err != nil {
			return nil, errors.Wrap(err, "could not set deposit requests start index")
		}
	}
	if err := beaconState.AppendPendingDeposit(&ethpb.PendingDeposit{
		PublicKey:             bytesutil.SafeCopyBytes(req.Pubkey),
		WithdrawalCredentials: bytesutil.SafeCopyBytes(req.WithdrawalCredentials),
		Amount:                req.Amount,
		Signature:             bytesutil.SafeCopyBytes(req.Signature),
		Slot:                  beaconState.Slot(),
	}); err != nil {
		return nil, errors.Wrap(err, "could not append deposit request")
	}
	return beaconState, nil
}
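A minimal standalone sketch of the one-shot start-index behavior in processDepositRequest above: the start index is only set on the first request seen after the fork, and every request is appended as a pending deposit. The types here are simplified stand-ins for the real prysm state; the sentinel mirrors the spec's UNSET_DEPOSIT_REQUESTS_START_INDEX of 2**64 - 1.

package main

import "fmt"

const unsetStartIndex = ^uint64(0) // 2**64 - 1

type fakeState struct {
	startIndex      uint64
	pendingDeposits []uint64 // deposit amounts only, for brevity
}

func (s *fakeState) applyDepositRequest(index, amount uint64) {
	if s.startIndex == unsetStartIndex {
		s.startIndex = index // set exactly once
	}
	s.pendingDeposits = append(s.pendingDeposits, amount)
}

func main() {
	st := &fakeState{startIndex: unsetStartIndex}
	st.applyDepositRequest(7, 32_000_000_000)
	st.applyDepositRequest(8, 1_000_000_000)
	fmt.Println(st.startIndex, st.pendingDeposits) // 7 [32000000000 1000000000]
}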
@@ -1,70 +0,0 @@
package requests_test

import (
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/requests"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/crypto/bls"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessDepositRequests(t *testing.T) {
	st, _ := util.DeterministicGenesisStateElectra(t, 1)
	sk, err := bls.RandKey()
	require.NoError(t, err)
	require.NoError(t, st.SetDepositRequestsStartIndex(1))

	t.Run("empty requests continues", func(t *testing.T) {
		newSt, err := requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{})
		require.NoError(t, err)
		require.DeepEqual(t, newSt, st)
	})
	t.Run("nil request errors", func(t *testing.T) {
		_, err = requests.ProcessDepositRequests(t.Context(), st, []*enginev1.DepositRequest{nil})
		require.ErrorContains(t, "nil deposit request", err)
	})

	vals := st.Validators()
	vals[0].PublicKey = sk.PublicKey().Marshal()
	vals[0].WithdrawalCredentials[0] = params.BeaconConfig().ETH1AddressWithdrawalPrefixByte
	require.NoError(t, st.SetValidators(vals))
	bals := st.Balances()
	bals[0] = params.BeaconConfig().MinActivationBalance + 2000
	require.NoError(t, st.SetBalances(bals))
	require.NoError(t, st.SetPendingDeposits(make([]*eth.PendingDeposit, 0))) // reset pending deposits as the deterministic state populates them already
	withdrawalCred := make([]byte, 32)
	withdrawalCred[0] = params.BeaconConfig().CompoundingWithdrawalPrefixByte
	depositMessage := &eth.DepositMessage{
		PublicKey:             sk.PublicKey().Marshal(),
		Amount:                1000,
		WithdrawalCredentials: withdrawalCred,
	}
	domain, err := signing.ComputeDomain(params.BeaconConfig().DomainDeposit, nil, nil)
	require.NoError(t, err)
	sr, err := signing.ComputeSigningRoot(depositMessage, domain)
	require.NoError(t, err)
	sig := sk.Sign(sr[:])
	reqs := []*enginev1.DepositRequest{
		{
			Pubkey:                depositMessage.PublicKey,
			Index:                 0,
			WithdrawalCredentials: depositMessage.WithdrawalCredentials,
			Amount:                depositMessage.Amount,
			Signature:             sig.Marshal(),
		},
	}
	st, err = requests.ProcessDepositRequests(t.Context(), st, reqs)
	require.NoError(t, err)

	pbd, err := st.PendingDeposits()
	require.NoError(t, err)
	require.Equal(t, 1, len(pbd))
	require.Equal(t, uint64(1000), pbd[0].Amount)
	require.DeepEqual(t, bytesutil.SafeCopyBytes(reqs[0].Pubkey), pbd[0].PublicKey)
}
@@ -1,9 +0,0 @@
// Code generated by hack/gen-logs.sh; DO NOT EDIT.
// This file is created and regenerated automatically. Anything added here might get removed.
package requests

import "github.com/sirupsen/logrus"

// The prefix for logs from this package will be the text after the last slash in the package path.
// If you wish to change this, you should add your desired name in the runtime/logging/logrus-prefixed-formatter/prefix-replacement.go file.
var log = logrus.WithField("package", "beacon-chain/core/requests")
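For context, the generated `log` value above is the package-scoped logrus entry that the consolidation code logs non-fatal failures through. A minimal standalone sketch of the pattern, with logrus as the only dependency:

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

var log = logrus.WithField("package", "beacon-chain/core/requests")

func main() {
	// Non-fatal failures during request processing are logged and skipped
	// rather than aborting the whole block transition.
	log.WithError(errors.New("example failure")).Error("Failed to switch to compounding validator")
}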
@@ -3,8 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
    name = "go_default_library",
    srcs = [
        "electra.go",
        "errors.go",
        "log.go",
        "skip_slot_cache.go",
        "state.go",
@@ -27,7 +25,6 @@ go_library(
        "//beacon-chain/core/execution:go_default_library",
        "//beacon-chain/core/fulu:go_default_library",
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/requests:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/core/transition/interop:go_default_library",
        "//beacon-chain/core/validators:go_default_library",
@@ -65,14 +62,11 @@ go_test(
        "altair_transition_no_verify_sig_test.go",
        "bellatrix_transition_no_verify_sig_test.go",
        "benchmarks_test.go",
        "electra_test.go",
        "exports_test.go",
        "skip_slot_cache_test.go",
        "state_fuzz_test.go",
        "state_test.go",
        "trailing_slot_state_cache_test.go",
        "transition_fuzz_test.go",
        "transition_gloas_test.go",
        "transition_no_verify_sig_test.go",
        "transition_test.go",
    ],
@@ -108,7 +102,6 @@ go_test(
        "//time/slots:go_default_library",
        "@com_github_google_gofuzz//:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -1,216 +0,0 @@
package transition_test

import (
	"context"
	"errors"
	"testing"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	enginev1 "github.com/OffchainLabs/prysm/v7/proto/engine/v1"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
)

func TestProcessOperationsWithNilRequests(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blockElectra *ethpb.SignedBeaconBlockElectra)
		errMsg    string
	}{
		{
			name: "Nil deposit request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Deposits = []*enginev1.DepositRequest{nil}
			},
			errMsg: "nil deposit request",
		},
		{
			name: "Nil withdrawal request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Withdrawals = []*enginev1.WithdrawalRequest{nil}
			},
			errMsg: "nil withdrawal request",
		},
		{
			name: "Nil consolidation request",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				blk.Block.Body.ExecutionRequests.Consolidations = []*enginev1.ConsolidationRequest{nil}
			},
			errMsg: "nil consolidation request",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(1))

			_, err = transition.ElectraOperations(t.Context(), st, b.Block())
			require.ErrorContains(t, tc.errMsg, err)
		})
	}
}

func TestElectraOperations_ProcessingErrors(t *testing.T) {
	tests := []struct {
		name      string
		modifyBlk func(blk *ethpb.SignedBeaconBlockElectra)
		errCheck  func(t *testing.T, err error)
	}{
		{
			name: "ErrProcessProposerSlashingsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create invalid proposer slashing with out-of-bounds proposer index
				blk.Block.Body.ProposerSlashings = []*ethpb.ProposerSlashing{
					{
						Header_1: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999, // Invalid index (out of bounds)
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
						Header_2: &ethpb.SignedBeaconBlockHeader{
							Header: &ethpb.BeaconBlockHeader{
								Slot:          1,
								ProposerIndex: 999999,
								ParentRoot:    make([]byte, 32),
								StateRoot:     make([]byte, 32),
								BodyRoot:      make([]byte, 32),
							},
							Signature: make([]byte, 96),
						},
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process proposer slashings failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessProposerSlashingsFailed))
			},
		},
		{
			name: "ErrProcessAttestationsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create attestation with invalid committee index
				blk.Block.Body.Attestations = []*ethpb.AttestationElectra{
					{
						AggregationBits: []byte{0b00000001},
						Data: &ethpb.AttestationData{
							Slot:            1,
							CommitteeIndex:  999999, // Invalid committee index
							BeaconBlockRoot: make([]byte, 32),
							Source: &ethpb.Checkpoint{
								Epoch: 0,
								Root:  make([]byte, 32),
							},
							Target: &ethpb.Checkpoint{
								Epoch: 0,
								Root:  make([]byte, 32),
							},
						},
						CommitteeBits: []byte{0b00000001},
						Signature:     make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process attestations failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessAttestationsFailed))
			},
		},
		{
			name: "ErrProcessDepositsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create deposit with invalid proof length
				blk.Block.Body.Deposits = []*ethpb.Deposit{
					{
						Proof: [][]byte{}, // Invalid: empty proof
						Data: &ethpb.Deposit_Data{
							PublicKey:             make([]byte, 48),
							WithdrawalCredentials: make([]byte, 32),
							Amount:                32000000000, // 32 ETH in Gwei
							Signature:             make([]byte, 96),
						},
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process deposits failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessDepositsFailed))
			},
		},
		{
			name: "ErrProcessVoluntaryExitsFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create voluntary exit with invalid validator index
				blk.Block.Body.VoluntaryExits = []*ethpb.SignedVoluntaryExit{
					{
						Exit: &ethpb.VoluntaryExit{
							Epoch:          0,
							ValidatorIndex: 999999, // Invalid index (out of bounds)
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process voluntary exits failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessVoluntaryExitsFailed))
			},
		},
		{
			name: "ErrProcessBLSChangesFailed",
			modifyBlk: func(blk *ethpb.SignedBeaconBlockElectra) {
				// Create BLS to execution change with invalid validator index
				blk.Block.Body.BlsToExecutionChanges = []*ethpb.SignedBLSToExecutionChange{
					{
						Message: &ethpb.BLSToExecutionChange{
							ValidatorIndex:     999999, // Invalid index (out of bounds)
							FromBlsPubkey:      make([]byte, 48),
							ToExecutionAddress: make([]byte, 20),
						},
						Signature: make([]byte, 96),
					},
				}
			},
			errCheck: func(t *testing.T, err error) {
				require.ErrorContains(t, "process BLS to execution changes failed", err)
				require.Equal(t, true, errors.Is(err, transition.ErrProcessBLSChangesFailed))
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			ctx := context.Background()
			st, ks := util.DeterministicGenesisStateElectra(t, 128)
			blk, err := util.GenerateFullBlockElectra(st, ks, util.DefaultBlockGenConfig(), 1)
			require.NoError(t, err)

			tc.modifyBlk(blk)

			b, err := blocks.NewSignedBeaconBlock(blk)
			require.NoError(t, err)

			require.NoError(t, st.SetSlot(primitives.Slot(1)))

			_, err = transition.ElectraOperations(ctx, st, b.Block())
			require.NotNil(t, err, "Expected an error but got nil")
			tc.errCheck(t, err)
		})
	}
}
@@ -1,19 +0,0 @@
|
||||
package transition
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
ErrAttestationsSignatureInvalid = errors.New("attestations signature invalid")
|
||||
ErrRandaoSignatureInvalid = errors.New("randao signature invalid")
|
||||
ErrBLSToExecutionChangesSignatureInvalid = errors.New("BLS to execution changes signature invalid")
|
||||
ErrProcessWithdrawalsFailed = errors.New("process withdrawals failed")
|
||||
ErrProcessRandaoFailed = errors.New("process randao failed")
|
||||
ErrProcessEth1DataFailed = errors.New("process eth1 data failed")
|
||||
ErrProcessProposerSlashingsFailed = errors.New("process proposer slashings failed")
|
||||
ErrProcessAttesterSlashingsFailed = errors.New("process attester slashings failed")
|
||||
ErrProcessAttestationsFailed = errors.New("process attestations failed")
|
||||
ErrProcessDepositsFailed = errors.New("process deposits failed")
|
||||
ErrProcessVoluntaryExitsFailed = errors.New("process voluntary exits failed")
|
||||
ErrProcessBLSChangesFailed = errors.New("process BLS to execution changes failed")
|
||||
ErrProcessSyncAggregateFailed = errors.New("process sync aggregate failed")
|
||||
)
|
||||
@@ -1,3 +0,0 @@
|
||||
package transition
|
||||
|
||||
var ElectraOperations = electraOperations
|
||||
@@ -142,18 +142,6 @@ func ProcessSlot(ctx context.Context, state state.BeaconState) (state.BeaconStat
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Spec v1.6.1 (pseudocode):
|
||||
// # [New in Gloas:EIP7732]
|
||||
// # Unset the next payload availability
|
||||
// state.execution_payload_availability[(state.slot + 1) % SLOTS_PER_HISTORICAL_ROOT] = 0b0
|
||||
if state.Version() >= version.Gloas {
|
||||
index := uint64((state.Slot() + 1) % params.BeaconConfig().SlotsPerHistoricalRoot)
|
||||
if err := state.UpdateExecutionPayloadAvailabilityAtIndex(index, 0x0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,141 +0,0 @@
|
||||
package transition
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProcessSlot_GloasClearsNextPayloadAvailability(t *testing.T) {
|
||||
slot := primitives.Slot(10)
|
||||
cfg := params.BeaconConfig()
|
||||
nextIdx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
||||
byteIdx := nextIdx / 8
|
||||
bitMask := byte(1 << (nextIdx % 8))
|
||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, byte(0xFF)&^bitMask, post.ExecutionPayloadAvailability[byteIdx])
|
||||
}
|
||||
|
||||
func TestProcessSlot_GloasClearsNextPayloadAvailability_Wrap(t *testing.T) {
|
||||
cfg := params.BeaconConfig()
|
||||
slot := primitives.Slot(cfg.SlotsPerHistoricalRoot - 1)
|
||||
availability := bytes.Repeat([]byte{0xFF}, int(cfg.SlotsPerHistoricalRoot/8))
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
require.NoError(t, err)
|
||||
|
||||
post := st.ToProto().(*ethpb.BeaconStateGloas)
|
||||
require.Equal(t, byte(0xFE), post.ExecutionPayloadAvailability[0])
|
||||
}
|
||||
|
||||
func TestProcessSlot_GloasAvailabilityUpdateError(t *testing.T) {
|
||||
slot := primitives.Slot(7)
|
||||
availability := make([]byte, 1)
|
||||
st := newGloasState(t, slot, availability)
|
||||
|
||||
_, err := ProcessSlot(context.Background(), st)
|
||||
cfg := params.BeaconConfig()
|
||||
idx := uint64((slot + 1) % cfg.SlotsPerHistoricalRoot)
|
||||
byteIdx := idx / 8
|
||||
require.EqualError(t, err, fmt.Sprintf(
|
||||
"bit index %d (byte index %d) out of range for execution payload availability length %d",
|
||||
idx, byteIdx, len(availability),
|
||||
))
|
||||
}
|
||||
|
||||
func newGloasState(t *testing.T, slot primitives.Slot, availability []byte) state.BeaconState {
|
||||
t.Helper()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
protoState := ðpb.BeaconStateGloas{
|
||||
Slot: slot,
|
||||
LatestBlockHeader: testBeaconBlockHeader(),
|
||||
BlockRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
StateRoots: make([][]byte, cfg.SlotsPerHistoricalRoot),
|
||||
RandaoMixes: make([][]byte, fieldparams.RandaoMixesLength),
|
||||
ExecutionPayloadAvailability: availability,
|
||||
BuilderPendingPayments: make([]*ethpb.BuilderPendingPayment, int(cfg.SlotsPerEpoch*2)),
|
||||
LatestExecutionPayloadBid: ðpb.ExecutionPayloadBid{
|
||||
ParentBlockHash: make([]byte, 32),
|
||||
ParentBlockRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
PrevRandao: make([]byte, 32),
|
||||
FeeRecipient: make([]byte, 20),
|
||||
BlobKzgCommitmentsRoot: make([]byte, 32),
|
||||
},
|
||||
Eth1Data: ðpb.Eth1Data{
|
||||
DepositRoot: make([]byte, 32),
|
||||
BlockHash: make([]byte, 32),
|
||||
},
|
||||
PreviousEpochParticipation: []byte{},
|
||||
CurrentEpochParticipation: []byte{},
|
||||
JustificationBits: []byte{0},
|
||||
PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
CurrentSyncCommittee: ðpb.SyncCommittee{},
|
||||
NextSyncCommittee: ðpb.SyncCommittee{},
|
||||
}
|
||||
|
||||
for i := range protoState.BlockRoots {
|
||||
protoState.BlockRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range protoState.StateRoots {
|
||||
protoState.StateRoots[i] = make([]byte, 32)
|
||||
}
|
||||
for i := range protoState.RandaoMixes {
|
||||
protoState.RandaoMixes[i] = make([]byte, 32)
|
||||
}
|
||||
|
||||
for i := range protoState.BuilderPendingPayments {
|
||||
protoState.BuilderPendingPayments[i] = ðpb.BuilderPendingPayment{
|
||||
Withdrawal: ðpb.BuilderPendingWithdrawal{
|
||||
FeeRecipient: make([]byte, 20),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pubkeys := make([][]byte, cfg.SyncCommitteeSize)
|
||||
for i := range pubkeys {
|
||||
pubkeys[i] = make([]byte, fieldparams.BLSPubkeyLength)
|
||||
}
|
||||
aggPubkey := make([]byte, fieldparams.BLSPubkeyLength)
|
||||
protoState.CurrentSyncCommittee = ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: aggPubkey,
|
||||
}
|
||||
protoState.NextSyncCommittee = ðpb.SyncCommittee{
|
||||
Pubkeys: pubkeys,
|
||||
AggregatePubkey: aggPubkey,
|
||||
}
|
||||
|
||||
st, err := state_native.InitializeFromProtoGloas(protoState)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, version.Gloas, st.Version())
|
||||
return st
|
||||
}
|
||||
|
||||
func testBeaconBlockHeader() *ethpb.BeaconBlockHeader {
|
||||
return ðpb.BeaconBlockHeader{
|
||||
ParentRoot: make([]byte, 32),
|
||||
StateRoot: make([]byte, 32),
|
||||
BodyRoot: make([]byte, 32),
|
||||
}
|
||||
}
|
||||
@@ -7,11 +7,12 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/altair"
|
||||
b "github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition/interop"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
v "github.com/OffchainLabs/prysm/v7/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
@@ -69,11 +70,10 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
}
|
||||
|
||||
// Execute per block transition.
|
||||
sigSlice, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
set, st, err := ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
set := sigSlice.Batch()
|
||||
|
||||
// State root validation.
|
||||
postStateRoot, err := st.HashTreeRoot(ctx)
|
||||
@@ -113,7 +113,7 @@ func ExecuteStateTransitionNoVerifyAnySig(
|
||||
// assert block.state_root == hash_tree_root(state)
|
||||
func CalculateStateRoot(
|
||||
ctx context.Context,
|
||||
rollback state.BeaconState,
|
||||
state state.BeaconState,
|
||||
signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.CalculateStateRoot")
|
||||
@@ -122,7 +122,7 @@ func CalculateStateRoot(
|
||||
tracing.AnnotateError(span, ctx.Err())
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
if rollback == nil || rollback.IsNil() {
|
||||
if state == nil || state.IsNil() {
|
||||
return [32]byte{}, errors.New("nil state")
|
||||
}
|
||||
if signed == nil || signed.IsNil() || signed.Block().IsNil() {
|
||||
@@ -130,7 +130,7 @@ func CalculateStateRoot(
|
||||
}
|
||||
|
||||
// Copy state to avoid mutating the state reference.
|
||||
state := rollback.Copy()
|
||||
state = state.Copy()
|
||||
|
||||
// Execute per slots transition.
|
||||
var err error
|
||||
@@ -141,103 +141,14 @@ func CalculateStateRoot(
|
||||
}
|
||||
|
||||
// Execute per block transition.
|
||||
if features.Get().EnableProposerPreprocessing {
|
||||
state, err = processBlockForProposing(ctx, state, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block for proposing")
|
||||
}
|
||||
} else {
|
||||
state, err = ProcessBlockForStateRoot(ctx, state, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
state, err = ProcessBlockForStateRoot(ctx, state, signed)
|
||||
if err != nil {
|
||||
return [32]byte{}, errors.Wrap(err, "could not process block")
|
||||
}
|
||||
|
||||
return state.HashTreeRoot(ctx)
|
||||
}
|
||||
|
||||
// processBlockVerifySigs processes the block and verifies the signatures within it. Block signatures are not verified as this block is not yet signed.
|
||||
func processBlockForProposing(ctx context.Context, st state.BeaconState, signed interfaces.ReadOnlySignedBeaconBlock) (state.BeaconState, error) {
|
||||
var err error
|
||||
var set BlockSignatureBatches
|
||||
set, st, err = ProcessBlockNoVerifyAnySig(ctx, st, signed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// We first try to verify all sigantures batched optimistically. We ignore block proposer signature.
|
||||
sigSet := set.Batch()
|
||||
valid, err := sigSet.Verify()
|
||||
if err != nil || valid {
|
||||
return st, err
|
||||
}
|
||||
// Some signature failed to verify.
|
||||
// Verify Attestations signatures
|
||||
attSigs := set.AttestationSignatures
|
||||
if attSigs == nil {
|
||||
return nil, ErrAttestationsSignatureInvalid
|
||||
}
|
||||
valid, err = attSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrAttestationsSignatureInvalid
|
||||
}
|
||||
|
||||
// Verify Randao signature
|
||||
randaoSigs := set.RandaoSignatures
|
||||
if randaoSigs == nil {
|
||||
return nil, ErrRandaoSignatureInvalid
|
||||
}
|
||||
valid, err = randaoSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrRandaoSignatureInvalid
|
||||
}
|
||||
|
||||
if signed.Block().Version() < version.Capella {
|
||||
//This should not happen as we must have failed one of the above signatures.
|
||||
return st, nil
|
||||
}
|
||||
// Verify BLS to execution changes signatures
|
||||
blsChangeSigs := set.BLSChangeSignatures
|
||||
if blsChangeSigs == nil {
|
||||
return nil, ErrBLSToExecutionChangesSignatureInvalid
|
||||
}
|
||||
valid, err = blsChangeSigs.Verify()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !valid {
|
||||
return nil, ErrBLSToExecutionChangesSignatureInvalid
|
||||
}
|
||||
// We should not reach this point as one of the above signatures must have failed.
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// BlockSignatureBatches holds the signature batches for different parts of a beacon block.
|
||||
type BlockSignatureBatches struct {
|
||||
RandaoSignatures *bls.SignatureBatch
|
||||
AttestationSignatures *bls.SignatureBatch
|
||||
BLSChangeSignatures *bls.SignatureBatch
|
||||
}
|
||||
|
||||
// Batch returns the batch of signature batches in the BlockSignatureBatches.
|
||||
func (b BlockSignatureBatches) Batch() *bls.SignatureBatch {
|
||||
sigs := bls.NewSet()
|
||||
if b.RandaoSignatures != nil {
|
||||
sigs.Join(b.RandaoSignatures)
|
||||
}
|
||||
if b.AttestationSignatures != nil {
|
||||
sigs.Join(b.AttestationSignatures)
|
||||
}
|
||||
if b.BLSChangeSignatures != nil {
|
||||
sigs.Join(b.BLSChangeSignatures)
|
||||
}
|
||||
return sigs
|
||||
}
|
||||
|
||||
// ProcessBlockNoVerifyAnySig creates a new, modified beacon state by applying block operation
|
||||
// transformations as defined in the Ethereum Serenity specification. It does not validate
|
||||
// any block signature except for deposit and slashing signatures. It also returns the relevant
|
||||
@@ -254,48 +165,48 @@ func ProcessBlockNoVerifyAnySig(
|
||||
ctx context.Context,
|
||||
st state.BeaconState,
|
||||
signed interfaces.ReadOnlySignedBeaconBlock,
|
||||
) (BlockSignatureBatches, state.BeaconState, error) {
|
||||
) (*bls.SignatureBatch, state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig")
|
||||
defer span.End()
|
||||
set := BlockSignatureBatches{}
|
||||
if err := blocks.BeaconBlockIsNil(signed); err != nil {
|
||||
return set, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if st.Version() != signed.Block().Version() {
|
||||
return set, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
|
||||
return nil, nil, fmt.Errorf("state and block are different version. %d != %d", st.Version(), signed.Block().Version())
|
||||
}
|
||||
|
||||
blk := signed.Block()
|
||||
st, err := ProcessBlockForStateRoot(ctx, st, signed)
|
||||
if err != nil {
|
||||
return set, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
randaoReveal := signed.Block().Body().RandaoReveal()
|
||||
rSet, err := b.RandaoSignatureBatch(ctx, st, randaoReveal[:])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return set, nil, errors.Wrap(err, "could not retrieve randao signature set")
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve randao signature set")
|
||||
}
|
||||
set.RandaoSignatures = rSet
|
||||
aSet, err := b.AttestationSignatureBatch(ctx, st, signed.Block().Body().Attestations())
|
||||
if err != nil {
|
||||
return set, nil, errors.Wrap(err, "could not retrieve attestation signature set")
|
||||
return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set")
|
||||
}
|
||||
set.AttestationSignatures = aSet
|
||||
|
||||
// Merge beacon block, randao and attestations signatures into a set.
|
||||
set := bls.NewSet()
|
||||
set.Join(rSet).Join(aSet)
|
||||
|
||||
if blk.Version() >= version.Capella {
|
||||
changes, err := signed.Block().Body().BLSToExecutionChanges()
|
||||
if err != nil {
|
||||
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges")
|
||||
}
|
||||
cSet, err := b.BLSChangesSignatureBatch(st, changes)
|
||||
if err != nil {
|
||||
return set, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
|
||||
return nil, nil, errors.Wrap(err, "could not get BLSToExecutionChanges signatures")
|
||||
}
|
||||
set.BLSChangeSignatures = cSet
|
||||
set.Join(cSet)
|
||||
}
|
||||
return set, st, nil
|
||||
}
|
||||
@@ -357,7 +268,7 @@ func ProcessOperationsNoVerifyAttsSigs(
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
state, err = electraOperations(ctx, state, beaconBlock)
|
||||
state, err = electra.ProcessOperations(ctx, state, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -415,7 +326,7 @@ func ProcessBlockForStateRoot(
|
||||
if state.Version() >= version.Capella {
|
||||
state, err = b.ProcessWithdrawals(state, executionData)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessWithdrawalsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process withdrawals")
|
||||
}
|
||||
}
|
||||
if err = b.ProcessPayload(state, blk.Body()); err != nil {
|
||||
@@ -427,13 +338,13 @@ func ProcessBlockForStateRoot(
|
||||
state, err = b.ProcessRandaoNoVerify(state, randaoReveal[:])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(ErrProcessRandaoFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not verify and process randao")
|
||||
}
|
||||
|
||||
state, err = b.ProcessEth1DataInBlock(ctx, state, signed.Block().Body().Eth1Data())
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(ErrProcessEth1DataFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process eth1 data")
|
||||
}
|
||||
|
||||
state, err = ProcessOperationsNoVerifyAttsSigs(ctx, state, signed.Block())
|
||||
@@ -452,7 +363,7 @@ func ProcessBlockForStateRoot(
|
||||
}
|
||||
state, _, err = altair.ProcessSyncAggregate(ctx, state, sa)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessSyncAggregateFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "process_sync_aggregate failed")
|
||||
}
|
||||
|
||||
return state, nil
|
||||
@@ -468,35 +379,31 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
|
||||
exitInfo := &validators.ExitInfo{}
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
exitInfo = v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process altair proposer slashing")
|
||||
}
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process altair attester slashing")
|
||||
}
|
||||
st, err = altair.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process altair attestation")
|
||||
}
|
||||
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process altair deposit")
|
||||
}
|
||||
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process voluntary exits")
|
||||
}
|
||||
st, err = b.ProcessBLSToExecutionChanges(st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessBLSChangesFailed, err.Error())
|
||||
}
|
||||
return st, nil
|
||||
return b.ProcessBLSToExecutionChanges(st, beaconBlock)
|
||||
}
|
||||
|
||||
// This calls phase 0 block operations.
|
||||
@@ -504,32 +411,32 @@ func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock int
|
||||
var err error
|
||||
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
|
||||
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
|
||||
var exitInfo *validators.ExitInfo
|
||||
var exitInfo *v.ExitInfo
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
exitInfo = v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessProposerSlashingsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process block proposer slashings")
|
||||
}
|
||||
st, err = b.ProcessAttesterSlashings(ctx, st, beaconBlock.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessAttesterSlashingsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process block attester slashings")
|
||||
}
|
||||
st, err = b.ProcessAttestationsNoVerifySignature(ctx, st, beaconBlock)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessAttestationsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process block attestations")
|
||||
}
|
||||
if _, err := altair.ProcessDeposits(ctx, st, beaconBlock.Body().Deposits()); err != nil {
|
||||
return nil, errors.Wrap(ErrProcessDepositsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process deposits")
|
||||
}
|
||||
st, err = b.ProcessVoluntaryExits(ctx, st, beaconBlock.Body().VoluntaryExits(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(ErrProcessVoluntaryExitsFailed, err.Error())
|
||||
return nil, errors.Wrap(err, "could not process voluntary exits")
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
@@ -132,8 +132,7 @@ func TestProcessBlockNoVerify_PassesProcessingConditions(t *testing.T) {
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
// Test Signature set verifies.
|
||||
sigSet := set.Batch()
|
||||
verified, err := sigSet.Verify()
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, true, verified, "Could not verify signature set.")
|
||||
}
|
||||
@@ -146,8 +145,7 @@ func TestProcessBlockNoVerifyAnySigAltair_OK(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
sigSet := set.Batch()
|
||||
verified, err := sigSet.Verify()
|
||||
verified, err := set.Verify()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, verified, "Could not verify signature set")
|
||||
}
|
||||
@@ -156,9 +154,8 @@ func TestProcessBlockNoVerify_SigSetContainsDescriptions(t *testing.T) {
|
||||
beaconState, block, _, _, _ := createFullBlockWithOperations(t)
|
||||
wsb, err := blocks.NewSignedBeaconBlock(block)
|
||||
require.NoError(t, err)
|
||||
signatures, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
set, _, err := transition.ProcessBlockNoVerifyAnySig(t.Context(), beaconState, wsb)
|
||||
require.NoError(t, err)
|
||||
set := signatures.Batch()
|
||||
assert.Equal(t, len(set.Signatures), len(set.Descriptions), "Signatures and descriptions do not match up")
|
||||
assert.Equal(t, "randao signature", set.Descriptions[0])
|
||||
assert.Equal(t, "attestation signature", set.Descriptions[1])
|
||||
|
||||
@@ -512,11 +512,6 @@ func (dcs *DataColumnStorage) Get(root [fieldparams.RootLength]byte, indices []u
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column sidecars file path open")
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := file.Close(); closeErr != nil {
|
||||
log.WithError(closeErr).WithField("file", filePath).Error("Error closing file during Get")
|
||||
}
|
||||
}()
|
||||
|
||||
// Read file metadata.
|
||||
metadata, err := dcs.metadata(file)
|
||||
|
||||
@@ -89,7 +89,6 @@ type NoHeadAccessDatabase interface {
|
||||
SaveBlocks(ctx context.Context, blocks []interfaces.ReadOnlySignedBeaconBlock) error
|
||||
SaveROBlocks(ctx context.Context, blks []blocks.ROBlock, cache bool) error
|
||||
SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SlotByBlockRoot(context.Context, [32]byte) (primitives.Slot, error)
|
||||
// State related methods.
|
||||
SaveState(ctx context.Context, state state.ReadOnlyBeaconState, blockRoot [32]byte) error
|
||||
SaveStates(ctx context.Context, states []state.ReadOnlyBeaconState, blockRoots [][32]byte) error
|
||||
@@ -97,7 +96,6 @@ type NoHeadAccessDatabase interface {
|
||||
DeleteStates(ctx context.Context, blockRoots [][32]byte) error
|
||||
SaveStateSummary(ctx context.Context, summary *ethpb.StateSummary) error
|
||||
SaveStateSummaries(ctx context.Context, summaries []*ethpb.StateSummary) error
|
||||
SlotInDiffTree(primitives.Slot) (uint64, int, error)
|
||||
// Checkpoint operations.
|
||||
SaveJustifiedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
|
||||
SaveFinalizedCheckpoint(ctx context.Context, checkpoint *ethpb.Checkpoint) error
|
||||
|
||||
@@ -32,7 +32,6 @@ go_library(
|
||||
"state_diff_helpers.go",
|
||||
"state_summary.go",
|
||||
"state_summary_cache.go",
|
||||
"testing_helpers.go",
|
||||
"utils.go",
|
||||
"validated_checkpoint.go",
|
||||
"wss.go",
|
||||
|
||||
@@ -67,6 +67,7 @@ func getSubscriptionStatusFromDB(t *testing.T, db *Store) bool {
|
||||
return subscribed
|
||||
}
|
||||
|
||||
|
||||
func TestUpdateCustodyInfo(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
|
||||
|
||||
@@ -22,10 +22,6 @@ var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
|
||||
// ErrNotFoundMetadataSeqNum is a not found error specifically for the metadata sequence number getter
|
||||
var ErrNotFoundMetadataSeqNum = errors.Wrap(ErrNotFound, "metadata sequence number")
|
||||
|
||||
// ErrStateDiffIncompatible is returned when state-diff feature is enabled
|
||||
// but the database was created without state-diff support.
|
||||
var ErrStateDiffIncompatible = errors.New("state-diff feature enabled but database was created without state-diff support")
|
||||
|
||||
var errEmptyBlockSlice = errors.New("[]blocks.ROBlock is empty")
|
||||
var errIncorrectBlockParent = errors.New("unexpected missing or forked blocks in a []ROBlock")
|
||||
var errFinalizedChildNotFound = errors.New("unable to find finalized root descending from backfill batch")
|
||||
|
||||
@@ -42,10 +42,6 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
|
||||
if err := s.SaveGenesisBlockRoot(ctx, genesisBlkRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save genesis block root")
|
||||
}
|
||||
|
||||
if err := s.initializeStateDiff(0, genesisState); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize state diff for genesis")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -203,47 +203,17 @@ func NewKVStore(ctx context.Context, dirPath string, opts ...KVStoreOption) (*St
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := kv.startStateDiff(ctx); err != nil {
|
||||
if errors.Is(err, ErrStateDiffIncompatible) {
|
||||
return kv, err
|
||||
if features.Get().EnableStateDiff {
|
||||
sdCache, err := newStateDiffCache(kv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
kv.stateDiffCache = sdCache
|
||||
}
|
||||
|
||||
return kv, nil
|
||||
}
|
||||
|
||||
func (kv *Store) startStateDiff(ctx context.Context) error {
|
||||
if !features.Get().EnableStateDiff {
|
||||
return nil
|
||||
}
|
||||
// Check if offset already exists (existing state-diff database).
|
||||
hasOffset, err := kv.hasStateDiffOffset()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if hasOffset {
|
||||
// Existing state-diff database - restarts not yet supported.
|
||||
return errors.New("restarting with existing state-diff database not yet supported")
|
||||
}
|
||||
|
||||
// Check if this is a new database (no head block).
|
||||
headBlock, err := kv.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get head block")
|
||||
}
|
||||
|
||||
if headBlock == nil {
|
||||
// New database - will be initialized later during checkpoint/genesis sync.
|
||||
// stateDiffCache stays nil until SaveOrigin or SaveGenesisData initializes it.
|
||||
log.Info("State-diff enabled: will be initialized during checkpoint or genesis sync")
|
||||
} else {
|
||||
// Existing database without state-diff - return store with error for caller to handle.
|
||||
return ErrStateDiffIncompatible
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearDB removes the previously stored database in the data directory.
|
||||
func (s *Store) ClearDB() error {
|
||||
if err := s.Close(); err != nil {
|
||||
|
||||
@@ -1053,10 +1053,6 @@ func (s *Store) getStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uint64(slot) < s.getOffset() {
|
||||
return nil, ErrSlotBeforeOffset
|
||||
}
|
||||
|
||||
st, err := s.stateByDiff(ctx, slot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1074,10 +1070,6 @@ func (s *Store) hasStateUsingStateDiff(ctx context.Context, blockRoot [32]byte)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if uint64(slot) < s.getOffset() {
|
||||
return false, ErrSlotBeforeOffset
|
||||
}
|
||||
|
||||
stateLvl := computeLevel(s.getOffset(), slot)
|
||||
return stateLvl != -1, nil
|
||||
}
|
||||
|
||||
@@ -23,16 +23,6 @@ const (
|
||||
The data at level 0 is saved every 2**exponent[0] slots and always contains a full state snapshot that is used as a base for the delta saved at other levels.
|
||||
*/
|
||||
|
||||
// SlotInDiffTree returns whether the given slot is a saving point in the diff tree.
|
||||
// If it is, it also returns the offset and level in the tree.
|
||||
func (s *Store) SlotInDiffTree(slot primitives.Slot) (uint64, int, error) {
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return 0, -1, ErrSlotBeforeOffset
|
||||
}
|
||||
return offset, computeLevel(offset, slot), nil
|
||||
}
|
||||
|
||||
// saveStateByDiff takes a state and decides between saving a full state snapshot or a diff.
|
||||
func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconState) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.saveStateByDiff")
|
||||
@@ -43,10 +33,13 @@ func (s *Store) saveStateByDiff(ctx context.Context, st state.ReadOnlyBeaconStat
|
||||
}
|
||||
|
||||
slot := st.Slot()
|
||||
offset, lvl, err := s.SlotInDiffTree(slot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if slot is in diff tree")
|
||||
offset := s.getOffset()
|
||||
if uint64(slot) < offset {
|
||||
return ErrSlotBeforeOffset
|
||||
}
|
||||
|
||||
// Find the level to save the state.
|
||||
lvl := computeLevel(offset, slot)
|
||||
if lvl == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ func newStateDiffCache(s *Store) (*stateDiffCache, error) {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := bucket.Get(offsetKey)
|
||||
offsetBytes := bucket.Get([]byte("offset"))
|
||||
if offsetBytes == nil {
|
||||
return errors.New("state diff cache: offset not found")
|
||||
}
|
||||
|
||||
@@ -9,19 +9,17 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
statenative "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/hdiff"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/math"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
pkgerrors "github.com/pkg/errors"
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var (
|
||||
offsetKey = []byte("offset")
|
||||
ErrSlotBeforeOffset = errors.New("slot is before state-diff root offset")
|
||||
ErrSlotBeforeOffset = errors.New("slot is before root offset")
|
||||
)
|
||||
|
||||
func makeKeyForStateDiffTree(level int, slot uint64) []byte {
|
||||
@@ -75,9 +73,6 @@ func (s *Store) getAnchorState(offset uint64, lvl int, slot primitives.Slot) (an
|
||||
|
||||
// computeLevel computes the level in the diff tree. Returns -1 in case slot should not be in tree.
|
||||
func computeLevel(offset uint64, slot primitives.Slot) int {
|
||||
if uint64(slot) < offset {
|
||||
return -1
|
||||
}
|
||||
rel := uint64(slot) - offset
|
||||
for i, exp := range flags.Get().StateDiffExponents {
|
||||
if exp < 2 || exp >= 64 {
|
||||
@@ -124,66 +119,6 @@ func (s *Store) getOffset() uint64 {
|
||||
return s.stateDiffCache.getOffset()
|
||||
}
|
||||
|
||||
// hasStateDiffOffset checks if the state-diff offset has been set in the database.
|
||||
// This is used to detect if an existing database has state-diff enabled.
|
||||
func (s *Store) hasStateDiffOffset() (bool, error) {
|
||||
var hasOffset bool
|
||||
err := s.db.View(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return nil
|
||||
}
|
||||
hasOffset = bucket.Get(offsetKey) != nil
|
||||
return nil
|
||||
})
|
||||
return hasOffset, err
|
||||
}
|
||||
|
||||
// initializeStateDiff sets up the state-diff schema for a new database.
|
||||
// This should be called during checkpoint sync or genesis sync.
|
||||
func (s *Store) initializeStateDiff(slot primitives.Slot, initialState state.ReadOnlyBeaconState) error {
|
||||
// Return early if the feature is not set
|
||||
if !features.Get().EnableStateDiff {
|
||||
return nil
|
||||
}
|
||||
// Only reinitialize if the offset is different
|
||||
if s.stateDiffCache != nil {
|
||||
if s.stateDiffCache.getOffset() == uint64(slot) {
|
||||
log.WithField("offset", slot).Warning("Ignoring state diff cache reinitialization")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Write offset directly to the database (without using cache which doesn't exist yet).
|
||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(offsetBytes, uint64(slot))
|
||||
return bucket.Put(offsetKey, offsetBytes)
|
||||
})
|
||||
if err != nil {
|
||||
return pkgerrors.Wrap(err, "failed to set offset")
|
||||
}
|
||||
|
||||
// Create the state diff cache (this will read the offset from the database).
|
||||
sdCache, err := newStateDiffCache(s)
|
||||
if err != nil {
|
||||
return pkgerrors.Wrap(err, "failed to create state diff cache")
|
||||
}
|
||||
s.stateDiffCache = sdCache
|
||||
|
||||
// Save the initial state as a full snapshot.
|
||||
if err := s.saveFullSnapshot(initialState); err != nil {
|
||||
return pkgerrors.Wrap(err, "failed to save initial snapshot")
|
||||
}
|
||||
|
||||
log.WithField("offset", slot).Info("Initialized state-diff cache")
|
||||
return nil
|
||||
}
|
||||
|
||||
func keyForSnapshot(v int) ([]byte, error) {
|
||||
switch v {
|
||||
case version.Fulu:
|
||||
|
||||
@@ -43,12 +43,8 @@ func TestStateDiff_ComputeLevel(t *testing.T) {
|
||||
|
||||
offset := db.getOffset()
|
||||
|
||||
// should be -1. slot < offset
|
||||
lvl := computeLevel(10, primitives.Slot(9))
|
||||
require.Equal(t, -1, lvl)
|
||||
|
||||
// 2 ** 21
|
||||
lvl = computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
|
||||
lvl := computeLevel(offset, primitives.Slot(math.PowerOf2(21)))
|
||||
require.Equal(t, 0, lvl)
|
||||
|
||||
// 2 ** 21 * 3
|
||||
|
||||
@@ -1395,23 +1395,6 @@ func TestStore_CanSaveRetrieveStateUsingStateDiff(t *testing.T) {
|
||||
require.IsNil(t, readSt)
|
||||
})
|
||||
|
||||
t.Run("slot before offset", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 10)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'A'})
|
||||
ss := ðpb.StateSummary{Slot: 9, Root: r[:]}
|
||||
err = db.SaveStateSummary(t.Context(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
st, err := db.getStateUsingStateDiff(t.Context(), r)
|
||||
require.ErrorIs(t, err, ErrSlotBeforeOffset)
|
||||
require.IsNil(t, st)
|
||||
})
|
||||
|
||||
t.Run("Full state snapshot", func(t *testing.T) {
|
||||
t.Run("using state summary", func(t *testing.T) {
|
||||
for v := range version.All() {
|
||||
@@ -1644,21 +1627,4 @@ func TestStore_HasStateUsingStateDiff(t *testing.T) {
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
t.Run("slot before offset", func(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
setDefaultStateDiffExponents()
|
||||
|
||||
err := setOffsetInDB(db, 10)
|
||||
require.NoError(t, err)
|
||||
|
||||
r := bytesutil.ToBytes32([]byte{'B'})
|
||||
ss := ðpb.StateSummary{Slot: 0, Root: r[:]}
|
||||
err = db.SaveStateSummary(t.Context(), ss)
|
||||
require.NoError(t, err)
|
||||
|
||||
hasState, err := db.hasStateUsingStateDiff(t.Context(), r)
|
||||
require.ErrorIs(t, err, ErrSlotBeforeOffset)
|
||||
require.Equal(t, false, hasState)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
|
||||
"go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// InitStateDiffCacheForTesting initializes the state diff cache with the given offset.
|
||||
// This is intended for testing purposes when setting up state diff after database creation.
|
||||
// This file is only compiled when the "testing" build tag is set.
|
||||
func (s *Store) InitStateDiffCacheForTesting(t testing.TB, offset uint64) error {
|
||||
// First, set the offset in the database.
|
||||
err := s.db.Update(func(tx *bbolt.Tx) error {
|
||||
bucket := tx.Bucket(stateDiffBucket)
|
||||
if bucket == nil {
|
||||
return bbolt.ErrBucketNotFound
|
||||
}
|
||||
|
||||
offsetBytes := make([]byte, 8)
|
||||
binary.LittleEndian.PutUint64(offsetBytes, offset)
|
||||
return bucket.Put([]byte("offset"), offsetBytes)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then create the state diff cache.
|
||||
sdCache, err := newStateDiffCache(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.stateDiffCache = sdCache
|
||||
return nil
|
||||
}
|
||||
@@ -110,8 +110,6 @@ func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
if err = s.SaveFinalizedCheckpoint(ctx, chkpt); err != nil {
|
||||
return errors.Wrap(err, "save finalized checkpoint")
|
||||
}
|
||||
if err := s.initializeStateDiff(state.Slot(), state); err != nil {
|
||||
return errors.Wrap(err, "failed to initialize state diff")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -75,6 +75,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
p2p := p2pTesting.NewTestP2P(t)
|
||||
lcStore := NewLightClientStore(p2p, new(event.Feed), testDB.SetupDB(t))
|
||||
|
||||
timeForGoroutinesToFinish := 20 * time.Microsecond
|
||||
// update 0 with basic data and no supermajority following an empty lastFinalityUpdate - should save and broadcast
|
||||
l0 := util.NewTestLightClient(t, version.Altair)
|
||||
update0, err := NewLightClientFinalityUpdateFromBeaconState(l0.Ctx, l0.State, l0.Block, l0.AttestedState, l0.AttestedBlock, l0.FinalizedBlock)
|
||||
@@ -86,9 +87,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update0, true)
|
||||
require.Equal(t, update0, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update when previous is nil")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 1 with same finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -102,7 +102,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update1, true)
|
||||
require.Equal(t, update1, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called after setting a new last finality update without supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
@@ -117,9 +117,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update2, true)
|
||||
require.Equal(t, update2, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after setting a new last finality update with supermajority")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 3 with same finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
@@ -133,7 +132,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update3, true)
|
||||
require.Equal(t, update3, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been when previous was already broadcast")
|
||||
|
||||
// update 4 with increased finality slot, increased attested slot, and supermajority - should save and broadcast
|
||||
@@ -147,9 +146,8 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update4, true)
|
||||
require.Equal(t, update4, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
require.Eventually(t, func() bool {
|
||||
return p2p.BroadcastCalled.Load()
|
||||
}, time.Second, 10*time.Millisecond, "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, true, p2p.BroadcastCalled.Load(), "Broadcast should have been called after a new finality update with increased finality slot")
|
||||
p2p.BroadcastCalled.Store(false) // Reset for next test
|
||||
|
||||
// update 5 with the same new finality slot, increased attested slot, and supermajority - should save but not broadcast
|
||||
@@ -163,7 +161,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update5, true)
|
||||
require.Equal(t, update5, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
|
||||
// update 6 with the same new finality slot, increased attested slot, and no supermajority - should save but not broadcast
|
||||
@@ -177,7 +175,7 @@ func TestLightClientStore_SetLastFinalityUpdate(t *testing.T) {
|
||||
|
||||
lcStore.SetLastFinalityUpdate(update6, true)
|
||||
require.Equal(t, update6, lcStore.LastFinalityUpdate(), "lastFinalityUpdate should match the set value")
|
||||
time.Sleep(50 * time.Millisecond) // Wait briefly to verify broadcast is not called
|
||||
time.Sleep(timeForGoroutinesToFinish) // give some time for the broadcast goroutine to finish
|
||||
require.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcast should not have been called when previous was already broadcast with supermajority")
|
||||
}
|
||||
|
||||
|
||||
@@ -64,6 +64,7 @@ go_library(
|
||||
"//monitoring/tracing:go_default_library",
|
||||
"//runtime:go_default_library",
|
||||
"//runtime/prereqs:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
|
||||
@@ -66,6 +66,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/prometheus"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/prereqs"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -468,6 +469,10 @@ func (b *BeaconNode) OperationFeed() event.SubscriberSender {
|
||||
func (b *BeaconNode) Start() {
|
||||
b.lock.Lock()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"version": version.Version(),
|
||||
}).Info("Starting beacon node")
|
||||
|
||||
b.services.StartAll()
|
||||
|
||||
stop := b.stop
|
||||
@@ -535,12 +540,7 @@ func openDB(ctx context.Context, dbPath string, clearer *dbClearer) (*kv.Store,
|
||||
log.WithField("databasePath", dbPath).Info("Checking DB")
|
||||
|
||||
d, err := kv.NewKVStore(ctx, dbPath)
|
||||
if errors.Is(err, kv.ErrStateDiffIncompatible) {
|
||||
log.WithError(err).Warn("Disabling state-diff feature")
|
||||
cfg := features.Get()
|
||||
cfg.EnableStateDiff = false
|
||||
features.Init(cfg)
|
||||
} else if err != nil {
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not create database at %s", dbPath)
|
||||
}
|
||||
|
||||
|
||||
@@ -68,6 +68,7 @@ func TestNodeClose_OK(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNodeStart_Ok(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
app := cli.App{}
|
||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
@@ -96,9 +97,11 @@ func TestNodeStart_Ok(t *testing.T) {
|
||||
}()
|
||||
time.Sleep(3 * time.Second)
|
||||
node.Close()
|
||||
require.LogsContain(t, hook, "Starting beacon node")
|
||||
}
|
||||
|
||||
func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
app := cli.App{}
|
||||
tmp := fmt.Sprintf("%s/datadirtest2", t.TempDir())
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
@@ -124,6 +127,7 @@ func TestNodeStart_SyncChecker(t *testing.T) {
|
||||
time.Sleep(3 * time.Second)
|
||||
assert.NotNil(t, node.syncChecker.Svc)
|
||||
node.Close()
|
||||
require.LogsContain(t, hook, "Starting beacon node")
|
||||
}
|
||||
|
||||
// TestClearDB tests clearing the database
|
||||
|
||||
@@ -49,7 +49,6 @@ func (s *Service) prepareForkChoiceAtts() {
|
||||
}
|
||||
case <-s.ctx.Done():
|
||||
log.Debug("Context closed, exiting routine")
|
||||
ticker.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,8 @@ go_library(
|
||||
"doc.go",
|
||||
"fork.go",
|
||||
"fork_watcher.go",
|
||||
"gossip_peer_controller.go",
|
||||
"gossip_peer_crawler.go",
|
||||
"gossip_scoring_params.go",
|
||||
"gossip_topic_mappings.go",
|
||||
"handshake.go",
|
||||
@@ -52,6 +54,7 @@ go_library(
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/kv:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/gossipcrawler:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
@@ -116,6 +119,7 @@ go_library(
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
"@org_golang_x_sync//semaphore:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -129,6 +133,8 @@ go_test(
|
||||
"dial_relay_node_test.go",
|
||||
"discovery_test.go",
|
||||
"fork_test.go",
|
||||
"gossip_peer_controller_test.go",
|
||||
"gossip_peer_crawler_test.go",
|
||||
"gossip_scoring_params_test.go",
|
||||
"gossip_topic_mappings_test.go",
|
||||
"message_id_test.go",
|
||||
@@ -155,9 +161,11 @@ go_test(
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/signing:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/db/testing:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/gossipcrawler:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/peerdata:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
@@ -204,8 +212,10 @@ go_test(
|
||||
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
|
||||
"@com_github_multiformats_go_multiaddr//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/testutil:go_default_library",
|
||||
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
"@com_github_stretchr_testify//require:go_default_library",
|
||||
"@org_golang_google_protobuf//proto:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -61,7 +61,10 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
|
||||
if !ok {
|
||||
return errors.Errorf("message of %T does not support marshaller interface", msg)
|
||||
}
|
||||
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
|
||||
|
||||
fullTopic := fmt.Sprintf(topic, forkDigest) + s.Encoding().ProtocolSuffix()
|
||||
|
||||
return s.broadcastObject(ctx, castMsg, fullTopic)
|
||||
}
|
||||
|
||||
// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be
|
||||
@@ -107,6 +110,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [fieldparams.VersionLength]byte) {
|
||||
topic := AttestationSubnetTopic(forkDigest, subnet)
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -117,7 +121,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
|
||||
// Ensure we have peers with this subnet.
|
||||
s.subnetLocker(subnet).RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(attestationToTopic(subnet, forkDigest))
|
||||
hasPeer := s.hasPeerWithTopic(topic)
|
||||
s.subnetLocker(subnet).RUnlock()
|
||||
|
||||
span.SetAttributes(
|
||||
@@ -132,7 +136,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
s.subnetLocker(subnet).Lock()
|
||||
defer s.subnetLocker(subnet).Unlock()
|
||||
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
@@ -154,13 +158,14 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
|
||||
if err := s.broadcastObject(ctx, att, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast attestation")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
|
||||
topic := SyncCommitteeSubnetTopic(forkDigest, subnet)
|
||||
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -174,7 +179,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
// to ensure that we can reuse the same subnet locker.
|
||||
wrappedSubIdx := subnet + syncLockerVal
|
||||
s.subnetLocker(wrappedSubIdx).RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
|
||||
hasPeer := s.hasPeerWithTopic(topic)
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
span.SetAttributes(
|
||||
@@ -188,7 +193,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
if err := func() error {
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
@@ -206,7 +211,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
|
||||
return
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
|
||||
if err := s.broadcastObject(ctx, sMsg, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast sync committee message")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
@@ -234,6 +239,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [fieldparams.VersionLength]byte) {
|
||||
topic := BlobSubnetTopic(forkDigest, subnet)
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
|
||||
defer span.End()
|
||||
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.
|
||||
@@ -244,7 +250,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
|
||||
wrappedSubIdx := subnet + blobSubnetLockerVal
|
||||
s.subnetLocker(wrappedSubIdx).RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
|
||||
hasPeer := s.hasPeerWithTopic(topic)
|
||||
s.subnetLocker(wrappedSubIdx).RUnlock()
|
||||
|
||||
if !hasPeer {
|
||||
@@ -253,7 +259,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
@@ -265,7 +271,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
|
||||
if err := s.broadcastObject(ctx, blobSidecar, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast blob sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
}
|
||||
@@ -294,7 +300,7 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
|
||||
}
|
||||
|
||||
digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
|
||||
if err := s.broadcastObject(ctx, update, LcOptimisticToTopic(digest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -328,7 +334,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
}
|
||||
|
||||
forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
|
||||
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
|
||||
if err := s.broadcastObject(ctx, update, LcFinalityToTopic(forkDigest)); err != nil {
|
||||
log.WithError(err).Debug("Failed to broadcast light client finality update")
|
||||
err := errors.Wrap(err, "could not publish message")
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -374,7 +380,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
|
||||
topicFunc := func(sidecar blocks.VerifiedRODataColumn) (topic string, wrappedSubIdx uint64, subnet uint64) {
|
||||
subnet = peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
topic = dataColumnSubnetToTopic(subnet, forkDigest)
|
||||
topic = DataColumnSubnetTopic(forkDigest, subnet)
|
||||
wrappedSubIdx = subnet + dataColumnSubnetVal
|
||||
return
|
||||
}
|
||||
@@ -390,7 +396,7 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
// Check if we have a peer for this subnet (use RLock for read-only check).
|
||||
mu := s.subnetLocker(wrappedSubIdx)
|
||||
mu.RLock()
|
||||
hasPeer := s.hasPeerWithSubnet(topic)
|
||||
hasPeer := s.hasPeerWithTopic(topic)
|
||||
mu.RUnlock()
|
||||
|
||||
if hasPeer {
|
||||
@@ -433,10 +439,10 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
ctx := trace.NewContext(s.ctx, span)
|
||||
defer span.End()
|
||||
|
||||
topic, wrappedSubIdx, subnet := topicFunc(sidecar)
|
||||
topic, wrappedSubIdx, _ := topicFunc(sidecar)
|
||||
|
||||
// Find peers for this sidecar's subnet.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot find peers if needed")
|
||||
return
|
||||
@@ -539,41 +545,37 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
|
||||
func (s *Service) findPeersIfNeeded(
|
||||
ctx context.Context,
|
||||
wrappedSubIdx uint64,
|
||||
topicFormat string,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
subnet uint64,
|
||||
topic string,
|
||||
) error {
|
||||
// Sending a data column sidecar to only one peer is not ideal,
|
||||
// but it ensures at least one peer receives it.
|
||||
s.subnetLocker(wrappedSubIdx).Lock()
|
||||
defer s.subnetLocker(wrappedSubIdx).Unlock()
|
||||
|
||||
// No peers found, attempt to find peers with this subnet.
|
||||
if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
|
||||
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
|
||||
return errors.Wrap(err, "find peers with subnet")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// encodeGossipMessage encodes an object for gossip transmission.
|
||||
// It returns the encoded bytes and the full topic with protocol suffix.
|
||||
func (s *Service) encodeGossipMessage(obj ssz.Marshaler, topic string) ([]byte, string, error) {
|
||||
func (s *Service) encodeGossipMessage(obj ssz.Marshaler) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := s.Encoding().EncodeGossip(buf, obj); err != nil {
|
||||
return nil, "", fmt.Errorf("could not encode message: %w", err)
|
||||
return nil, fmt.Errorf("could not encode message: %w", err)
|
||||
}
|
||||
return buf.Bytes(), topic + s.Encoding().ProtocolSuffix(), nil
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// broadcastObject broadcasts a message to other peers in our gossip mesh.
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic string) error {
|
||||
func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, fullTopic string) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.broadcastObject")
|
||||
defer span.End()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
span.SetAttributes(trace.StringAttribute("topic", fullTopic))
|
||||
|
||||
data, fullTopic, err := s.encodeGossipMessage(obj, topic)
|
||||
data, err := s.encodeGossipMessage(obj)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -597,13 +599,13 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
|
||||
|
||||
// batchObject adds an object to a message batch for a future broadcast.
|
||||
// The caller MUST publish the batch after all messages have been added.
|
||||
func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, obj ssz.Marshaler, topic string) error {
|
||||
func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, obj ssz.Marshaler, fullTopic string) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.batchObject")
|
||||
defer span.End()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
span.SetAttributes(trace.StringAttribute("topic", fullTopic))
|
||||
|
||||
data, fullTopic, err := s.encodeGossipMessage(obj, topic)
|
||||
data, err := s.encodeGossipMessage(obj)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
@@ -624,27 +626,3 @@ func (s *Service) batchObject(ctx context.Context, batch *pubsub.MessageBatch, o
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
func lcOptimisticToTopic(forkDigest [4]byte) string {
|
||||
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest)
|
||||
}
|
||||
|
||||
func lcFinalityToTopic(forkDigest [4]byte) string {
|
||||
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest)
|
||||
}
|
||||
|
||||
func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
|
||||
}
|
||||
|
||||
@@ -30,6 +30,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/util"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -72,10 +73,7 @@ func TestService_Broadcast(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -114,6 +112,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
|
||||
if gtm := GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()]; gtm != AttestationSubnetTopicFormat {
|
||||
t.Errorf("Constant is out of date. Wanted %s, got %s", AttestationSubnetTopicFormat, gtm)
|
||||
}
|
||||
s := Service{}
|
||||
|
||||
tests := []struct {
|
||||
att *ethpb.Attestation
|
||||
@@ -126,7 +125,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
|
||||
Slot: 2,
|
||||
},
|
||||
},
|
||||
topic: "/eth2/00000000/beacon_attestation_2",
|
||||
topic: "/eth2/00000000/beacon_attestation_2" + s.Encoding().ProtocolSuffix(),
|
||||
},
|
||||
{
|
||||
att: ðpb.Attestation{
|
||||
@@ -135,7 +134,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
|
||||
Slot: 10,
|
||||
},
|
||||
},
|
||||
topic: "/eth2/00000000/beacon_attestation_21",
|
||||
topic: "/eth2/00000000/beacon_attestation_21" + s.Encoding().ProtocolSuffix(),
|
||||
},
|
||||
{
|
||||
att: ðpb.Attestation{
|
||||
@@ -144,12 +143,12 @@ func TestService_Attestation_Subnet(t *testing.T) {
|
||||
Slot: 529,
|
||||
},
|
||||
},
|
||||
topic: "/eth2/00000000/beacon_attestation_8",
|
||||
topic: "/eth2/00000000/beacon_attestation_8" + s.Encoding().ProtocolSuffix(),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(100, tt.att.Data.CommitteeIndex, tt.att.Data.Slot)
|
||||
assert.Equal(t, tt.topic, attestationToTopic(subnet, [4]byte{} /* fork digest */), "Wrong topic")
|
||||
assert.Equal(t, tt.topic, AttestationSubnetTopic([4]byte{}, subnet), "Wrong topic")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -178,21 +177,16 @@ func TestService_BroadcastAttestation(t *testing.T) {
|
||||
msg := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
subnet := uint64(5)
|
||||
|
||||
topic := AttestationSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
topic := AttestationSubnetTopic(digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -234,6 +228,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
// Setup bootnode.
|
||||
cfg := &Config{PingInterval: testPingInterval, DB: db}
|
||||
cfg.UDPPort = uint(port)
|
||||
cfg.TCPPort = uint(port)
|
||||
_, pkey := createAddrAndPrivKey(t)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
genesisTime := time.Now()
|
||||
@@ -259,8 +254,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
|
||||
var listeners []*listenerWrapper
|
||||
var hosts []host.Host
|
||||
var configs []*Config
|
||||
// setup other nodes.
|
||||
cfg = &Config{
|
||||
baseCfg := &Config{
|
||||
Discv5BootStrapAddrs: []string{bootNode.String()},
|
||||
MaxPeers: 2,
|
||||
PingInterval: testPingInterval,
|
||||
@@ -269,11 +265,21 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
// Setup 2 different hosts
|
||||
for i := uint(1); i <= 2; i++ {
|
||||
h, pkey, ipAddr := createHost(t, port+i)
|
||||
cfg.UDPPort = uint(port + i)
|
||||
cfg.TCPPort = uint(port + i)
|
||||
|
||||
// Create a new config for each service to avoid shared mutations
|
||||
cfg := &Config{
|
||||
Discv5BootStrapAddrs: baseCfg.Discv5BootStrapAddrs,
|
||||
MaxPeers: baseCfg.MaxPeers,
|
||||
PingInterval: baseCfg.PingInterval,
|
||||
DB: baseCfg.DB,
|
||||
UDPPort: uint(port + i),
|
||||
TCPPort: uint(port + i),
|
||||
}
|
||||
|
||||
if len(listeners) > 0 {
|
||||
cfg.Discv5BootStrapAddrs = append(cfg.Discv5BootStrapAddrs, listeners[len(listeners)-1].Self().String())
|
||||
}
|
||||
|
||||
s := &Service{
|
||||
cfg: cfg,
|
||||
genesisTime: genesisTime,
|
||||
@@ -286,18 +292,22 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
close(s.custodyInfoSet)
|
||||
|
||||
listener, err := s.startDiscoveryV5(ipAddr, pkey)
|
||||
// Set for 2nd peer
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
|
||||
// Set listener for the service
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
|
||||
// Set subnet for 2nd peer
|
||||
if i == 2 {
|
||||
s.dv5Listener = listener
|
||||
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(subnet, true)
|
||||
err := s.updateSubnetRecordWithMetadata(bitV)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
assert.NoError(t, err, "Could not start discovery for node")
|
||||
listeners = append(listeners, listener)
|
||||
hosts = append(hosts, h)
|
||||
configs = append(configs, cfg)
|
||||
}
|
||||
defer func() {
|
||||
// Close down all peers.
|
||||
@@ -332,7 +342,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
pubsub: ps1,
|
||||
dv5Listener: listeners[0],
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: cfg,
|
||||
cfg: configs[0],
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
@@ -348,7 +358,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
pubsub: ps2,
|
||||
dv5Listener: listeners[1],
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
cfg: cfg,
|
||||
cfg: configs[1],
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
@@ -361,14 +371,12 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
go p2.listenForNewNodes()
|
||||
|
||||
msg := util.HydrateAttestation(ðpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
|
||||
topic := AttestationSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
topic := AttestationSubnetTopic(digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
// We don't use our internal subscribe method
|
||||
// due to using floodsub over here.
|
||||
tpHandle, err := p2.JoinTopic(topic)
|
||||
@@ -381,15 +389,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
|
||||
_, err = tpHandle.Subscribe()
|
||||
require.NoError(t, err)
|
||||
|
||||
// This test specifically tests discovery-based peer finding, which requires
|
||||
// time for nodes to discover each other. Using a fixed sleep here is intentional
|
||||
// as we're testing the discovery timing behavior.
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Verify mesh establishment after discovery
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0 && len(p2.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(500 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
nodePeers := p.pubsub.ListPeers(topic)
|
||||
nodePeers2 := p2.pubsub.ListPeers(topic)
|
||||
@@ -447,21 +447,16 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
|
||||
msg := util.HydrateSyncCommittee(ðpb.SyncCommitteeMessage{})
|
||||
subnet := uint64(5)
|
||||
|
||||
topic := SyncCommitteeSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = topic
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = SyncCommitteeSubnetTopicFormat
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
topic := SyncCommitteeSubnetTopic(digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -527,21 +522,16 @@ func TestService_BroadcastBlob(t *testing.T) {
|
||||
}
|
||||
subnet := uint64(0)
|
||||
|
||||
topic := BlobSubnetTopicFormat
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = topic
|
||||
GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = BlobSubnetTopicFormat
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest, subnet)
|
||||
topic := BlobSubnetTopic(digest, subnet)
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -597,17 +587,13 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
|
||||
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
topic := LcOptimisticToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -676,17 +662,13 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
|
||||
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
topic := LcFinalityToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
|
||||
|
||||
// External peer subscribes to the topic.
|
||||
topic += p.Encoding().ProtocolSuffix()
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(p.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
time.Sleep(50 * time.Millisecond) // libp2p fails without this delay...
|
||||
|
||||
// Async listen for the pubsub, must be before the broadcast.
|
||||
var wg sync.WaitGroup
|
||||
@@ -730,7 +712,6 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
const (
|
||||
port = 2000
|
||||
columnIndex = 12
|
||||
topicFormat = DataColumnSubnetTopicFormat
|
||||
)
|
||||
|
||||
ctx := t.Context()
|
||||
@@ -788,7 +769,17 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
|
||||
topic := DataColumnSubnetTopic(digest, subnet)
|
||||
|
||||
crawler, err := NewGossipPeerCrawler(t.Context(), service, listener, 1*time.Second, 1*time.Second, 10,
|
||||
func(n *enode.Node) bool { return true },
|
||||
service.Peers().Scorers().Score)
|
||||
require.NoError(t, err)
|
||||
err = crawler.Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
return []string{topic}, nil
|
||||
})
|
||||
require.NoError(t, err)
|
||||
service.gossipDialer = NewGossipPeerDialer(t.Context(), crawler, service.PubSub().ListPeers, service.DialPeers)
|
||||
|
||||
_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
|
||||
verifiedRoSidecar := verifiedRoSidecars[0]
|
||||
@@ -797,10 +788,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
sub, err := p2.SubscribeToTopic(topic)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for libp2p mesh to establish
|
||||
require.Eventually(t, func() bool {
|
||||
return len(service.pubsub.ListPeers(topic)) > 0
|
||||
}, 5*time.Second, 10*time.Millisecond, "libp2p mesh did not establish")
|
||||
// libp2p fails without this delay
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
|
||||
|
||||
@@ -369,11 +369,11 @@ func (s *Service) listenForNewNodes() {
|
||||
}
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets ensures that our node is connected to enough peers.
|
||||
// If, the threshold is met, then this function immediately returns.
|
||||
// findAndDialPeers ensures that our node is connected to enough peers.
|
||||
// If the threshold is met, then this function immediately returns.
|
||||
// Otherwise, it searches for new peers and dials them.
|
||||
// If `ctx“ is canceled while searching for peers, search is stopped, but new found peers are still dialed.
|
||||
// In this case, the function returns an error.
|
||||
// If `ctx` is canceled while searching for peers, search is stopped, but newly
|
||||
// found peers are still dialed. In this case, the function returns an error.
|
||||
func (s *Service) findAndDialPeers(ctx context.Context) error {
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
@@ -404,8 +404,7 @@ func (s *Service) findAndDialPeers(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
|
||||
dialedPeerCount := s.DialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
if dialedPeerCount > missingPeerCount {
|
||||
missingPeerCount = 0
|
||||
continue
|
||||
@@ -554,6 +553,7 @@ func (s *Service) createListener(
|
||||
Bootnodes: bootNodes,
|
||||
PingInterval: s.cfg.PingInterval,
|
||||
NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
|
||||
V5RespTimeout: 300 * time.Millisecond,
|
||||
}
|
||||
|
||||
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
|
||||
|
||||
@@ -482,12 +482,12 @@ func TestStaticPeering_PeersAreAdded(t *testing.T) {
|
||||
s.Start()
|
||||
<-exitRoutine
|
||||
}()
|
||||
time.Sleep(50 * time.Millisecond) // Wait for service initialization
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
var vr [32]byte
|
||||
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
require.Eventually(t, func() bool {
|
||||
return len(s.host.Network().Peers()) == 5
|
||||
}, 10*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
|
||||
time.Sleep(4 * time.Second)
|
||||
ps := s.host.Network().Peers()
|
||||
assert.Equal(t, 5, len(ps), "Not all peers added to peerstore")
|
||||
require.NoError(t, s.Stop())
|
||||
exitRoutine <- true
|
||||
}
|
||||
|
||||
343
beacon-chain/p2p/gossip_peer_controller.go
Normal file
343
beacon-chain/p2p/gossip_peer_controller.go
Normal file
@@ -0,0 +1,343 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const dialInterval = 500 * time.Millisecond
|
||||
const peerCountLogInterval = 5 * time.Minute
|
||||
const topicMonitorInterval = 200 * time.Millisecond
|
||||
|
||||
// GossipPeerDialer maintains minimum peer counts for gossip topics by periodically
|
||||
// dialing new peers discovered by a crawler. It runs a background loop that checks each
|
||||
// topic's peer count and dials new peers when below the target threshold.
|
||||
type GossipPeerDialer struct {
|
||||
ctx context.Context
|
||||
|
||||
listPeers func(topic string) []peer.ID
|
||||
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
|
||||
|
||||
crawler gossipcrawler.Crawler
|
||||
topicsProvider gossipcrawler.SubnetTopicsProvider
|
||||
|
||||
cachedTopics map[string]int
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// NewGossipPeerDialer creates a new GossipPeerDialer instance.
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: Parent context that controls the lifecycle of the dialer. When cancelled,
|
||||
// the background dial loop will terminate.
|
||||
// - crawler: Source of peer candidates for each topic. The crawler maintains a registry
|
||||
// of peers discovered through DHT crawling, indexed by the topics they subscribe to.
|
||||
// - listPeers: Function that returns the current peers connected for a given topic.
|
||||
// Used to determine how many additional peers need to be dialed.
|
||||
// - dialPeers: Function that dials the given enode.Node peers with a concurrency limit.
|
||||
// Returns the number of successful dials.
|
||||
//
|
||||
// The dialer must be started with Start() before it begins maintaining peer counts.
|
||||
func NewGossipPeerDialer(
|
||||
ctx context.Context,
|
||||
crawler gossipcrawler.Crawler,
|
||||
listPeers func(topic string) []peer.ID,
|
||||
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint,
|
||||
) *GossipPeerDialer {
|
||||
return &GossipPeerDialer{
|
||||
ctx: ctx,
|
||||
listPeers: listPeers,
|
||||
dialPeers: dialPeers,
|
||||
crawler: crawler,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the background dial loop that maintains peer counts for all topics.
|
||||
//
|
||||
// The provider function is called on each tick to get the current list of topics that
|
||||
// need peer maintenance. This allows the set of topics to change dynamically as the node
|
||||
// subscribes/unsubscribes from subnets.
|
||||
//
|
||||
// Start is idempotent - calling it multiple times has no effect after the first call.
|
||||
// Only the provider from the first call will be used; subsequent calls are ignored.
|
||||
//
|
||||
// The dial loop runs every dialInterval (1 second) and for each topic:
|
||||
// 1. Checks current peer count via listPeers()
|
||||
// 2. If below the per-topic min peer count, requests candidates from the crawler
|
||||
// 3. Deduplicates peers across all topics to avoid redundant dials
|
||||
// 4. Dials missing peers with rate limiting if enabled
|
||||
//
|
||||
// Returns nil always (error return preserved for interface compatibility).
|
||||
func (g *GossipPeerDialer) Start(provider gossipcrawler.SubnetTopicsProvider) error {
|
||||
g.once.Do(func() {
|
||||
g.topicsProvider = provider
|
||||
g.cachedTopics = make(map[string]int)
|
||||
go g.dialLoop()
|
||||
go g.logPeerCountsLoop()
|
||||
go g.topicMonitorLoop()
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) dialLoop() {
|
||||
ticker := time.NewTicker(dialInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
peersToDial := g.selectPeersForTopics()
|
||||
if len(peersToDial) == 0 {
|
||||
continue
|
||||
}
|
||||
g.dialPeersWithRatelimiting(peersToDial)
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) logPeerCountsLoop() {
|
||||
ticker := time.NewTicker(peerCountLogInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
topics := g.topicsProvider()
|
||||
for topic, minPeers := range topics {
|
||||
currentPeers := len(g.listPeers(topic))
|
||||
log.WithField("topic", topic).
|
||||
WithField("currentPeers", currentPeers).
|
||||
WithField("minPeers", minPeers).
|
||||
Debug("Gossip topic peer count")
|
||||
}
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// topicsChanged compares the new topics with cached topics and returns true
|
||||
// if topics have been added or removed. Changes to min peer counts are ignored.
|
||||
func (g *GossipPeerDialer) topicsChanged(newTopics map[string]int) bool {
|
||||
if len(newTopics) != len(g.cachedTopics) {
|
||||
return true
|
||||
}
|
||||
for topic := range newTopics {
|
||||
if _, ok := g.cachedTopics[topic]; !ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) topicMonitorLoop() {
|
||||
ticker := time.NewTicker(topicMonitorInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
topics := g.topicsProvider()
|
||||
if g.topicsChanged(topics) {
|
||||
g.cachedTopics = topics
|
||||
g.crawler.TriggerCrawl()
|
||||
}
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// selectPeersForTopics builds a bidirectional mapping of topics to peers and selects
|
||||
// peers to dial using a greedy algorithm that prioritizes peers serving multiple topics.
|
||||
// When a peer is selected, the needed count is decremented for ALL topics that peer serves,
|
||||
// avoiding redundant dials when one peer can satisfy multiple topic requirements.
|
||||
func (g *GossipPeerDialer) selectPeersForTopics() []*enode.Node {
|
||||
topicsWithMinPeers := g.topicsProvider()
|
||||
|
||||
// Calculate how many peers each topic still needs.
|
||||
neededByTopic := make(map[string]int)
|
||||
for topic, minPeers := range topicsWithMinPeers {
|
||||
currentCount := len(g.listPeers(topic))
|
||||
if needed := minPeers - currentCount; needed > 0 {
|
||||
neededByTopic[topic] = needed
|
||||
}
|
||||
}
|
||||
|
||||
if len(neededByTopic) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
peerToTopics := make(map[enode.ID][]string)
|
||||
nodeByID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for topic := range neededByTopic {
|
||||
candidates := g.crawler.PeersForTopic(topic)
|
||||
for _, node := range candidates {
|
||||
id := node.ID()
|
||||
if _, exists := nodeByID[id]; !exists {
|
||||
nodeByID[id] = node
|
||||
}
|
||||
peerToTopics[id] = append(peerToTopics[id], topic)
|
||||
}
|
||||
}
|
||||
|
||||
// Build candidate list sorted by topic count (descending).
|
||||
// Peers serving more topics are prioritized.
|
||||
type candidate struct {
|
||||
node *enode.Node
|
||||
topics []string
|
||||
}
|
||||
candidates := make([]candidate, 0, len(peerToTopics))
|
||||
for id, topics := range peerToTopics {
|
||||
candidates = append(candidates, candidate{node: nodeByID[id], topics: topics})
|
||||
}
|
||||
|
||||
// sort candidates by topic count (descending)
|
||||
slices.SortFunc(candidates, func(a, b candidate) int {
|
||||
return len(b.topics) - len(a.topics)
|
||||
})
|
||||
|
||||
// Greedy selection with cross-topic accounting.
|
||||
var selected []*enode.Node
|
||||
for _, c := range candidates {
|
||||
// Check if this peer serves any topic we still need.
|
||||
servesNeededTopic := false
|
||||
for _, topic := range c.topics {
|
||||
if neededByTopic[topic] > 0 {
|
||||
servesNeededTopic = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !servesNeededTopic {
|
||||
continue
|
||||
}
|
||||
|
||||
// Select this peer and decrement needed count for ALL topics it serves.
|
||||
selected = append(selected, c.node)
|
||||
for _, topic := range c.topics {
|
||||
if neededByTopic[topic] > 0 {
|
||||
neededByTopic[topic]--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return selected
|
||||
}
|
||||
|
||||
// DialPeersForTopicBlocking blocks until the specified topic has at least nPeers connected,
|
||||
// or until the context is cancelled.
|
||||
//
|
||||
// This method is useful when you need to ensure a minimum number of peers are connected
|
||||
// for a specific topic before proceeding (e.g., before publishing a message).
|
||||
//
|
||||
// The method polls in a loop:
|
||||
// 1. Check if current peer count >= nPeers, return nil if satisfied
|
||||
// 2. Get peer candidates from crawler for this topic
|
||||
// 3. Dial candidates with rate limiting
|
||||
// 4. Wait 100ms for connections to establish in pubsub layer
|
||||
// 5. Repeat until target reached or context cancelled
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: Context to cancel the blocking operation. Takes precedence for cancellation.
|
||||
// - topic: The gossipsub topic to ensure peers for.
|
||||
// - nPeers: Minimum number of peers required before returning.
|
||||
//
|
||||
// Returns:
|
||||
// - nil: Successfully reached the target peer count.
|
||||
// - ctx.Err(): The provided context was cancelled.
|
||||
// - g.ctx.Err(): The dialer's parent context was cancelled.
|
||||
//
|
||||
// Note: This may block indefinitely if the crawler cannot provide enough peers
|
||||
// and the context has no deadline.
|
||||
func (g *GossipPeerDialer) DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error {
|
||||
for {
|
||||
peers := g.listPeers(topic)
|
||||
if len(peers) >= nPeers {
|
||||
return nil
|
||||
}
|
||||
|
||||
newPeers := g.peersForTopic(topic, nPeers)
|
||||
if len(newPeers) > 0 {
|
||||
g.dialPeersWithRatelimiting(newPeers)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
// some wait here is good after dialing as connections take some time to show up in pubsub
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
case <-g.ctx.Done():
|
||||
return g.ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) peersForTopic(topic string, targetCount int) []*enode.Node {
|
||||
peers := g.listPeers(topic)
|
||||
peerCount := len(peers)
|
||||
if peerCount >= targetCount {
|
||||
return nil
|
||||
}
|
||||
missing := targetCount - peerCount
|
||||
newPeers := g.crawler.PeersForTopic(topic)
|
||||
if len(newPeers) > missing {
|
||||
newPeers = newPeers[:missing]
|
||||
}
|
||||
return newPeers
|
||||
}
|
||||
|
||||
// ProtectedPeers returns peer IDs that should be protected from pruning.
|
||||
// For each topic, one connected peer is marked as protected to ensure
|
||||
// we maintain connectivity to all subscribed topics.
|
||||
func (g *GossipPeerDialer) ProtectedPeers() []peer.ID {
|
||||
if g.topicsProvider == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
topics := g.topicsProvider()
|
||||
protectedPeers := make(map[peer.ID]struct{})
|
||||
|
||||
for topic := range topics {
|
||||
connectedPeers := g.listPeers(topic)
|
||||
|
||||
// Skip if no peers connected
|
||||
if len(connectedPeers) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Protect the first peer for this topic
|
||||
protectedPeers[connectedPeers[0]] = struct{}{}
|
||||
}
|
||||
|
||||
result := make([]peer.ID, 0, len(protectedPeers))
|
||||
for pid := range protectedPeers {
|
||||
result = append(result, pid)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) dialPeersWithRatelimiting(peers []*enode.Node) {
|
||||
// Dial new peers in batches.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
g.dialPeers(g.ctx, maxConcurrentDials, peers)
|
||||
}
|
||||
691
beacon-chain/p2p/gossip_peer_controller_test.go
Normal file
691
beacon-chain/p2p/gossip_peer_controller_test.go
Normal file
@@ -0,0 +1,691 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"net"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGossipPeerDialer_Start(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
provider gossipcrawler.SubnetTopicsProvider
|
||||
expectedConnects int
|
||||
expectStartErr bool
|
||||
}{
|
||||
{
|
||||
name: "dials unique peers across topics",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30101)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30102)
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
"topic/b": {nodeA},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 2, "topic/b": 2}
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
{
|
||||
name: "uses per-topic min peer counts",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodes := make([]*enode.Node, 5)
|
||||
for i := range nodes {
|
||||
nodes[i] = newTestNode(t, "127.0.0.1", uint16(30110+i))
|
||||
}
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
// topic/mesh has 3 available peers, minPeers=2 -> should dial 2
|
||||
"topic/mesh": {nodes[0], nodes[1], nodes[2]},
|
||||
// topic/fanout has 3 available peers, minPeers=1 -> should dial 1
|
||||
"topic/fanout": {nodes[3], nodes[4]},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/mesh": 2,
|
||||
"topic/fanout": 1,
|
||||
}
|
||||
},
|
||||
// Total: 2 from mesh + 1 from fanout = 3 peers dialed
|
||||
expectedConnects: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
listPeers := func(topic string) []peer.ID { return nil }
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), tt.newCrawler(t), listPeers, md.DialPeers)
|
||||
|
||||
err := dialer.Start(tt.provider)
|
||||
if tt.expectStartErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return md.dialCount() >= tt.expectedConnects
|
||||
}, 2*time.Second, 20*time.Millisecond)
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_DialPeersForTopicBlocking(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers int
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
targetPeers int
|
||||
ctx func() (context.Context, context.CancelFunc)
|
||||
expectedConnects int
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "returns immediately when enough peers",
|
||||
connectedPeers: 1,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
return &mockCrawler{}
|
||||
},
|
||||
targetPeers: 1,
|
||||
ctx: func() (context.Context, context.CancelFunc) { return context.WithCancel(context.Background()) },
|
||||
expectedConnects: 0,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "dials when peers are missing",
|
||||
connectedPeers: 0,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30201)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30202)
|
||||
return &mockCrawler{
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
},
|
||||
}
|
||||
},
|
||||
targetPeers: 2,
|
||||
ctx: func() (context.Context, context.CancelFunc) {
|
||||
return context.WithTimeout(context.Background(), 1*time.Second)
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
var mu sync.Mutex
|
||||
connected := make([]peer.ID, 0)
|
||||
for i := 0; i < tt.connectedPeers; i++ {
|
||||
connected = append(connected, peer.ID(string(rune(i))))
|
||||
}
|
||||
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return connected
|
||||
}
|
||||
|
||||
dialPeers := func(ctx context.Context, max int, nodes []*enode.Node) uint {
|
||||
cnt := md.DialPeers(ctx, max, nodes)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
for range nodes {
|
||||
// Just add a dummy peer ID to simulate connection success
|
||||
connected = append(connected, peer.ID("dummy"))
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
crawler := tt.newCrawler(t)
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, dialPeers)
|
||||
topic := "topic/a"
|
||||
|
||||
ctx, cancel := tt.ctx()
|
||||
defer cancel()
|
||||
|
||||
err := dialer.DialPeersForTopicBlocking(ctx, topic, tt.targetPeers)
|
||||
if tt.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_peersForTopic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connected int
|
||||
targetCount int
|
||||
buildPeers func(t *testing.T) ([]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "returns nil when enough peers already connected",
|
||||
connected: 1,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
return []*enode.Node{newTestNode(t, "127.0.0.1", 30301)}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns crawler peers when none connected",
|
||||
connected: 0,
|
||||
targetCount: 2,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30311)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30312)
|
||||
return []*enode.Node{nodeA, nodeB}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "truncates peers when more than needed",
|
||||
connected: 0,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30321)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30322)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30323)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only returns missing peers",
|
||||
connected: 1,
|
||||
targetCount: 3,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30331)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30332)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30333)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
peers := make([]peer.ID, tt.connected)
|
||||
for i := 0; i < tt.connected; i++ {
|
||||
peers[i] = peer.ID(string(rune(i))) // Fake peer ID
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: map[string][]*enode.Node{"topic/test": crawlerPeers},
|
||||
consume: false,
|
||||
}
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
|
||||
got := dialer.peersForTopic("topic/test", tt.targetCount)
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got))
|
||||
|
||||
for i := range expected {
|
||||
require.Equal(t, expected[i], got[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_selectPeersForTopics(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers map[string]int // topic -> connected peer count
|
||||
topicsProvider func() map[string]int
|
||||
buildPeers func(t *testing.T) (map[string][]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "prioritizes multi-topic peer over single-topic peers",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves all 3 topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30401)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30402)
|
||||
// Peer Z serves only topic/b
|
||||
nodeZ := newTestNode(t, "127.0.0.1", 30403)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX, nodeZ},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only nodeX should be dialed (satisfies all 3 topics)
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cross-topic decrement works correctly",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // Need 2 peers
|
||||
"topic/b": 1, // Need 1 peer
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves both topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30411)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30412)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// nodeX covers topic/b fully, and 1 of 2 for topic/a
|
||||
// nodeY covers remaining 1 for topic/a
|
||||
return crawlerPeers, []*enode.Node{nodeX, nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no redundant dials when one peer satisfies all",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30421)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only 1 dial needed for all 3 topics
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "skips topics with enough peers already",
|
||||
connectedPeers: map[string]int{
|
||||
"topic/a": 2, // Already has 2
|
||||
},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // min 2, already have 2
|
||||
"topic/b": 1, // min 1, have 0
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30431)
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30432)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeY},
|
||||
}
|
||||
// Only nodeY should be dialed (topic/a already satisfied)
|
||||
return crawlerPeers, []*enode.Node{nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns nil when all topics satisfied",
|
||||
connectedPeers: map[string]int{"topic/a": 2, "topic/b": 1},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2,
|
||||
"topic/b": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30441)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// No dials needed
|
||||
return crawlerPeers, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles empty crawler response",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 1}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
return map[string][]*enode.Node{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
count := tt.connectedPeers[topic]
|
||||
peers := make([]peer.ID, count)
|
||||
for i := range count {
|
||||
peers[i] = peer.ID(topic + string(rune(i)))
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: crawlerPeers,
|
||||
consume: false,
|
||||
}
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
dialer.topicsProvider = tt.topicsProvider
|
||||
|
||||
got := dialer.selectPeersForTopics()
|
||||
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got), "expected %d peers, got %d", len(expected), len(got))
|
||||
|
||||
// Verify all expected nodes are present (order may vary for equal topic counts)
|
||||
expectedIDs := make(map[enode.ID]struct{})
|
||||
for _, n := range expected {
|
||||
expectedIDs[n.ID()] = struct{}{}
|
||||
}
|
||||
for _, n := range got {
|
||||
_, ok := expectedIDs[n.ID()]
|
||||
require.True(t, ok, "unexpected peer %s in result", n.ID())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_ProtectedPeers(t *testing.T) {
	peerA := peer.ID("peerA")
	peerB := peer.ID("peerB")
	peerC := peer.ID("peerC")

	tests := []struct {
		name           string
		topicsProvider func() map[string]int
		connectedPeers map[string][]peer.ID // topic -> connected peers
		expected       []peer.ID
	}{
		{
			name:           "nil topics provider",
			topicsProvider: nil,
			connectedPeers: map[string][]peer.ID{},
			expected:       nil,
		},
		{
			name:           "no topics",
			topicsProvider: func() map[string]int { return map[string]int{} },
			connectedPeers: map[string][]peer.ID{},
			expected:       []peer.ID{},
		},
		{
			name:           "no peers for any topic",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
			connectedPeers: map[string][]peer.ID{"topic/a": {}, "topic/b": {}},
			expected:       []peer.ID{},
		},
		{
			name:           "multiple peers for all topics protects first peer from each",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 2, "topic/b": 2} },
			connectedPeers: map[string][]peer.ID{"topic/a": {peerA, peerB}, "topic/b": {peerB, peerC}},
			expected:       []peer.ID{peerA, peerB},
		},
		{
			name:           "single peer for one topic",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1} },
			connectedPeers: map[string][]peer.ID{"topic/a": {peerA}},
			expected:       []peer.ID{peerA},
		},
		{
			name:           "same peer is first for multiple topics",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
			connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerA}},
			expected:       []peer.ID{peerA},
		},
		{
			name:           "different first peers for different topics",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 1} },
			connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerB}},
			expected:       []peer.ID{peerA, peerB},
		},
		{
			name:           "protects first peer from each topic",
			topicsProvider: func() map[string]int { return map[string]int{"topic/a": 1, "topic/b": 2, "topic/c": 1} },
			connectedPeers: map[string][]peer.ID{"topic/a": {peerA}, "topic/b": {peerB, peerC}, "topic/c": {peerC}},
			expected:       []peer.ID{peerA, peerB, peerC},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			listPeers := func(topic string) []peer.ID {
				return tt.connectedPeers[topic]
			}

			dialer := &GossipPeerDialer{
				topicsProvider: tt.topicsProvider,
				listPeers:      listPeers,
			}

			got := dialer.ProtectedPeers()

			if tt.expected == nil {
				require.Nil(t, got)
				return
			}

			require.NotNil(t, got)
			require.Equal(t, len(tt.expected), len(got), "expected %d peers, got %d", len(tt.expected), len(got))

			if len(tt.expected) == 0 {
				return
			}

			// Check all expected peers are present (order may vary due to map iteration).
			expectedSet := make(map[peer.ID]struct{})
			for _, p := range tt.expected {
				expectedSet[p] = struct{}{}
			}
			for _, p := range got {
				_, ok := expectedSet[p]
				require.True(t, ok, "unexpected peer %s in result", p)
			}
		})
	}
}
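Read together, the cases above pin down the protection rule: for every topic the dialer currently wants peers on, the first connected peer is shielded from pruning, and duplicates collapse to a single entry. The following is a minimal sketch distilled from that table, not the dialer's actual implementation (the dialer source is not part of this diff), and the function name is hypothetical:

// protectedPeersSketch illustrates the rule the test cases encode: protect
// the first currently-connected peer on each wanted topic, deduplicated.
func protectedPeersSketch(topicsProvider func() map[string]int, listPeers func(string) []peer.ID) []peer.ID {
	if topicsProvider == nil {
		return nil // matches the "nil topics provider" case
	}
	seen := make(map[peer.ID]struct{})
	protected := make([]peer.ID, 0)
	for topic := range topicsProvider() {
		peers := listPeers(topic)
		if len(peers) == 0 {
			continue
		}
		if _, ok := seen[peers[0]]; ok {
			continue // same peer is first for multiple topics: keep one entry
		}
		seen[peers[0]] = struct{}{}
		protected = append(protected, peers[0])
	}
	return protected
}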

func TestGossipPeerDialer_topicsChanged(t *testing.T) {
	tests := []struct {
		name         string
		cachedTopics map[string]int
		newTopics    map[string]int
		expected     bool
	}{
		{
			name:         "both empty",
			cachedTopics: map[string]int{},
			newTopics:    map[string]int{},
			expected:     false,
		},
		{
			name:         "same topics",
			cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
			newTopics:    map[string]int{"topic/a": 1, "topic/b": 2},
			expected:     false,
		},
		{
			name:         "same topics different min peer counts",
			cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
			newTopics:    map[string]int{"topic/a": 5, "topic/b": 10},
			expected:     false,
		},
		{
			name:         "new topic added",
			cachedTopics: map[string]int{"topic/a": 1},
			newTopics:    map[string]int{"topic/a": 1, "topic/b": 2},
			expected:     true,
		},
		{
			name:         "topic removed",
			cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
			newTopics:    map[string]int{"topic/a": 1},
			expected:     true,
		},
		{
			name:         "same length different topics",
			cachedTopics: map[string]int{"topic/a": 1, "topic/b": 2},
			newTopics:    map[string]int{"topic/a": 1, "topic/c": 2},
			expected:     true,
		},
		{
			name:         "from empty to non-empty",
			cachedTopics: map[string]int{},
			newTopics:    map[string]int{"topic/a": 1},
			expected:     true,
		},
		{
			name:         "from non-empty to empty",
			cachedTopics: map[string]int{"topic/a": 1},
			newTopics:    map[string]int{},
			expected:     true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dialer := &GossipPeerDialer{
				cachedTopics: tt.cachedTopics,
			}
			got := dialer.topicsChanged(tt.newTopics)
			require.Equal(t, tt.expected, got)
		})
	}
}
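The "same topics different min peer counts" case is the telling one: topicsChanged compares only the topic key sets and deliberately ignores the per-topic minimum peer counts. A hypothetical equivalent of that comparison, distilled from the table above (the real method belongs to the dialer source, not shown here):

// topicsChangedSketch: true iff the key sets differ; values are ignored.
func topicsChangedSketch(cached, next map[string]int) bool {
	if len(cached) != len(next) {
		return true
	}
	for topic := range next {
		if _, ok := cached[topic]; !ok {
			return true
		}
	}
	return false
}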

type mockCrawler struct {
	mu      sync.Mutex
	peers   map[string][]*enode.Node
	consume bool
}

func (m *mockCrawler) Start(gossipcrawler.TopicExtractor) error {
	return nil
}

func (m *mockCrawler) Stop() {}
func (m *mockCrawler) RemovePeerByPeerId(peer.ID) {}
func (m *mockCrawler) RemoveTopic(string) {}
func (m *mockCrawler) TriggerCrawl() {}
func (m *mockCrawler) PeersForTopic(topic string) []*enode.Node {
	m.mu.Lock()
	defer m.mu.Unlock()

	nodes := m.peers[topic]
	if len(nodes) == 0 {
		return nil
	}

	copied := slices.Clone(nodes)
	if m.consume {
		m.peers[topic] = nil
	}
	return copied
}

type mockDialer struct {
	mu    sync.Mutex
	dials []*enode.Node
}

func (m *mockDialer) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.dials = append(m.dials, nodes...)
	return uint(len(nodes))
}

func (m *mockDialer) dialCount() int {
	m.mu.Lock()
	defer m.mu.Unlock()
	return len(m.dials)
}

func (m *mockDialer) dialedNodes() []*enode.Node {
	m.mu.Lock()
	defer m.mu.Unlock()
	return slices.Clone(m.dials)
}

func newTestNode(t *testing.T, ip string, tcpPort uint16) *enode.Node {
	priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
	require.NoError(t, err)

	return newTestNodeWithPriv(t, priv, ip, tcpPort)
}

func newTestNodeWithPriv(t *testing.T, priv crypto.PrivKey, ip string, tcpPort uint16) *enode.Node {
	t.Helper()

	db, err := enode.OpenDB("")
	require.NoError(t, err)
	t.Cleanup(func() {
		db.Close()
	})

	convertedKey, err := ecdsa.ConvertFromInterfacePrivKey(priv)
	require.NoError(t, err)

	localNode := enode.NewLocalNode(db, convertedKey)
	localNode.SetStaticIP(net.ParseIP(ip))
	localNode.Set(enr.TCP(tcpPort))
	localNode.Set(enr.UDP(tcpPort))

	return localNode.Node()
}
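A usage note on these helpers: newTestNodeWithPriv exists so a test can reuse one key and obtain several records with the same enode.ID, which is what the sequence-number tests rely on (an enode ID is derived from the public key alone). A sketch, assuming it runs inside a test body; addresses and ports are arbitrary:

nodeA := newTestNode(t, "192.0.2.1", 30303)
_ = nodeA

// Same key twice -> same enode.ID, so the records differ only in contents/seq.
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
require.NoError(t, err)
n1 := newTestNodeWithPriv(t, priv, "192.0.2.2", 30304)
n2 := newTestNodeWithPriv(t, priv, "192.0.2.2", 30305)
require.Equal(t, n1.ID(), n2.ID())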
606  beacon-chain/p2p/gossip_peer_crawler.go  Normal file
@@ -0,0 +1,606 @@
package p2p

import (
	"context"
	"fmt"
	"slices"
	"sync"
	"time"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
	"github.com/pkg/errors"
	"golang.org/x/sync/semaphore"

	"github.com/libp2p/go-libp2p/core/peer"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

const (
	semaphoreWeight = int64(1)
	// pingBufferSizeScaleFactor determines the ping channel buffer size as a multiple
	// of maxConcurrentPings. The crawl loop blocks when the ping buffer is full, so we
	// need headroom beyond the number of concurrent pings allowed. Since pings to
	// unreachable peers may time out (taking longer to complete), a larger buffer ensures
	// the crawler can continue discovering peers without stalling while waiting for slow
	// or failed pings to drain.
	pingBufferSizeScaleFactor = 4
)

type peerNode struct {
	isPinged bool
	node     *enode.Node
	peerID   peer.ID
	topics   map[string]struct{}
}

type crawledPeers struct {
	mu              sync.RWMutex
	peerNodeByEnode map[enode.ID]*peerNode
	peerNodeByPid   map[peer.ID]*peerNode
	peersByTopic    map[string]map[*peerNode]struct{}
}

func (cp *crawledPeers) updateStatusToPinged(enodeID enode.ID) {
	cp.mu.Lock()
	defer cp.mu.Unlock()

	existingPNode, ok := cp.peerNodeByEnode[enodeID]
	if !ok {
		return
	}

	// A node with a given node ID is pinged only once, not on every sequence
	// number change: the ping simply verifies that the node is reachable and
	// not fake.
	existingPNode.isPinged = true
}

func (cp *crawledPeers) updatePeer(node *enode.Node, topics []string) (bool, error) {
	if node == nil {
		return false, errors.New("node is nil")
	}

	cp.mu.Lock()
	defer cp.mu.Unlock()

	enodeID := node.ID()
	existingPNode, ok := cp.peerNodeByEnode[enodeID]

	if ok && existingPNode.node == nil {
		return false, errors.New("existing peer record has a nil enode")
	}

	// Ignore records whose sequence number is not newer than the one we
	// already hold; those are stale.
	if ok && existingPNode.node.Seq() >= node.Seq() {
		return false, nil
	}

	if !ok {
		// This is a new peer.
		peerID, err := enodeToPeerID(node)
		if err != nil {
			return false, fmt.Errorf("converting enode to peer ID: %w", err)
		}
		existingPNode = &peerNode{
			node:   node,
			peerID: peerID,
			topics: make(map[string]struct{}),
		}
		cp.peerNodeByEnode[enodeID] = existingPNode
		cp.peerNodeByPid[peerID] = existingPNode
	} else {
		existingPNode.node = node
	}

	cp.updateTopicsUnlocked(existingPNode, topics)
	cp.recordMetricsUnlocked()

	if existingPNode.isPinged || len(topics) == 0 {
		return false, nil
	}
	return true, nil
}

func (cp *crawledPeers) removeTopic(topic string) {
	cp.mu.Lock()
	defer cp.mu.Unlock()

	// Get all peers subscribed to this topic.
	peers, ok := cp.peersByTopic[topic]
	if !ok {
		return // Topic doesn't exist.
	}

	// Remove the topic from each peer's topic list.
	for pnode := range peers {
		delete(pnode.topics, topic)
		// Remove the peer if it has no more topics left.
		if len(pnode.topics) == 0 {
			cp.updateTopicsUnlocked(pnode, nil)
		}
	}

	// Remove the topic from the byTopic map.
	delete(cp.peersByTopic, topic)
	cp.recordMetricsUnlocked()
}

func (cp *crawledPeers) removePeerByPeerId(peerID peer.ID) {
	cp.mu.Lock()
	defer cp.mu.Unlock()

	pnode, ok := cp.peerNodeByPid[peerID]
	if !ok {
		return
	}

	// Use updateTopicsUnlocked with empty topics to remove the peer.
	cp.updateTopicsUnlocked(pnode, nil)
	cp.recordMetricsUnlocked()
}

func (cp *crawledPeers) removePeerByNodeId(enodeID enode.ID) {
	cp.mu.Lock()
	defer cp.mu.Unlock()
	pnode, ok := cp.peerNodeByEnode[enodeID]
	if !ok {
		return
	}
	cp.updateTopicsUnlocked(pnode, nil)
	cp.recordMetricsUnlocked()
}

func (cp *crawledPeers) recordMetricsUnlocked() {
	gossipCrawlerPeersByEnodeCount.Set(float64(len(cp.peerNodeByEnode)))
	gossipCrawlerPeersByPidCount.Set(float64(len(cp.peerNodeByPid)))
	gossipCrawlerTopicsCount.Set(float64(len(cp.peersByTopic)))
}

func (cp *crawledPeers) logPeerCounts() {
	cp.mu.RLock()
	defer cp.mu.RUnlock()

	for topic, peers := range cp.peersByTopic {
		// Count only pinged peers (verified reachable).
		pingedCount := 0
		for pnode := range peers {
			if pnode.isPinged {
				pingedCount++
			}
		}
		log.WithField("topic", topic).
			WithField("totalPeers", len(peers)).
			WithField("pingedPeers", pingedCount).
			Debug("Crawler indexed peers for topic")
	}
}

func (cp *crawledPeers) cleanupPeer(pnode *peerNode) {
	delete(cp.peerNodeByPid, pnode.peerID)
	delete(cp.peerNodeByEnode, pnode.node.ID())
	for t := range pnode.topics {
		if peers, ok := cp.peersByTopic[t]; ok {
			delete(peers, pnode)
			if len(peers) == 0 {
				delete(cp.peersByTopic, t)
			}
		}
	}
	pnode.topics = nil // Clear topics to indicate removal.
}

func (cp *crawledPeers) removeOldTopicsFromPeer(pnode *peerNode, newTopics map[string]struct{}) {
	for oldTopic := range pnode.topics {
		if _, ok := newTopics[oldTopic]; !ok {
			if peers, ok := cp.peersByTopic[oldTopic]; ok {
				delete(peers, pnode)
				if len(peers) == 0 {
					delete(cp.peersByTopic, oldTopic)
				}
			}
		}
	}
}

func (cp *crawledPeers) addNewTopicsToPeer(pnode *peerNode, newTopics map[string]struct{}) {
	for newTopic := range newTopics {
		if _, ok := pnode.topics[newTopic]; !ok {
			if _, ok := cp.peersByTopic[newTopic]; !ok {
				cp.peersByTopic[newTopic] = make(map[*peerNode]struct{})
			}
			cp.peersByTopic[newTopic][pnode] = struct{}{}
		}
	}
}

// updateTopicsUnlocked updates the topics associated with a peer node.
// If the topics slice is empty, the peer is completely removed from the crawled peers.
// Otherwise, it updates the peer's topics by removing old topics that are no longer
// present and adding new topics. This method assumes the caller holds the lock on cp.mu.
// If a topic has no peers after this update, it is removed from the list of topics we track peers for.
func (cp *crawledPeers) updateTopicsUnlocked(pnode *peerNode, topics []string) {
	// If topics is empty, remove the peer completely.
	if len(topics) == 0 {
		cp.cleanupPeer(pnode)
		return
	}

	newTopics := make(map[string]struct{})
	for _, t := range topics {
		newTopics[t] = struct{}{}
	}

	// Remove old topics that are no longer present.
	cp.removeOldTopicsFromPeer(pnode, newTopics)

	// Add new topics.
	cp.addNewTopicsToPeer(pnode, newTopics)

	pnode.topics = newTopics
}
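To make the three outcomes concrete, here is a test-style sketch of updateTopicsUnlocked in action; it borrows newTestCrawledPeers and addPeerWithTopics from the test file later in this diff and assumes t and a node value are in scope:

cp := newTestCrawledPeers()
p := addPeerWithTopics(t, cp, node, []string{"a", "b"}, true)

cp.mu.Lock()
cp.updateTopicsUnlocked(p, []string{"b", "c"}) // drops "a", keeps "b", adds "c"
cp.mu.Unlock()

cp.mu.Lock()
cp.updateTopicsUnlocked(p, nil) // empty topics: the peer is removed entirely
cp.mu.Unlock()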

func (cp *crawledPeers) peersForTopic(topic string, filter gossipcrawler.PeerFilterFunc) []peerNode {
	cp.mu.RLock()
	defer cp.mu.RUnlock()

	peers, ok := cp.peersByTopic[topic]
	if !ok {
		return nil
	}

	var peerNodes []peerNode
	for pnode := range peers {
		if pnode.node == nil {
			continue
		}
		if pnode.isPinged && filter(pnode.node) {
			peerNodes = append(peerNodes, *pnode)
		}
	}
	return peerNodes
}

// GossipPeerCrawler discovers and maintains a registry of peers subscribed to gossipsub topics.
// It uses discv5 to find peers, extracts their topic subscriptions from ENR records, and verifies
// their reachability via ping. Only peers that have been successfully pinged are returned when
// querying for peers on a given topic. The crawler runs four background loops: one for discovery,
// one for ping verification, one for periodic cleanup of stale or filtered-out peers, and one
// that periodically logs per-topic peer counts.
type GossipPeerCrawler struct {
	ctx context.Context

	crawlInterval, crawlTimeout time.Duration

	crawledPeers *crawledPeers

	// Discovery interface for finding peers.
	dv5 ListenerRebooter

	p2pSvc *Service

	topicExtractor gossipcrawler.TopicExtractor

	peerFilter gossipcrawler.PeerFilterFunc
	scorer     PeerScoreFunc

	pingCh        chan enode.Node
	pingSemaphore *semaphore.Weighted

	triggerCrawlCh chan struct{}

	once sync.Once
}

// cleanupInterval controls how frequently we sweep crawled peers and prune
// those that are no longer useful.
const cleanupInterval = 5 * time.Minute

// crawlerLogInterval controls how frequently we log peer counts per topic.
const crawlerLogInterval = 10 * time.Minute

// PeerScoreFunc calculates a reputation score for a given peer ID.
// Higher scores indicate more desirable peers. This function is used by PeersForTopic
// to sort returned peers in descending order of quality, allowing callers to prioritize
// connections to the most reliable peers.
type PeerScoreFunc func(peer.ID) float64

// NewGossipPeerCrawler creates a new crawler for discovering gossipsub peers.
// The crawler uses the provided discv5 listener to discover peers and tracks their
// topic subscriptions. Parameters:
// - p2pSvc: The P2P service for network operations
// - dv5: The discv5 listener used for peer discovery and ping verification
// - crawlTimeout: Maximum duration for each crawl iteration
// - crawlInterval: The duration between each crawl iteration
// - maxConcurrentPings: Limits parallel ping operations to avoid overwhelming the network
// - peerFilter: Determines which discovered peers should be tracked
// - scorer: Calculates peer quality scores for sorting results
//
// Returns an error if any required parameter is nil or invalid.
func NewGossipPeerCrawler(
	ctx context.Context,
	p2pSvc *Service,
	dv5 ListenerRebooter,
	crawlTimeout time.Duration,
	crawlInterval time.Duration,
	maxConcurrentPings int64,
	peerFilter gossipcrawler.PeerFilterFunc,
	scorer PeerScoreFunc,
) (*GossipPeerCrawler, error) {
	if p2pSvc == nil {
		return nil, errors.New("p2pSvc is nil")
	}
	if dv5 == nil {
		return nil, errors.New("dv5 is nil")
	}
	if crawlTimeout <= 0 {
		return nil, errors.New("crawl timeout must be greater than 0")
	}
	if crawlInterval <= 0 {
		return nil, errors.New("crawl interval must be greater than 0")
	}
	if maxConcurrentPings <= 0 {
		return nil, errors.New("max concurrent pings must be greater than 0")
	}
	if peerFilter == nil {
		return nil, errors.New("peer filter is nil")
	}
	if scorer == nil {
		return nil, errors.New("peer scorer is nil")
	}

	g := &GossipPeerCrawler{
		ctx:           ctx,
		crawlInterval: crawlInterval,
		crawlTimeout:  crawlTimeout,
		p2pSvc:        p2pSvc,
		dv5:           dv5,
		peerFilter:    peerFilter,
		scorer:        scorer,
	}
	g.pingCh = make(chan enode.Node, pingBufferSizeScaleFactor*maxConcurrentPings)
	g.pingSemaphore = semaphore.NewWeighted(maxConcurrentPings)
	g.triggerCrawlCh = make(chan struct{}, 1)
	g.crawledPeers = &crawledPeers{
		peerNodeByEnode: make(map[enode.ID]*peerNode),
		peerNodeByPid:   make(map[peer.ID]*peerNode),
		peersByTopic:    make(map[string]map[*peerNode]struct{}),
	}
	return g, nil
}
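A construction sketch for reference, assuming ctx, a *Service (svc), and a ListenerRebooter (dv5) are already in scope; the numeric values are illustrative, not recommendations:

crawler, err := NewGossipPeerCrawler(
	ctx,
	svc,            // *Service
	dv5,            // ListenerRebooter used for discovery and pings
	12*time.Second, // crawlTimeout: bound on one crawl pass
	time.Second,    // crawlInterval: pause between passes
	64,             // maxConcurrentPings: the ping buffer is 4x this
	func(n *enode.Node) bool { return true }, // permissive filter
	func(peer.ID) float64 { return 0 },       // neutral scorer
)
if err != nil {
	return err // any nil or non-positive argument is rejected up front
}
_ = crawler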

// PeersForTopic returns a list of enode records for peers subscribed to the given topic.
// Only peers that have been successfully pinged (verified as reachable) and pass the
// configured peer filter are included. Results are sorted in descending order by peer
// score, so higher-quality peers appear first. Returns an empty slice if no peers are
// found for the topic. The returned slice should not be modified, as it contains
// pointers to internal enode records.
func (g *GossipPeerCrawler) PeersForTopic(topic string) []*enode.Node {
	peerNodes := g.crawledPeers.peersForTopic(topic, g.peerFilter)

	slices.SortFunc(peerNodes, func(a, b peerNode) int {
		scoreA := g.scorer(a.peerID)
		scoreB := g.scorer(b.peerID)
		if scoreA > scoreB {
			return -1
		}
		if scoreA < scoreB {
			return 1
		}
		return 0
	})

	nodes := make([]*enode.Node, 0, len(peerNodes))
	for _, pn := range peerNodes {
		nodes = append(nodes, pn.node)
	}

	return nodes
}
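A caller-side sketch: because higher scores sort first, taking a prefix of the result yields the best-known peers for a topic. crawler, dialer, ctx, and topic are assumed in scope; DialPeers has the signature shown in the PeerManager interface later in this diff:

nodes := crawler.PeersForTopic(topic)
if len(nodes) > 3 {
	nodes = nodes[:3] // highest-scored peers sort first
}
dialed := dialer.DialPeers(ctx, 3, nodes)
log.WithField("dialed", dialed).Debug("Dialed crawled peers")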

// RemovePeerByPeerId removes a peer from the crawler's registry by its libp2p peer ID.
// This also removes the peer from all topic subscriptions it was associated with.
// If the peer is not found, this operation is a no-op.
func (g *GossipPeerCrawler) RemovePeerByPeerId(peerID peer.ID) {
	g.crawledPeers.removePeerByPeerId(peerID)
}

// RemoveTopic removes a topic and all its peer associations from the crawler.
// Peers that were only subscribed to this topic are completely removed from the registry.
// Peers subscribed to other topics remain tracked for those topics.
// If the topic does not exist, this operation is a no-op.
func (g *GossipPeerCrawler) RemoveTopic(topic string) {
	g.crawledPeers.removeTopic(topic)
}

// Start begins the crawler's background operations. It launches four goroutines:
// a crawl loop that periodically discovers new peers via discv5, a ping loop that
// verifies peer reachability, a cleanup loop that removes stale or filtered peers,
// and a loop that periodically logs per-topic peer counts.
// The provided TopicExtractor is used to determine which gossipsub topics each
// discovered peer subscribes to. Start is idempotent; subsequent calls after the
// first are no-ops. Returns an error if the topic extractor is nil.
func (g *GossipPeerCrawler) Start(te gossipcrawler.TopicExtractor) error {
	if te == nil {
		return errors.New("topic extractor is nil")
	}
	g.once.Do(func() {
		g.topicExtractor = te
		go g.crawlLoop()
		go g.pingLoop()
		go g.cleanupLoop()
		go g.logPeerCountsLoop()
	})

	return nil
}
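Since startup is guarded by sync.Once, repeated calls are harmless, which simplifies wiring on restart paths. A sketch, with crawler assumed in scope:

te := func(ctx context.Context, n *enode.Node) ([]string, error) {
	return []string{"some/topic"}, nil
}
if err := crawler.Start(te); err != nil {
	return err
}
// A second call is a no-op: the loops start only once, and the first
// extractor remains in effect.
_ = crawler.Start(te)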

func (g *GossipPeerCrawler) pingLoop() {
	for {
		select {
		case node := <-g.pingCh:
			if err := g.pingSemaphore.Acquire(g.ctx, semaphoreWeight); err != nil {
				return
			}
			go func(node *enode.Node) {
				defer g.pingSemaphore.Release(semaphoreWeight)

				if err := g.dv5.Ping(node); err != nil {
					log.WithError(err).WithField("node", node.ID()).Debug("Failed to ping node")
					g.crawledPeers.removePeerByNodeId(node.ID())
					return
				}

				g.crawledPeers.updateStatusToPinged(node.ID())
			}(&node)

		case <-g.ctx.Done():
			return
		}
	}
}
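The acquire-before-spawn shape in pingLoop is the standard weighted-semaphore idiom for bounding in-flight work without a worker pool. Stripped to its essentials (nodes, ctx, and ping are placeholders):

sem := semaphore.NewWeighted(4) // at most 4 pings in flight
for _, n := range nodes {
	if err := sem.Acquire(ctx, 1); err != nil {
		break // context cancelled while waiting for a slot
	}
	go func(n *enode.Node) {
		defer sem.Release(1)
		_ = ping(n) // placeholder for dv5.Ping
	}(n)
}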

func (g *GossipPeerCrawler) crawlLoop() {
	for {
		g.crawl()
		select {
		case <-time.After(g.crawlInterval):
		case <-g.triggerCrawlCh:
		case <-g.ctx.Done():
			return
		}
	}
}

func (g *GossipPeerCrawler) crawl() {
	ctx, cancel := context.WithTimeout(g.ctx, g.crawlTimeout)
	defer cancel()

	iterator := g.dv5.RandomNodes()

	// Ensure the iterator unblocks on context cancellation or timeout.
	go func() {
		<-ctx.Done()
		iterator.Close()
	}()

	for iterator.Next() {
		if ctx.Err() != nil {
			return
		}

		node := iterator.Node()
		if node == nil {
			continue
		}

		if !g.peerFilter(node) {
			g.crawledPeers.removePeerByNodeId(node.ID())
			continue
		}

		topics, err := g.topicExtractor(ctx, node)
		if err != nil {
			log.WithError(err).WithField("node", node.ID()).Debug("Failed to extract topics, skipping")
			continue
		}

		shouldPing, err := g.crawledPeers.updatePeer(node, topics)
		if err != nil {
			log.WithError(err).WithField("node", node.ID()).Error("Failed to update crawled peers")
		}
		if !shouldPing {
			continue
		}
		select {
		case g.pingCh <- *node:
		case <-g.ctx.Done():
			return
		}
	}
}

// cleanupLoop periodically removes peers that the filter rejects or that
// have no topics of interest. It uses the same context lifecycle as the other
// background loops.
func (g *GossipPeerCrawler) cleanupLoop() {
	ticker := time.NewTicker(cleanupInterval)
	defer ticker.Stop()

	// Initial cleanup to catch any leftovers from startup state.
	g.cleanup()

	for {
		select {
		case <-ticker.C:
			g.cleanup()
		case <-g.ctx.Done():
			return
		}
	}
}

// cleanup scans the crawled peer set and removes entries that either fail
// the current peer filter or have no topics of interest remaining.
func (g *GossipPeerCrawler) cleanup() {
	cp := g.crawledPeers

	// Snapshot current peers so we can evaluate them without holding the lock
	// during filter and topic extraction.
	cp.mu.RLock()
	peers := make([]*peerNode, 0, len(cp.peerNodeByPid))
	for _, p := range cp.peerNodeByPid {
		peers = append(peers, p)
	}
	cp.mu.RUnlock()

	for _, p := range peers {
		// Remove peers that no longer pass the filter.
		if !g.peerFilter(p.node) {
			cp.removePeerByNodeId(p.node.ID())
			continue
		}

		// Re-extract topics; if the extractor errors or yields none, drop the peer.
		topics, err := g.topicExtractor(g.ctx, p.node)
		if err != nil || len(topics) == 0 {
			cp.removePeerByNodeId(p.node.ID())
		}
	}
}

func (g *GossipPeerCrawler) logPeerCountsLoop() {
	ticker := time.NewTicker(crawlerLogInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			g.crawledPeers.logPeerCounts()
		case <-g.ctx.Done():
			return
		}
	}
}

// TriggerCrawl requests an immediate crawl. If a crawl trigger is already
// pending, this call is ignored (non-blocking). This allows external systems
// to request a crawl without waiting for the regular interval.
func (g *GossipPeerCrawler) TriggerCrawl() {
	select {
	case g.triggerCrawlCh <- struct{}{}:
		log.Info("Triggering crawl")
	default:
		// Channel full: a crawl is already pending.
	}
}
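Because triggerCrawlCh has capacity 1, bursts of triggers collapse into a single pending crawl and callers never block:

for i := 0; i < 100; i++ {
	crawler.TriggerCrawl() // the first send lands; the rest hit default
}
// The crawl loop observes at most one pending trigger.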

// enodeToPeerID converts an enode record to a libp2p peer ID.
func enodeToPeerID(n *enode.Node) (peer.ID, error) {
	info, _, err := convertToAddrInfo(n)
	if err != nil {
		return "", fmt.Errorf("converting enode to addr info: %w", err)
	}
	if info == nil {
		return "", errors.New("peer info is nil")
	}
	return info.ID, nil
}
787  beacon-chain/p2p/gossip_peer_crawler_test.go  Normal file
@@ -0,0 +1,787 @@
package p2p

import (
	"context"
	"net"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
	p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/prometheus/client_golang/prometheus/testutil"
	"github.com/stretchr/testify/require"
)

// Helpers for crawledPeers tests.
func newTestCrawledPeers() *crawledPeers {
	return &crawledPeers{
		peerNodeByEnode: make(map[enode.ID]*peerNode),
		peerNodeByPid:   make(map[peer.ID]*peerNode),
		peersByTopic:    make(map[string]map[*peerNode]struct{}),
	}
}

func addPeerWithTopics(t *testing.T, cp *crawledPeers, node *enode.Node, topics []string, pinged bool) *peerNode {
	t.Helper()
	pid, err := enodeToPeerID(node)
	require.NoError(t, err)
	p := &peerNode{
		isPinged: pinged,
		node:     node,
		peerID:   pid,
		topics:   make(map[string]struct{}),
	}
	cp.mu.Lock()
	cp.peerNodeByEnode[p.node.ID()] = p
	cp.peerNodeByPid[p.peerID] = p
	cp.updateTopicsUnlocked(p, topics)
	cp.mu.Unlock()
	return p
}

func TestUpdateStatusToPinged(t *testing.T) {
	localNode := createTestNodeRandom(t)
	node1 := localNode.Node()
	localNode2 := createTestNodeRandom(t)
	node2 := localNode2.Node()

	cases := []struct {
		name         string
		prep         func(*crawledPeers)
		target       *enode.Node
		expectPinged map[enode.ID]bool
	}{
		{
			name: "sets pinged for existing peer",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"a"}, false)
			},
			target: node1,
			expectPinged: map[enode.ID]bool{
				node1.ID(): true,
			},
		},
		{
			name: "idempotent when already pinged",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"a"}, true)
			},
			target: node1,
			expectPinged: map[enode.ID]bool{
				node1.ID(): true,
			},
		},
		{
			name: "no change when peer missing",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"a"}, false)
			},
			target: node2,
			expectPinged: map[enode.ID]bool{
				node1.ID(): false,
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cp := newTestCrawledPeers()
			tc.prep(cp)
			cp.updateStatusToPinged(tc.target.ID())
			cp.mu.RLock()
			defer cp.mu.RUnlock()
			for id, exp := range tc.expectPinged {
				if p := cp.peerNodeByEnode[id]; p != nil {
					require.Equal(t, exp, p.isPinged)
				}
			}
		})
	}
}

func TestRemoveTopic(t *testing.T) {
	localNode := createTestNodeRandom(t)
	node1 := localNode.Node()
	localNode2 := createTestNodeRandom(t)
	node2 := localNode2.Node()

	topic1 := "t1"
	topic2 := "t2"

	cases := []struct {
		name  string
		prep  func(*crawledPeers)
		topic string
		check func(*testing.T, *crawledPeers)
	}{
		{
			name: "removes topic from all peers and index",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1", "t2"}, true)
				addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
			},
			topic: topic1,
			check: func(t *testing.T, cp *crawledPeers) {
				_, ok := cp.peersByTopic[topic1]
				require.False(t, ok)
				for _, p := range cp.peerNodeByPid {
					_, has := p.topics[topic1]
					require.False(t, has)
				}
				// Ensure other topics remain.
				_, ok = cp.peersByTopic[topic2]
				require.True(t, ok)
			},
		},
		{
			name: "no-op when topic missing",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t2"}, true)
			},
			topic: topic1,
			check: func(t *testing.T, cp *crawledPeers) {
				_, ok := cp.peersByTopic[topic2]
				require.True(t, ok)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cp := newTestCrawledPeers()
			tc.prep(cp)
			cp.removeTopic(tc.topic)
			tc.check(t, cp)
		})
	}
}

func TestRemovePeer(t *testing.T) {
	localNode := createTestNodeRandom(t)
	node1 := localNode.Node()
	localNode2 := createTestNodeRandom(t)
	node2 := localNode2.Node()

	cases := []struct {
		name       string
		prep       func(*crawledPeers)
		target     enode.ID
		wantTopics int
	}{
		{
			name: "removes existing peer and prunes empty topic",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
			},
			target:     node1.ID(),
			wantTopics: 0,
		},
		{
			name: "removes only targeted peer; keeps topic for other",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
				addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
			},
			target:     node1.ID(),
			wantTopics: 1, // byTopic should still have t1 with one peer
		},
		{
			name: "no-op when peer missing",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
			},
			target:     node2.ID(),
			wantTopics: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cp := newTestCrawledPeers()
			tc.prep(cp)
			cp.removePeerByNodeId(tc.target)
			cp.mu.RLock()
			defer cp.mu.RUnlock()
			require.Len(t, cp.peersByTopic, tc.wantTopics)
		})
	}
}

func TestRemovePeerId(t *testing.T) {
	localNode := createTestNodeRandom(t)
	node1 := localNode.Node()
	localNode2 := createTestNodeRandom(t)
	node2 := localNode2.Node()

	pid1, err := enodeToPeerID(node1)
	require.NoError(t, err)
	pid2, err := enodeToPeerID(node2)
	require.NoError(t, err)

	cases := []struct {
		name       string
		prep       func(*crawledPeers)
		target     peer.ID
		wantTopics int
		wantPeers  int
	}{
		{
			name: "removes existing peer by id and prunes topic",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
			},
			target:     pid1,
			wantTopics: 0,
			wantPeers:  0,
		},
		{
			name: "removes only targeted peer id; keeps topic for other",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
				addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
			},
			target:     pid1,
			wantTopics: 1,
			wantPeers:  1,
		},
		{
			name: "no-op when peer id missing",
			prep: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
			},
			target:     pid2,
			wantTopics: 1,
			wantPeers:  1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			cp := newTestCrawledPeers()
			tc.prep(cp)
			cp.removePeerByPeerId(tc.target)
			cp.mu.RLock()
			defer cp.mu.RUnlock()
			require.Len(t, cp.peersByTopic, tc.wantTopics)
			require.Len(t, cp.peerNodeByPid, tc.wantPeers)
		})
	}
}

func TestUpdateCrawledIfNewer(t *testing.T) {
	newCrawler := func() (*crawledPeers, *GossipPeerCrawler, func()) {
		ctx, cancel := context.WithCancel(context.Background())
		g := &GossipPeerCrawler{
			ctx:    ctx,
			pingCh: make(chan enode.Node, 8),
		}
		cp := newTestCrawledPeers()
		return cp, g, cancel
	}

	// Helper: a local node that will cause enodeToPeerID to fail (no TCP/UDP multiaddrs).
	newNodeNoPorts := func(t *testing.T) *enode.Node {
		_, privKey := createAddrAndPrivKey(t)
		db, err := enode.OpenDB("")
		require.NoError(t, err)
		t.Cleanup(func() { db.Close() })
		ln := enode.NewLocalNode(db, privKey)
		// Do not set TCP/UDP; keep only the IP.
		ln.SetStaticIP(net.ParseIP("127.0.0.1"))
		return ln.Node()
	}

	// Ensure both A nodes have the same enode.ID but differing seq.
	ln := createTestNodeRandom(t)
	nodeA1 := ln.Node()
	setNodeSeq(ln, nodeA1.Seq()+1)
	nodeA2 := ln.Node()

	tests := []struct {
		name               string
		arrange            func(*crawledPeers)
		invokeNode         *enode.Node
		invokeTopics       []string
		expectedShouldPing bool
		expectErr          bool
		assert             func(*testing.T, *crawledPeers, <-chan enode.Node)
	}{
		{
			name:               "new peer with topics adds peer and pings",
			arrange:            func(cp *crawledPeers) {},
			invokeNode:         nodeA1,
			invokeTopics:       []string{"a"},
			expectedShouldPing: true,
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Len(t, cp.peerNodeByEnode, 1)
				require.Len(t, cp.peerNodeByPid, 1)
				require.Contains(t, cp.peersByTopic, "a")
				cp.mu.RUnlock()
			},
		},
		{
			name:         "new peer with empty topics is removed",
			arrange:      func(cp *crawledPeers) {},
			invokeNode:   nodeA1,
			invokeTopics: nil,
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Empty(t, cp.peerNodeByEnode)
				require.Empty(t, cp.peerNodeByPid)
				require.Empty(t, cp.peersByTopic)
				cp.mu.RUnlock()
			},
		},
		{
			name: "existing peer lower seq is ignored",
			arrange: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, nodeA2, []string{"x"}, false) // higher seq exists
			},
			invokeNode:   nodeA1, // lower seq
			invokeTopics: []string{"a", "b"},
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Contains(t, cp.peersByTopic, "x")
				require.NotContains(t, cp.peersByTopic, "a")
				cp.mu.RUnlock()
			},
		},
		{
			name: "existing peer equal seq is ignored",
			arrange: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
			},
			invokeNode:   nodeA1,
			invokeTopics: []string{"a"},
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Contains(t, cp.peersByTopic, "x")
				require.NotContains(t, cp.peersByTopic, "a")
				cp.mu.RUnlock()
			},
		},
		{
			name: "existing peer higher seq updates topics and pings",
			arrange: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
			},
			invokeNode:         nodeA2,
			invokeTopics:       []string{"a"},
			expectedShouldPing: true,
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.NotContains(t, cp.peersByTopic, "x")
				require.Contains(t, cp.peersByTopic, "a")
				cp.mu.RUnlock()
			},
		},
		{
			name: "existing peer higher seq but empty topics removes peer",
			arrange: func(cp *crawledPeers) {
				addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
			},
			invokeNode:   nodeA2,
			invokeTopics: nil,
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Empty(t, cp.peerNodeByEnode)
				require.Empty(t, cp.peerNodeByPid)
				cp.mu.RUnlock()
			},
		},
		{
			name: "corrupted existing entry with nil node is ignored",
			arrange: func(cp *crawledPeers) {
				pid, _ := enodeToPeerID(nodeA1)
				cp.mu.Lock()
				pn := &peerNode{node: nil, peerID: pid, topics: map[string]struct{}{"x": {}}}
				cp.peerNodeByEnode[nodeA1.ID()] = pn
				cp.peerNodeByPid[pid] = pn
				cp.peersByTopic["x"] = map[*peerNode]struct{}{pn: {}}
				cp.mu.Unlock()
			},
			expectErr:    true,
			invokeNode:   nodeA2,
			invokeTopics: []string{"a"},
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Contains(t, cp.peersByTopic, "x")
				cp.mu.RUnlock()
			},
		},
		{
			name:         "new peer with no ports causes enodeToPeerID error; no add",
			arrange:      func(cp *crawledPeers) {},
			invokeNode:   newNodeNoPorts(t),
			invokeTopics: []string{"a"},
			expectErr:    true,
			assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
				cp.mu.RLock()
				require.Empty(t, cp.peerNodeByEnode)
				require.Empty(t, cp.peerNodeByPid)
				require.Empty(t, cp.peersByTopic)
				cp.mu.RUnlock()
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			cp, g, cancel := newCrawler()
			defer cancel()
			tc.arrange(cp)
			shouldPing, err := cp.updatePeer(tc.invokeNode, tc.invokeTopics)
			if tc.expectErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
			require.Equal(t, tc.expectedShouldPing, shouldPing)
			tc.assert(t, cp, g.pingCh)
		})
	}
}

func TestPeersForTopic(t *testing.T) {
	t.Parallel()

	newCrawler := func(filter gossipcrawler.PeerFilterFunc) (*GossipPeerCrawler, *crawledPeers) {
		g := &GossipPeerCrawler{
			peerFilter:   filter,
			scorer:       func(peer.ID) float64 { return 0 },
			crawledPeers: newTestCrawledPeers(),
		}
		return g, g.crawledPeers
	}

	// Prepare nodes.
	ln1 := createTestNodeRandom(t)
	ln2 := createTestNodeRandom(t)
	ln3 := createTestNodeRandom(t)
	n1, n2, n3 := ln1.Node(), ln2.Node(), ln3.Node()
	topic := "top"

	cases := []struct {
		name    string
		filter  gossipcrawler.PeerFilterFunc
		setup   func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers)
		wantIDs []enode.ID
	}{
		{
			name:    "no peers for topic returns empty",
			filter:  func(*enode.Node) bool { return true },
			setup:   func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {},
			wantIDs: nil,
		},
		{
			name:   "excludes unpinged peers",
			filter: func(*enode.Node) bool { return true },
			setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
				// Add one pinged and one unpinged peer on the same topic.
				addPeerWithTopics(t, cp, n1, []string{topic}, true)
				addPeerWithTopics(t, cp, n2, []string{topic}, false)
			},
			wantIDs: []enode.ID{n1.ID()},
		},
		{
			name:   "applies peer filter to exclude",
			filter: func(n *enode.Node) bool { return n.ID() != n2.ID() },
			setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
				addPeerWithTopics(t, cp, n1, []string{topic}, true)
				addPeerWithTopics(t, cp, n2, []string{topic}, true)
			},
			wantIDs: []enode.ID{n1.ID()},
		},
		{
			name:   "ignores peerNode with nil node",
			filter: func(*enode.Node) bool { return true },
			setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
				addPeerWithTopics(t, cp, n1, []string{topic}, true)
				// Add n2 then set its node to nil to simulate a corrupted entry.
				p2 := addPeerWithTopics(t, cp, n2, []string{topic}, true)
				cp.mu.Lock()
				p2.node = nil
				cp.mu.Unlock()
			},
			wantIDs: []enode.ID{n1.ID()},
		},
		{
			name:   "sorted by score descending",
			filter: func(*enode.Node) bool { return true },
			setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
				// Add three pinged peers.
				p1 := addPeerWithTopics(t, cp, n1, []string{topic}, true)
				p2 := addPeerWithTopics(t, cp, n2, []string{topic}, true)
				p3 := addPeerWithTopics(t, cp, n3, []string{topic}, true)
				// Provide a deterministic scoring function.
				scores := map[peer.ID]float64{
					p1.peerID: 3.0,
					p2.peerID: 2.0,
					p3.peerID: 1.0,
				}
				g.scorer = func(id peer.ID) float64 { return scores[id] }
			},
			wantIDs: []enode.ID{n1.ID(), n2.ID(), n3.ID()},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			g, cp := newCrawler(tc.filter)
			tc.setup(t, g, cp)
			got := g.PeersForTopic(topic)
			var gotIDs []enode.ID
			for _, n := range got {
				gotIDs = append(gotIDs, n.ID())
			}
			if tc.wantIDs == nil {
				require.Empty(t, gotIDs)
				return
			}
			require.Equal(t, tc.wantIDs, gotIDs)
		})
	}
}

func TestCrawler_AddsAndPingsPeer(t *testing.T) {
	// Create a test node with valid ENR entries (IP/TCP/UDP).
	localNode := createTestNodeRandom(t)
	node := localNode.Node()

	// Prepare a mock iterator returning our single node.
	iterator := p2ptest.NewMockIterator([]*enode.Node{node})
	// Prepare a mock listener with a successful Ping.
	mockListener := p2ptest.NewMockListener(localNode, iterator)
	mockListener.PingFunc = func(*enode.Node) error { return nil }

	// Inject a permissive peer filter.
	filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })

	// Create the crawler with small intervals.
	scorer := func(peer.ID) float64 { return 0 }
	g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 4, filter, scorer)
	require.NoError(t, err)

	// Assign a simple topic extractor.
	topic := "test/topic"
	topicExtractor := func(ctx context.Context, n *enode.Node) ([]string, error) {
		return []string{topic}, nil
	}

	// Start the crawler's background loops (crawl, ping, cleanup, logging).
	require.NoError(t, g.Start(topicExtractor))

	// Verify that the peer has been indexed under the topic and marked as pinged.
	require.Eventually(t, func() bool {
		g.crawledPeers.mu.RLock()
		defer g.crawledPeers.mu.RUnlock()

		peers := g.crawledPeers.peersByTopic[topic]
		if len(peers) == 0 {
			return false
		}
		// Fetch the single peerNode and check its status.
		for pn := range peers {
			if pn == nil {
				return false
			}
			return pn.isPinged
		}
		return false
	}, 2*time.Second, 10*time.Millisecond)
}

func TestCrawler_SkipsPeer_WhenFilterRejects(t *testing.T) {
	t.Parallel()

	localNode := createTestNodeRandom(t)
	node := localNode.Node()
	iterator := p2ptest.NewMockIterator([]*enode.Node{node})
	mockListener := p2ptest.NewMockListener(localNode, iterator)
	mockListener.PingFunc = func(*enode.Node) error { return nil }

	// Reject all peers via the injected filter.
	filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return false })

	scorer := func(peer.ID) float64 { return 0 }
	g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
	if err != nil {
		t.Fatalf("NewGossipPeerCrawler error: %v", err)
	}

	topic := "test/topic"
	g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic}, nil }

	g.crawl()

	// Verify no peers are indexed, because the filter rejected the node.
	g.crawledPeers.mu.RLock()
	defer g.crawledPeers.mu.RUnlock()
	if len(g.crawledPeers.peerNodeByEnode) != 0 || len(g.crawledPeers.peerNodeByPid) != 0 || len(g.crawledPeers.peersByTopic) != 0 {
		t.Fatalf("expected no peers indexed, got byEnode=%d byPeerId=%d byTopic=%d",
			len(g.crawledPeers.peerNodeByEnode), len(g.crawledPeers.peerNodeByPid), len(g.crawledPeers.peersByTopic))
	}
}

func TestCrawler_RemoveTopic_RemovesTopicFromIndexes(t *testing.T) {
	t.Parallel()

	localNode := createTestNodeRandom(t)
	node := localNode.Node()
	iterator := p2ptest.NewMockIterator([]*enode.Node{node})
	mockListener := p2ptest.NewMockListener(localNode, iterator)
	mockListener.PingFunc = func(*enode.Node) error { return nil }

	filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })

	scorer := func(peer.ID) float64 { return 0 }
	g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
	if err != nil {
		t.Fatalf("NewGossipPeerCrawler error: %v", err)
	}

	topic1 := "test/topic1"
	topic2 := "test/topic2"
	g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic1, topic2}, nil }

	// Single crawl to index the topics.
	g.crawl()

	// Remove one topic and assert it is pruned from all indexes.
	g.RemoveTopic(topic1)

	g.crawledPeers.mu.RLock()
	defer g.crawledPeers.mu.RUnlock()

	if _, ok := g.crawledPeers.peersByTopic[topic1]; ok {
		t.Fatalf("expected topic1 to be removed from byTopic")
	}

	// Ensure the peer still exists and retains topic2.
	for _, pn := range g.crawledPeers.peerNodeByEnode {
		if _, has1 := pn.topics[topic1]; has1 {
			t.Fatalf("expected topic1 to be removed from peer topics")
		}
		if _, has2 := pn.topics[topic2]; !has2 {
			t.Fatalf("expected topic2 to remain for peer")
		}
	}
}

func TestCrawledPeersMetrics(t *testing.T) {
	localNode1 := createTestNodeRandom(t)
	node1 := localNode1.Node()
	localNode2 := createTestNodeRandom(t)
	node2 := localNode2.Node()

	pid1, err := enodeToPeerID(node1)
	require.NoError(t, err)

	t.Run("updatePeer records metrics", func(t *testing.T) {
		cp := newTestCrawledPeers()

		// Add the first peer with two topics.
		_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
		require.NoError(t, err)

		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))

		// Add a second peer with one overlapping topic.
		_, err = cp.updatePeer(node2, []string{"topic1", "topic3"})
		require.NoError(t, err)

		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(3), testutil.ToFloat64(gossipCrawlerTopicsCount))
	})

	t.Run("removePeerByPeerId records metrics", func(t *testing.T) {
		cp := newTestCrawledPeers()

		// Add two peers.
		_, err := cp.updatePeer(node1, []string{"topic1"})
		require.NoError(t, err)
		_, err = cp.updatePeer(node2, []string{"topic1", "topic2"})
		require.NoError(t, err)

		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))

		// Remove the first peer by peer ID.
		cp.removePeerByPeerId(pid1)

		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
	})

	t.Run("removePeerByNodeId records metrics", func(t *testing.T) {
		cp := newTestCrawledPeers()

		// Add two peers.
		_, err := cp.updatePeer(node1, []string{"topic1"})
		require.NoError(t, err)
		_, err = cp.updatePeer(node2, []string{"topic2"})
		require.NoError(t, err)

		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))

		// Remove the first peer by enode ID.
		cp.removePeerByNodeId(node1.ID())

		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
	})

	t.Run("removeTopic records metrics", func(t *testing.T) {
		cp := newTestCrawledPeers()

		// Add two peers with overlapping topics.
		_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
		require.NoError(t, err)
		_, err = cp.updatePeer(node2, []string{"topic1"})
		require.NoError(t, err)

		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))

		// Remove topic1; this should also remove node2, which only had topic1.
		cp.removeTopic("topic1")

		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
	})

	t.Run("updatePeer with empty topics removes peer and records metrics", func(t *testing.T) {
		cp := newTestCrawledPeers()

		// Add a peer with topics.
		_, err := cp.updatePeer(node1, []string{"topic1"})
		require.NoError(t, err)

		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))

		// Increment the sequence number to ensure the update is processed.
		setNodeSeq(localNode1, node1.Seq()+1)
		node1Updated := localNode1.Node()

		// Update with empty topics; this should remove the peer.
		_, err = cp.updatePeer(node1Updated, nil)
		require.NoError(t, err)

		require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
		require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
		require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerTopicsCount))
	})
}
14  beacon-chain/p2p/gossipcrawler/BUILD.bazel  Normal file
@@ -0,0 +1,14 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["interface.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler",
    visibility = [
        "//visibility:public",
    ],
    deps = [
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
    ],
)
37  beacon-chain/p2p/gossipcrawler/interface.go  Normal file
@@ -0,0 +1,37 @@
package gossipcrawler

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
)

// TopicExtractor is a function that can determine the set of topics a current or potential peer
// is subscribed to, based on key/value pairs from its ENR record.
type TopicExtractor func(ctx context.Context, node *enode.Node) ([]string, error)

// PeerFilterFunc defines the filtering interface used by the crawler to decide whether a node
// is a valid candidate to index in the crawler.
type PeerFilterFunc func(*enode.Node) bool

// Crawler discovers peers for gossipsub topics and serves them on demand.
type Crawler interface {
	Start(te TopicExtractor) error
	RemovePeerByPeerId(peerID peer.ID)
	RemoveTopic(topic string)
	PeersForTopic(topic string) []*enode.Node
	TriggerCrawl()
}

// SubnetTopicsProvider returns the set of gossipsub topics the node
// should currently maintain peer connections for, along with the minimum number of peers required
// for each topic.
type SubnetTopicsProvider func() map[string]int
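A hypothetical provider for illustration; the topic strings are made up, and each count means "keep at least this many peers on the topic":

provider := gossipcrawler.SubnetTopicsProvider(func() map[string]int {
	return map[string]int{
		"/eth2/abcd1234/beacon_attestation_1/ssz_snappy": 2,
		"/eth2/abcd1234/beacon_attestation_2/ssz_snappy": 2,
	}
})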

// GossipDialer controls dialing peers for gossipsub topics based
// on a provided SubnetTopicsProvider and the p2p crawler.
type GossipDialer interface {
	Start(provider SubnetTopicsProvider) error
	DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error
	ProtectedPeers() []peer.ID
}
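A wiring sketch, under the assumption that DialPeersForTopicBlocking keeps dialing crawled peers until nPeers are connected for the topic or the context ends (dialer, provider, and ctx assumed in scope):

if err := dialer.Start(provider); err != nil {
	return err
}
// Force-fill one topic up to 3 peers, bounded by ctx.
if err := dialer.DialPeersForTopicBlocking(ctx, "/eth2/abcd1234/beacon_attestation_1/ssz_snappy", 3); err != nil {
	log.WithError(err).Warn("Could not reach target peer count for topic")
}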
@@ -225,6 +225,10 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
		return
	}

	if s.crawler != nil {
		s.crawler.RemovePeerByPeerId(peerID)
	}

	priorState, err := s.peers.ConnectionState(peerID)
	if err != nil {
		// Can happen if the peer has already disconnected, so...

@@ -4,8 +4,8 @@ import (
	"context"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

@@ -35,6 +35,7 @@ type (
	PeersProvider
	MetadataProvider
	CustodyManager
	Started() bool
}

// Accessor provides access to the Broadcaster, PeerManager and CustodyManager interfaces.

@@ -98,11 +99,13 @@ type (
	PeerID() peer.ID
	Host() host.Host
	ENR() *enr.Record
	GossipDialer() gossipcrawler.GossipDialer
	NodeID() enode.ID
	DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
	RefreshPersistentSubnets()
	FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
	DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
	AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
	Crawler() gossipcrawler.Crawler
}

// Sender abstracts the sending functionality from libp2p.

@@ -97,6 +97,20 @@ var (
		Help: "The number of data column sidecar message broadcast attempts.",
	})

	// Gossip Peer Crawler Metrics
	gossipCrawlerPeersByEnodeCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_peers_by_enode_count",
		Help: "The number of peers tracked by enode ID in the gossip peer crawler.",
	})
	gossipCrawlerPeersByPidCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_peers_by_pid_count",
		Help: "The number of peers tracked by peer ID in the gossip peer crawler.",
	})
	gossipCrawlerTopicsCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_topics_count",
		Help: "The number of topics tracked in the gossip peer crawler.",
	})

	// Gossip Tracer Metrics
	pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "p2p_pubsub_topic_active",

@@ -6,11 +6,13 @@ package p2p
import (
	"context"
	"crypto/ecdsa"
	"fmt"
	"sync"
	"time"

	"github.com/OffchainLabs/prysm/v7/async"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"

@@ -61,6 +63,10 @@ var (
	// for the current peer limit status for the time period
	// defined below.
	pollingPeriod = 6 * time.Second

	crawlTimeout       = 12 * time.Second
	crawlInterval      = 1 * time.Second
	maxConcurrentDials = int64(256)
)

// Service for managing peer to peer (p2p) networking.

@@ -95,6 +101,8 @@ type Service struct {
	custodyInfoLock sync.RWMutex // Lock access to custodyInfo
	custodyInfoSet  chan struct{}
	allForkDigests  map[[4]byte]struct{}
	crawler         gossipcrawler.Crawler
	gossipDialer    gossipcrawler.GossipDialer
}

type custodyInfo struct {

@@ -163,7 +171,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {

	s.host = h

	// Gossipsub registration is done before we add in any new peers
	// Gossip registration is done before we add in any new peers
	// due to libp2p's gossipsub implementation not taking into
	// account previously added peers when creating the gossipsub
	// object.
@@ -241,6 +249,25 @@ func (s *Service) Start() {
|
||||
|
||||
s.dv5Listener = listener
|
||||
go s.listenForNewNodes()
|
||||
crawler, err := NewGossipPeerCrawler(
|
||||
s.ctx,
|
||||
s,
|
||||
s.dv5Listener,
|
||||
crawlTimeout,
|
||||
crawlInterval,
|
||||
maxConcurrentDials,
|
||||
s.filterPeer,
|
||||
s.Peers().Scorers().Score,
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Failed to create peer crawler")
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
s.crawler = crawler
|
||||
// Initialise the gossipsub dialer which will be started
|
||||
// once the sync service is ready to provide subnet topics.
|
||||
s.gossipDialer = NewGossipPeerDialer(s.ctx, s.crawler, s.PubSub().ListPeers, s.DialPeers)
|
||||
}
|
||||
|
||||
s.started = true
|
||||
@@ -311,12 +338,25 @@ func (s *Service) Start() {
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
s.started = false
|
||||
|
||||
if s.dv5Listener != nil {
|
||||
s.dv5Listener.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Crawler returns the p2p service's peer crawler.
|
||||
func (s *Service) Crawler() gossipcrawler.Crawler {
|
||||
return s.crawler
|
||||
}
|
||||
|
||||
// GossipDialer returns the dialer responsible for maintaining
|
||||
// peer counts per gossipsub topic, if discovery is enabled.
|
||||
func (s *Service) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return s.gossipDialer
|
||||
}
|
||||
|
||||
// Status of the p2p service. Will return an error if the service is considered unhealthy to
|
||||
// indicate that this node should not serve traffic until the issue has been resolved.
|
||||
func (s *Service) Status() error {
|
||||
@@ -557,3 +597,80 @@ func (s *Service) downscorePeer(peerID peer.ID, reason string) {
|
||||
newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
func AttestationSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return attestationSubnets(record)
|
||||
}
|
||||
|
||||
func SyncSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return syncSubnets(record)
|
||||
}
|
||||
|
||||
func DataColumnSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return dataColumnSubnets(nodeID, record)
|
||||
}
|
||||
|
||||
func DataColumnSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncCommitteeSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttestationSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlobSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcOptimisticToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcFinalityToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlockSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlockSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AggregateAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AggregateAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func VoluntaryExitSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ExitSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func ProposerSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ProposerSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttesterSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttesterSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncContributionAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncContributionAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlsToExecutionChangeSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlsToExecutionChangeSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
@@ -35,7 +35,8 @@ func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
|
||||
require.NoError(t, err, "Failed to p2p listen")
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen),
|
||||
libp2p.Security(noise.ID, noise.New)}...)
|
||||
require.NoError(t, err)
|
||||
return h, pkey, ipAddr
|
||||
}
|
||||
@@ -80,9 +81,8 @@ func TestService_Start_OnlyStartsOnce(t *testing.T) {
|
||||
}()
|
||||
var vr [32]byte
|
||||
require.NoError(t, cs.SetClock(startup.NewClock(time.Now(), vr)))
|
||||
require.Eventually(t, func() bool {
|
||||
return s.started
|
||||
}, 5*time.Second, 100*time.Millisecond, "Expected service to be started")
|
||||
time.Sleep(time.Second * 2)
|
||||
assert.Equal(t, true, s.started, "Expected service to be started")
|
||||
s.Start()
|
||||
require.LogsContain(t, hook, "Attempted to start p2p service when it was already started")
|
||||
require.NoError(t, s.Stop())
|
||||
@@ -261,9 +261,17 @@ func TestListenForNewNodes(t *testing.T) {
|
||||
err = cs.SetClock(startup.NewClock(genesisTime, gvr))
|
||||
require.NoError(t, err, "Could not set clock in service")
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return len(s.host.Network().Peers()) == peerCount
|
||||
}, 5*time.Second, 100*time.Millisecond, "Not all peers added to peerstore")
|
||||
actualPeerCount := len(s.host.Network().Peers())
|
||||
for range 40 {
|
||||
if actualPeerCount == peerCount {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
actualPeerCount = len(s.host.Network().Peers())
|
||||
}
|
||||
|
||||
assert.Equal(t, peerCount, actualPeerCount, "Not all peers added to peerstore")
|
||||
|
||||
err = s.Stop()
|
||||
require.NoError(t, err, "Failed to stop service")
|
||||
|
||||
@@ -3,9 +3,6 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -14,19 +11,16 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
pb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -61,223 +55,9 @@ const dataColumnSubnetVal = 150
|
||||
|
||||
const errSavingSequenceNumber = "saving sequence number after updating subnets: %w"
|
||||
|
||||
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
|
||||
func (s *Service) nodeFilter(topic string, indices map[uint64]int) (func(node *enode.Node) (map[uint64]bool, error), error) {
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
return s.filterPeerForAttSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return s.filterPeerForSyncSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
return s.filterPeerForBlobSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipDataColumnSidecarMessage):
|
||||
return s.filterPeerForDataColumnsSubnet(indices), nil
|
||||
default:
|
||||
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
|
||||
}
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets ensures that our node is connected to at least `minimumPeersPerSubnet`
|
||||
// peers for each subnet listed in `subnets`.
|
||||
// If, for all subnets, the threshold is met, then this function immediately returns.
|
||||
// Otherwise, it searches for new peers for defective subnets, and dials them.
|
||||
// If `ctx“ is canceled while searching for peers, search is stopped, but new found peers are still dialed.
|
||||
// In this case, the function returns an error.
|
||||
func (s *Service) FindAndDialPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindAndDialPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
// Return early if the discovery listener isn't set.
|
||||
if s.dv5Listener == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
|
||||
defectiveSubnets := s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
for len(defectiveSubnets) > 0 {
|
||||
// Stop the search/dialing loop if the context is canceled.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peersToDial, err := func() ([]*enode.Node, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
|
||||
defer cancel()
|
||||
|
||||
peersToDial, err := s.findPeersWithSubnets(ctx, topicFormat, digest, minimumPeersPerSubnet, defectiveSubnets)
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dial new peers in batches.
|
||||
s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
|
||||
defectiveSubnets = s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
|
||||
// It decrements the defective count for each subnet the node satisfies and removes subnets
|
||||
// that are fully satisfied (count reaches 0).
|
||||
func updateDefectiveSubnets(
|
||||
nodeSubnets map[uint64]bool,
|
||||
defectiveSubnets map[uint64]int,
|
||||
) {
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
defectiveSubnets[subnet]--
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
|
||||
// until enough peers are found or the context is canceled.
|
||||
// It returns new peers found during the search.
|
||||
func (s *Service) findPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
defectiveSubnetsOrigin map[uint64]int,
|
||||
) ([]*enode.Node, error) {
|
||||
// Copy the defective subnets map to avoid modifying the original map.
|
||||
defectiveSubnets := make(map[uint64]int, len(defectiveSubnetsOrigin))
|
||||
maps.Copy(defectiveSubnets, defectiveSubnetsOrigin)
|
||||
|
||||
// Create an discovery iterator to find new peers.
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
|
||||
// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
|
||||
// So it is important to close the iterator when the context is done to ensure
|
||||
// that the search does not hang indefinitely.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
iterator.Close()
|
||||
}()
|
||||
|
||||
// Retrieve the filter function that will be used to filter nodes based on the defective subnets.
|
||||
filter, err := s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
// Crawl the network for peers subscribed to the defective subnets.
|
||||
nodeByNodeID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for len(defectiveSubnets) > 0 && iterator.Next() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, err
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// Treat nodes that exist in nodeByNodeID with higher seq numbers as new peers
|
||||
// Skip peer not matching the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// this means the existing peer with the lower sequence number is no longer valid
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
// Note: We are choosing to not rollback changes to the defective subnets map in favor of calling s.defectiveSubnets once again after dialing peers.
|
||||
// This is a case that should rarely happen and should be handled through a second iteration in FindAndDialPeersWithSubnets
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"nodeID": node.ID(),
|
||||
"topicFormat": topicFormat,
|
||||
}).Debug("Could not get needed subnets from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if len(nodeSubnets) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// We found a new peer. Modify the defective subnets map
|
||||
// and the filter accordingly.
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}
|
||||
|
||||
// defectiveSubnets returns a map of subnets that have fewer than the minimum peer count.
|
||||
func (s *Service) defectiveSubnets(
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) map[uint64]int {
|
||||
missingCountPerSubnet := make(map[uint64]int, len(subnets))
|
||||
for subnet := range subnets {
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + s.Encoding().ProtocolSuffix()
|
||||
peers := s.pubsub.ListPeers(topic)
|
||||
peerCount := len(peers)
|
||||
if peerCount < minimumPeersPerSubnet {
|
||||
missingCountPerSubnet[subnet] = minimumPeersPerSubnet - peerCount
|
||||
}
|
||||
}
|
||||
|
||||
return missingCountPerSubnet
|
||||
}
|
||||
|
||||
// dialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
|
||||
// DialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
|
||||
// In case of a dial failure, it logs the error but continues dialing other peers.
|
||||
func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
func (s *Service) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
var mut sync.Mutex
|
||||
|
||||
counter := uint(0)
|
||||
@@ -319,75 +99,13 @@ func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes [
|
||||
return counter
|
||||
}
|
||||
|
||||
// filterPeerForAttSubnet returns a method with filters peers specifically for a particular attestation subnet.
|
||||
func (s *Service) filterPeerForAttSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "attestation subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method with filters peers specifically for a particular sync subnet.
|
||||
func (s *Service) filterPeerForSyncSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := syncSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "sync subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method with filters peers specifically for a particular blob subnet.
|
||||
// All peers are supposed to be subscribed to all blob subnets.
|
||||
func (s *Service) filterPeerForBlobSubnet(indices map[uint64]int) func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
result := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
result[i] = true
|
||||
}
|
||||
|
||||
return func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method with filters peers specifically for a particular data column subnet.
|
||||
func (s *Service) filterPeerForDataColumnsSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := dataColumnSubnets(node.ID(), node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
|
||||
func (s *Service) hasPeerWithTopic(topic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
|
||||
topic := subnetTopic + s.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.pubsub.ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
@@ -712,16 +430,3 @@ func byteCount(bitCount int) int {
|
||||
}
|
||||
return numOfBytes
|
||||
}
|
||||
|
||||
// interesect intersects two maps and returns a new map containing only the keys
|
||||
// that are present in both maps.
|
||||
func intersect(left map[uint64]int, right map[uint64]bool) map[uint64]bool {
|
||||
result := make(map[uint64]bool, min(len(left), len(right)))
|
||||
for i := range left {
|
||||
if right[i] {
|
||||
result[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -3,14 +3,15 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
require2 "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
@@ -122,6 +124,21 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
// Start the service.
|
||||
service.Start()
|
||||
|
||||
// start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
|
||||
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(i, true)
|
||||
@@ -129,7 +146,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
service.dv5Listener.LocalNode().Set(entry)
|
||||
|
||||
// Join and subscribe to the subnet, needed by libp2p.
|
||||
topicName := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, i) + "/ssz_snappy"
|
||||
topicName := AttestationSubnetTopic(bootNodeForkDigest, i)
|
||||
topic, err := service.pubsub.Join(topicName)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -169,23 +186,65 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
service.Start()
|
||||
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
|
||||
// start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
var topics []string
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
defer func() {
|
||||
err := service.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
defectiveSubnets := service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, subnetCount, len(defectiveSubnets))
|
||||
builder := func(idx uint64) string {
|
||||
return AttestationSubnetTopic(bootNodeForkDigest, idx)
|
||||
}
|
||||
defectiveSubnetsCount := defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, subnetCount, defectiveSubnetsCount)
|
||||
|
||||
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
var topicsToDial []string
|
||||
for s := range subnets {
|
||||
topicsToDial = append(topicsToDial, builder(s))
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err = service.FindAndDialPeersWithSubnets(ctxWithTimeOut, AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.NoError(t, err)
|
||||
for _, topic := range topicsToDial {
|
||||
err = service.GossipDialer().DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
defectiveSubnets = service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, 0, len(defectiveSubnets))
|
||||
defectiveSubnetsCount = defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, 0, defectiveSubnetsCount)
|
||||
}
|
||||
|
||||
func defectiveSubnets(service *Service, topics []string, minimumPeersPerSubnet int) int {
|
||||
count := 0
|
||||
for _, topic := range topics {
|
||||
peers := service.pubsub.ListPeers(topic)
|
||||
if len(peers) < minimumPeersPerSubnet {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
|
||||
@@ -581,7 +640,6 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -742,47 +800,13 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
s := createTestService(t, db)
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -792,7 +816,6 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -945,23 +968,7 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
// Create test P2P instance
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create mock service
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
s := createTestService(t, db)
|
||||
|
||||
// Mark specific node versions as "bad" to simulate filterPeer failures
|
||||
for _, node := range tt.nodes {
|
||||
@@ -979,30 +986,11 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1046,7 +1034,6 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
@@ -1067,21 +1054,7 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
service := createTestService(t, db)
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIteratorForSubnets{
|
||||
@@ -1105,22 +1078,80 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
crawler := startTestCrawler(t, service, service.dv5Listener.(*testp2p.MockListener))
|
||||
|
||||
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
|
||||
// Verification using verifyCrawlerPeers with a custom eval function
|
||||
verifyCrawlerPeers(t, crawler, service, map[uint64]int{1: 1}, 1, "only node2 should remain", func(t *testing.T, result []*enode.Node) {
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID())
|
||||
})
|
||||
}
|
||||
|
||||
func createTestService(t *testing.T, d db.Database) *Service {
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: d,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func startTestCrawler(t *testing.T, s *Service, listener *testp2p.MockListener) *GossipPeerCrawler {
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
crawler, err := NewGossipPeerCrawler(t.Context(), s, listener,
|
||||
1*time.Second, 100*time.Millisecond, 10, gossipcrawler.PeerFilterFunc(s.filterPeer),
|
||||
s.Peers().Scorers().Score)
|
||||
require.NoError(t, err)
|
||||
s.crawler = crawler
|
||||
require.NoError(t, crawler.Start(func(ctx context.Context, n *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(n.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(digest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
}))
|
||||
return crawler
|
||||
}
|
||||
|
||||
func verifyCrawlerPeers(t *testing.T, crawler *GossipPeerCrawler, s *Service, subnets map[uint64]int, expectedCount int, description string, eval func(t *testing.T, result []*enode.Node)) {
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
topics = append(topics, AttestationSubnetTopic(digest, subnet))
|
||||
}
|
||||
|
||||
var results []*enode.Node
|
||||
require2.Eventually(t, func() bool {
|
||||
results = results[:0]
|
||||
seen := make(map[enode.ID]struct{})
|
||||
for _, topic := range topics {
|
||||
peers := crawler.PeersForTopic(topic)
|
||||
for _, peer := range peers {
|
||||
if _, ok := seen[peer.ID()]; !ok {
|
||||
seen[peer.ID()] = struct{}{}
|
||||
results = append(results, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(results) == expectedCount
|
||||
}, 1*time.Second, 100*time.Millisecond, description)
|
||||
|
||||
if eval != nil {
|
||||
eval(t, results)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,9 +21,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/gossipcrawler:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -41,6 +41,20 @@ func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID)
|
||||
|
||||
}
|
||||
|
||||
// Crawler -- fake.
|
||||
func (*FakeP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer -- fake.
|
||||
func (*FakeP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*FakeP2P) Started() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler -- fake.
|
||||
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
}
|
||||
@@ -70,11 +84,6 @@ func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*FakeP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
@@ -93,6 +102,11 @@ func (*FakeP2P) Peers() *peers.Status {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DialPeers -- fake.
|
||||
func (*FakeP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// PublishToTopic -- fake.
|
||||
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
return nil
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@@ -19,6 +19,7 @@ type MockPeerManager struct {
|
||||
BHost host.Host
|
||||
DiscoveryAddr []multiaddr.Multiaddr
|
||||
FailDiscoveryAddr bool
|
||||
Dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
@@ -46,6 +47,11 @@ func (m MockPeerManager) NodeID() enode.ID {
|
||||
return enode.ID{}
|
||||
}
|
||||
|
||||
// GossipDialer returns the configured dialer mock, if any.
|
||||
func (m MockPeerManager) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return m.Dialer
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
@@ -57,10 +63,15 @@ func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
// RefreshPersistentSubnets .
|
||||
func (*MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// FindAndDialPeersWithSubnet .
|
||||
func (*MockPeerManager) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
// DialPeers
|
||||
func (p *MockPeerManager) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
// Crawler.
|
||||
func (*MockPeerManager) Crawler() gossipcrawler.Crawler {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -66,6 +66,7 @@ type TestP2P struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
enr *enr.Record
|
||||
dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -119,6 +120,10 @@ func NewTestP2PWithPubsubOptions(t *testing.T, pubsubOpts []pubsub.Option, userO
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TestP2P) Status() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Connect two test peers together.
|
||||
func (p *TestP2P) Connect(b *TestP2P) {
|
||||
if err := connect(p.BHost, b.BHost); err != nil {
|
||||
@@ -192,11 +197,7 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
if _, err := p.Encoding().EncodeGossip(buf, castedMsg); err != nil {
|
||||
p.t.Fatalf("Failed to encode message: %v", err)
|
||||
}
|
||||
digest, err := p.ForkDigest()
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
topicHandle, err := ps.Join(fmt.Sprintf(topic, digest) + p.Encoding().ProtocolSuffix())
|
||||
topicHandle, err := ps.Join(topic)
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
@@ -288,6 +289,9 @@ func (p *TestP2P) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub
|
||||
// LeaveTopic closes topic and removes corresponding handler from list of joined topics.
|
||||
// This method will return error if there are outstanding event handlers or subscriptions.
|
||||
func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if t, ok := p.joinedTopics[topic]; ok {
|
||||
if err := t.Close(); err != nil {
|
||||
return err
|
||||
@@ -428,9 +432,8 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
return p.peers
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*TestP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
func (p *TestP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
@@ -567,3 +570,43 @@ func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
|
||||
return custodyGroupCount
|
||||
}
|
||||
|
||||
// MockCrawler is a minimal mock implementation of PeerCrawler for testing
|
||||
type MockCrawler struct{}
|
||||
|
||||
// Start does nothing as this is a mock
|
||||
func (m *MockCrawler) Start(gossipcrawler.TopicExtractor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop does nothing as this is a mock
|
||||
func (m *MockCrawler) Stop() {}
|
||||
|
||||
// SetTopicExtractor does nothing as this is a mock
|
||||
func (m *MockCrawler) SetTopicExtractor(extractor func(context.Context, *enode.Node) ([]string, error)) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTopic does nothing as this is a mock
|
||||
func (m *MockCrawler) RemoveTopic(topic string) {}
|
||||
|
||||
// RemovePeerID does nothing as this is a mock
|
||||
func (m *MockCrawler) RemovePeerByPeerId(pid peer.ID) {}
|
||||
|
||||
// PeersForTopic returns empty list as this is a mock
|
||||
func (m *MockCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
return []*enode.Node{}
|
||||
}
|
||||
|
||||
// TriggerCrawl does nothing as this is a mock
|
||||
func (m *MockCrawler) TriggerCrawl() {}
|
||||
|
||||
// Crawler returns a mock crawler implementation for testing.
|
||||
func (*TestP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer returns nil for tests that do not exercise dialer behaviour.
|
||||
func (p *TestP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return p.dialer
|
||||
}
|
||||
|
||||
@@ -575,7 +575,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".PublishBlockV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlockV2,
|
||||
@@ -586,7 +586,7 @@ func (s *Service) beaconEndpoints(
|
||||
name: namespace + ".PublishBlindedBlockV2",
|
||||
middleware: []middleware.Middleware{
|
||||
middleware.ContentTypeHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType, api.OctetStreamMediaType}),
|
||||
middleware.AcceptHeaderHandler([]string{api.JsonMediaType}),
|
||||
middleware.AcceptEncodingHeaderHandler(),
|
||||
},
|
||||
handler: server.PublishBlindedBlockV2,
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user