Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 14:28:09 -05:00.

Compare commits: 23 commits (manu ... paralleliz)
| Author | SHA1 | Date |
|---|---|---|
| | cfe59706af | |
| | b113d6bbde | |
| | 45577ef931 | |
| | 92865adfe7 | |
| | a3863c118b | |
| | 730e6500e3 | |
| | dac2c65004 | |
| | dbfb987e1d | |
| | 3596d00ff9 | |
| | 2ac30f5ce6 | |
| | 7418c00ad6 | |
| | 66342655fd | |
| | 18eca953c1 | |
| | 8191bb5711 | |
| | d4613aee0c | |
| | 9fcc1a7a77 | |
| | 75dea214ac | |
| | 4374e709cb | |
| | be300f80bd | |
| | 096cba5b2d | |
| | d5127233e4 | |
| | 3d35cc20ec | |
| | 1e658530a7 | |
.gitignore (vendored): 7 changes
@@ -38,13 +38,6 @@ metaData

# execution API authentication
jwt.hex
execution/

# local execution client data
execution/

# local documentation
CLAUDE.md

# manual testing
tmp
@@ -193,6 +193,7 @@ nogo(
"//tools/analyzers/featureconfig:go_default_library",
"//tools/analyzers/gocognit:go_default_library",
"//tools/analyzers/ineffassign:go_default_library",
"//tools/analyzers/httperror:go_default_library",
"//tools/analyzers/interfacechecker:go_default_library",
"//tools/analyzers/logcapitalization:go_default_library",
"//tools/analyzers/logruswitherror:go_default_library",
@@ -152,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
}

if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
return nil, errors.Wrap(err, "could not update committee cache")
log.WithError(err).Error("Could not update committee cache")
}

return indices, nil
@@ -5,10 +5,20 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
)

var dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
var (
dataColumnComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "beacon_data_column_sidecar_computation_milliseconds",
Help: "Captures the time taken to compute data column sidecars from blobs.",
Buckets: []float64{25, 50, 100, 250, 500, 750, 1000},
},
)

cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "cells_and_proofs_from_structured_computation_milliseconds",
Help: "Captures the time taken to compute cells and proofs from structured computation.",
Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
},
)
)
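The hunk above groups the existing histogram into a `var (...)` block and adds `cellsAndProofsFromStructuredComputationTime`. For context, this is a minimal, self-contained sketch of the observation pattern the later hunks use with these histograms; the metric definition is copied from the diff, while the `work` function and the `package main` wrapper are hypothetical:

```go
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// Histogram definition copied from the diff above.
var cellsAndProofsFromStructuredComputationTime = promauto.NewHistogram(
	prometheus.HistogramOpts{
		Name:    "cells_and_proofs_from_structured_computation_milliseconds",
		Help:    "Captures the time taken to compute cells and proofs from structured computation.",
		Buckets: []float64{10, 20, 30, 40, 50, 100, 200},
	},
)

// work is a hypothetical stand-in for the cells-and-proofs computation.
func work() {
	time.Sleep(25 * time.Millisecond)
}

func main() {
	// Same pattern as in the diff: record elapsed milliseconds when the
	// surrounding function returns.
	start := time.Now()
	defer func() {
		cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
	}()

	work()
}
```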
@@ -3,6 +3,7 @@ package peerdas
import (
"sort"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
@@ -296,32 +297,42 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg
return nil, nil, ErrBlobsCellsProofsMismatch
}

cellsPerBlob := make([][]kzg.Cell, 0, blobCount)
proofsPerBlob := make([][]kzg.Proof, 0, blobCount)
var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, blobCount)
proofsPerBlob := make([][]kzg.Proof, blobCount)

for i, blob := range blobs {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

var proofs []kzg.Proof
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

proofs = append(proofs, kzgProof)
}
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, proofs)
proofs := make([]kzg.Proof, 0, numberOfColumns)
for idx := uint64(i) * numberOfColumns; idx < (uint64(i)+1)*numberOfColumns; idx++ {
var kzgProof kzg.Proof
if copy(kzgProof[:], cellProofs[idx]) != len(kzgProof) {
return errors.New("wrong KZG proof size - should never happen")
}

proofs = append(proofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = proofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
@@ -329,40 +340,55 @@ func ComputeCellsAndProofsFromFlat(blobs [][]byte, cellProofs [][]byte) ([][]kzg

// ComputeCellsAndProofsFromStructured computes the cells and proofs from blobs and cell proofs.
func ComputeCellsAndProofsFromStructured(blobsAndProofs []*pb.BlobAndProofV2) ([][]kzg.Cell, [][]kzg.Proof, error) {
cellsPerBlob := make([][]kzg.Cell, 0, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, 0, len(blobsAndProofs))
for _, blobAndProof := range blobsAndProofs {
start := time.Now()
defer func() {
cellsAndProofsFromStructuredComputationTime.Observe(float64(time.Since(start).Milliseconds()))
}()

var wg errgroup.Group

cellsPerBlob := make([][]kzg.Cell, len(blobsAndProofs))
proofsPerBlob := make([][]kzg.Proof, len(blobsAndProofs))

for i, blobAndProof := range blobsAndProofs {
if blobAndProof == nil {
return nil, nil, ErrNilBlobAndProof
}

var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return nil, nil, errors.New("wrong blob size - should never happen")
}

// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return nil, nil, errors.Wrap(err, "compute cells")
}

kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return nil, nil, errors.New("wrong KZG proof size - should never happen")
wg.Go(func() error {
var kzgBlob kzg.Blob
if copy(kzgBlob[:], blobAndProof.Blob) != len(kzgBlob) {
return errors.New("wrong blob size - should never happen")
}

var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return nil, nil, errors.New("wrong copied KZG proof size - should never happen")
// Compute the extended cells from the (non-extended) blob.
cells, err := kzg.ComputeCells(&kzgBlob)
if err != nil {
return errors.Wrap(err, "compute cells")
}

kzgProofs = append(kzgProofs, kzgProof)
}
kzgProofs := make([]kzg.Proof, 0, fieldparams.NumberOfColumns)
for _, kzgProofBytes := range blobAndProof.KzgProofs {
if len(kzgProofBytes) != kzg.BytesPerProof {
return errors.New("wrong KZG proof size - should never happen")
}

cellsPerBlob = append(cellsPerBlob, cells)
proofsPerBlob = append(proofsPerBlob, kzgProofs)
var kzgProof kzg.Proof
if copy(kzgProof[:], kzgProofBytes) != len(kzgProof) {
return errors.New("wrong copied KZG proof size - should never happen")
}

kzgProofs = append(kzgProofs, kzgProof)
}

cellsPerBlob[i] = cells
proofsPerBlob[i] = kzgProofs
return nil
})
}

if err := wg.Wait(); err != nil {
return nil, nil, err
}

return cellsPerBlob, proofsPerBlob, nil
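Both functions above switch from a sequential loop that appends results to an `errgroup`-based fan-out: the result slices are pre-sized, each goroutine writes only its own index, and `wg.Wait()` surfaces the first error after all goroutines finish. A stripped-down sketch of that pattern, with a hypothetical `expensive` function standing in for `kzg.ComputeCells`; it relies on Go 1.22+ per-iteration loop variables, just as the diff does:

```go
package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// expensive is a hypothetical stand-in for a per-blob computation such as kzg.ComputeCells.
func expensive(blob []byte) ([]byte, error) {
	if len(blob) == 0 {
		return nil, errors.New("empty blob")
	}
	out := make([]byte, len(blob))
	copy(out, blob)
	return out, nil
}

// computeAll launches one goroutine per input. Each goroutine writes its result
// into a pre-sized slice at index i instead of appending, so output order matches
// input order regardless of scheduling and no locking is needed, because every
// goroutine touches a distinct element.
func computeAll(blobs [][]byte) ([][]byte, error) {
	var wg errgroup.Group
	results := make([][]byte, len(blobs))

	for i, blob := range blobs {
		wg.Go(func() error {
			out, err := expensive(blob)
			if err != nil {
				return fmt.Errorf("blob %d: %w", i, err)
			}
			results[i] = out
			return nil
		})
	}

	// Wait returns the first non-nil error once every goroutine has finished.
	if err := wg.Wait(); err != nil {
		return nil, err
	}
	return results, nil
}

func main() {
	out, err := computeAll([][]byte{{1}, {2, 2}, {3, 3, 3}})
	fmt.Println(len(out), err)
}
```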
@@ -38,6 +38,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_spf13_afero//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
@@ -25,6 +25,7 @@ import (
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/pkg/errors"
"github.com/spf13/afero"
"golang.org/x/sync/errgroup"
)

const (
@@ -515,6 +516,11 @@ func (dcs *DataColumnStorage) Clear() error {

// prune clean the cache, the filesystem and mutexes.
func (dcs *DataColumnStorage) prune() {
startTime := time.Now()
defer func() {
dataColumnPruneLatency.Observe(float64(time.Since(startTime).Milliseconds()))
}()

highestStoredEpoch := dcs.cache.HighestEpoch()

// Check if we need to prune.
@@ -620,7 +626,10 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}

// Create the SSZ encoded data column sidecars.
var sszEncodedDataColumnSidecars []byte
var sszEncodedDataColumnSidecarsBytes []byte

// Initialize the count of the saved SSZ encoded data column sidecar.
storedCount := uint8(0)

for {
dataColumnSidecars := pullChan(inputDataColumnSidecars)

@@ -628,7 +637,26 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
break
}

for _, dataColumnSidecar := range dataColumnSidecars {
var wg errgroup.Group
sszEncodedDataColumnSidecars := make([][]byte, len(dataColumnSidecars))
for i, dataColumnSidecar := range dataColumnSidecars {
wg.Go(func() error {
// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}

sszEncodedDataColumnSidecars[i] = sszEncodedDataColumnSidecar
return nil
})
}

if err := wg.Wait(); err != nil {
return err
}

for i, dataColumnSidecar := range dataColumnSidecars {
// Extract the data columns index.
dataColumnIndex := dataColumnSidecar.Index

@@ -650,10 +678,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}

// SSZ encode the data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecar := sszEncodedDataColumnSidecars[i]

// Compute the size of the SSZ encoded data column sidecar.
incomingSszEncodedDataColumnSidecarSize := uint32(len(sszEncodedDataColumnSidecar))

@@ -668,8 +693,11 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
return errors.Wrap(err, "set index")
}

// Increment the count of the saved SSZ encoded data column sidecar.
storedCount++

// Append the SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
sszEncodedDataColumnSidecarsBytes = append(sszEncodedDataColumnSidecarsBytes, sszEncodedDataColumnSidecar...)
}
}

@@ -684,17 +712,20 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsExistingFile(filePath string
}

// Append the SSZ encoded data column sidecars to the end of the file.
count, err = file.WriteAt(sszEncodedDataColumnSidecars, metadata.fileSize)
count, err = file.WriteAt(sszEncodedDataColumnSidecarsBytes, metadata.fileSize)
if err != nil {
return errors.Wrap(err, "write SSZ encoded data column sidecars")
}
if count != len(sszEncodedDataColumnSidecars) {
if count != len(sszEncodedDataColumnSidecarsBytes) {
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}
dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
@@ -707,7 +738,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp

var (
sszEncodedDataColumnSidecarRefSize int
sszEncodedDataColumnSidecars []byte
sszEncodedDataColumnSidecarsBytes []byte
)

// Initialize the count of the saved SSZ encoded data column sidecar.

@@ -719,7 +750,26 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
break
}

for _, dataColumnSidecar := range dataColumnSidecars {
var wg errgroup.Group
sszEncodedDataColumnSidecars := make([][]byte, len(dataColumnSidecars))
for i, dataColumnSidecar := range dataColumnSidecars {
wg.Go(func() error {
// SSZ encode the first data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}

sszEncodedDataColumnSidecars[i] = sszEncodedDataColumnSidecar
return nil
})
}

if err := wg.Wait(); err != nil {
return err
}

for i, dataColumnSidecar := range dataColumnSidecars {
// Extract the data column index.
dataColumnIndex := dataColumnSidecar.Index

@@ -742,10 +792,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
storedCount++

// SSZ encode the first data column sidecar.
sszEncodedDataColumnSidecar, err := dataColumnSidecar.MarshalSSZ()
if err != nil {
return errors.Wrap(err, "data column sidecar marshal SSZ")
}
sszEncodedDataColumnSidecar := sszEncodedDataColumnSidecars[i]

// Check if the size of the SSZ encoded data column sidecar is correct.
if sszEncodedDataColumnSidecarRefSize != 0 && len(sszEncodedDataColumnSidecar) != sszEncodedDataColumnSidecarRefSize {

@@ -756,7 +803,7 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
sszEncodedDataColumnSidecarRefSize = len(sszEncodedDataColumnSidecar)

// Append the first SSZ encoded data column sidecar to the SSZ encoded data column sidecars.
sszEncodedDataColumnSidecars = append(sszEncodedDataColumnSidecars, sszEncodedDataColumnSidecar...)
sszEncodedDataColumnSidecarsBytes = append(sszEncodedDataColumnSidecarsBytes, sszEncodedDataColumnSidecar...)
}
}

@@ -793,12 +840,12 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
rawIndices := indices.raw()

// Concatenate the version, the data column sidecar size, the data column indices and the SSZ encoded data column sidecar.
countToWrite := headerSize + len(sszEncodedDataColumnSidecars)
countToWrite := headerSize + len(sszEncodedDataColumnSidecarsBytes)
bytes := make([]byte, 0, countToWrite)
bytes = append(bytes, byte(version))
bytes = append(bytes, encodedSszEncodedDataColumnSidecarSize[:]...)
bytes = append(bytes, rawIndices[:]...)
bytes = append(bytes, sszEncodedDataColumnSidecars...)
bytes = append(bytes, sszEncodedDataColumnSidecarsBytes...)

countWritten, err := file.Write(bytes)
if err != nil {

@@ -808,10 +855,14 @@ func (dcs *DataColumnStorage) saveDataColumnSidecarsNewFile(filePath string, inp
return errWrongBytesWritten
}

syncStart := time.Now()
if err := file.Sync(); err != nil {
return errors.Wrap(err, "sync")
}

dataColumnFileSyncLatency.Observe(float64(time.Since(syncStart).Milliseconds()))
dataColumnBatchStoreCount.Observe(float64(storedCount))

return nil
}
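The two save paths above now split each batch into two phases: the CPU-bound `MarshalSSZ` calls run concurrently into a pre-sized `[][]byte`, and only afterwards does a sequential pass do the index bookkeeping, concatenate the encodings in input order, issue a single write, and `Sync()` the file. A small sketch of that shape; the `encode` helper and the temporary file are hypothetical placeholders, not the storage code's types:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sync/errgroup"
)

// encode is a hypothetical stand-in for MarshalSSZ on a sidecar.
func encode(item string) ([]byte, error) { return []byte(item), nil }

// writeBatch encodes items concurrently, then concatenates and writes them in a
// single ordered pass: parallel CPU work first, sequential bookkeeping and I/O after.
func writeBatch(f *os.File, items []string, offset int64) error {
	var wg errgroup.Group
	encoded := make([][]byte, len(items)) // one slot per item keeps ordering

	for i, item := range items {
		wg.Go(func() error {
			b, err := encode(item)
			if err != nil {
				return fmt.Errorf("encode item %d: %w", i, err)
			}
			encoded[i] = b
			return nil
		})
	}
	if err := wg.Wait(); err != nil {
		return err
	}

	// Sequential phase: concatenate in input order and append once.
	var payload []byte
	for _, b := range encoded {
		payload = append(payload, b...)
	}
	n, err := f.WriteAt(payload, offset)
	if err != nil {
		return fmt.Errorf("write payload: %w", err)
	}
	if n != len(payload) {
		return fmt.Errorf("wrote %d of %d bytes", n, len(payload))
	}
	// Flush to disk, as the storage code does before observing sync latency.
	return f.Sync()
}

func main() {
	f, err := os.CreateTemp("", "batch-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println(writeBatch(f, []string{"a", "b", "c"}, 0))
}
```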
@@ -36,16 +36,15 @@ var (
})

// Data columns
dataColumnBuckets = []float64{3, 5, 7, 9, 11, 13}
dataColumnSaveLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_save_latency",
Help: "Latency of DataColumnSidecar storage save operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{10, 20, 30, 50, 100, 200, 500},
})
dataColumnFetchLatency = promauto.NewHistogram(prometheus.HistogramOpts{
Name: "data_column_storage_get_latency",
Help: "Latency of DataColumnSidecar storage get operations in milliseconds",
Buckets: dataColumnBuckets,
Buckets: []float64{3, 5, 7, 9, 11, 13},
})
dataColumnPrunedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "data_column_pruned",

@@ -59,4 +58,16 @@ var (
Name: "data_column_disk_count",
Help: "Approximate number of data columns in storage",
})
dataColumnFileSyncLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_file_sync_latency",
Help: "Latency of sync operations when saving data columns in milliseconds",
})
dataColumnBatchStoreCount = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_batch_store_count",
Help: "Number of data columns stored in a batch",
})
dataColumnPruneLatency = promauto.NewSummary(prometheus.SummaryOpts{
Name: "data_column_prune_latency",
Help: "Latency of data column prune operations in milliseconds",
})
)
@@ -40,6 +40,7 @@ go_library(
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/verification:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/execution/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/verification"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
@@ -532,12 +533,23 @@ func (s *Service) GetBlobsV2(ctx context.Context, versionedHashes []common.Hash)
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.GetBlobsV2")
defer span.End()

start := time.Now()

if !s.capabilityCache.has(GetBlobsV2) {
return nil, errors.New(fmt.Sprintf("%s is not supported", GetBlobsV2))
}

if flags.Get().DisableGetBlobsV2 {
return []*pb.BlobAndProofV2{}, nil
}

result := make([]*pb.BlobAndProofV2, len(versionedHashes))
err := s.rpcClient.CallContext(ctx, &result, GetBlobsV2, versionedHashes)

if len(result) != 0 {
getBlobsV2Latency.Observe(float64(time.Since(start).Milliseconds()))
}

return result, handleRPCError(err)
}
@@ -27,6 +27,13 @@ var (
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
getBlobsV2Latency = promauto.NewHistogram(
prometheus.HistogramOpts{
Name: "get_blobs_v2_latency_milliseconds",
Help: "Captures RPC latency for getBlobsV2 in milliseconds",
Buckets: []float64{25, 50, 100, 200, 500, 1000, 2000, 4000},
},
)
errParseCount = promauto.NewCounter(prometheus.CounterOpts{
Name: "execution_parse_error_count",
Help: "The number of errors that occurred while parsing execution payload",
@@ -664,7 +664,6 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
IPColocationWhitelist: colocationWhitelist,
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
EnableAutoNAT: cliCtx.Bool(cmd.EnableAutoNATFlag.Name),
StateNotifier: b,
DB: b.db,
StateGen: b.stateGen,
@@ -13,8 +13,6 @@ go_library(
"doc.go",
"fork.go",
"fork_watcher.go",
"gossip_peer_controller.go",
"gossip_peer_crawler.go",
"gossip_scoring_params.go",
"gossip_topic_mappings.go",
"handshake.go",

@@ -53,7 +51,6 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/gossipcrawler:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -95,7 +92,6 @@ go_library(
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
"@com_github_libp2p_go_libp2p//core/control:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/event:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",

@@ -105,8 +101,10 @@ go_library(
"@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
"@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
"@com_github_libp2p_go_libp2p_mplex//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_mplex//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_patrickmn_go_cache//:go_default_library",

@@ -117,7 +115,6 @@ go_library(
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
"@org_golang_x_sync//semaphore:go_default_library",
],
)

@@ -131,8 +128,6 @@ go_test(
"dial_relay_node_test.go",
"discovery_test.go",
"fork_test.go",
"gossip_peer_controller_test.go",
"gossip_peer_crawler_test.go",
"gossip_scoring_params_test.go",
"gossip_topic_mappings_test.go",
"message_id_test.go",

@@ -159,11 +154,9 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/peerdas:go_default_library",
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/iface:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/gossipcrawler:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -201,7 +194,6 @@ go_test(
"@com_github_libp2p_go_libp2p//:go_default_library",
"@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
"@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
"@com_github_libp2p_go_libp2p//core/event:go_default_library",
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",

@@ -211,10 +203,8 @@ go_test(
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus/testutil:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@com_github_stretchr_testify//require:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
@@ -60,10 +60,7 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
if !ok {
return errors.Errorf("message of %T does not support marshaller interface", msg)
}

fullTopic := fmt.Sprintf(topic, forkDigest) + s.Encoding().ProtocolSuffix()

return s.broadcastObject(ctx, castMsg, fullTopic)
return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))
}

// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be

@@ -109,7 +106,6 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
}

func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [fieldparams.VersionLength]byte) {
topic := AttestationSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -120,7 +116,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6

// Ensure we have peers with this subnet.
s.subnetLocker(subnet).RLock()
hasPeer := s.hasPeerWithTopic(topic)
hasPeer := s.hasPeerWithSubnet(attestationToTopic(subnet, forkDigest))
s.subnetLocker(subnet).RUnlock()

span.SetAttributes(

@@ -135,7 +131,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
s.subnetLocker(subnet).Lock()
defer s.subnetLocker(subnet).Unlock()

if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}

@@ -157,14 +153,13 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
return
}

if err := s.broadcastObject(ctx, att, topic); err != nil {
if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast attestation")
tracing.AnnotateError(span, err)
}
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
topic := SyncCommitteeSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -178,7 +173,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
// to ensure that we can reuse the same subnet locker.
wrappedSubIdx := subnet + syncLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithTopic(topic)
hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()

span.SetAttributes(

@@ -192,7 +187,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
if err := func() error {
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()
if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}

@@ -210,7 +205,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
return
}

if err := s.broadcastObject(ctx, sMsg, topic); err != nil {
if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast sync committee message")
tracing.AnnotateError(span, err)
}

@@ -238,7 +233,6 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}

func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [fieldparams.VersionLength]byte) {
topic := BlobSubnetTopic(forkDigest, subnet)
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
defer span.End()
ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -249,7 +243,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob

wrappedSubIdx := subnet + blobSubnetLockerVal
s.subnetLocker(wrappedSubIdx).RLock()
hasPeer := s.hasPeerWithTopic(topic)
hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
s.subnetLocker(wrappedSubIdx).RUnlock()

if !hasPeer {

@@ -258,7 +252,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()

if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnets")
}

@@ -270,7 +264,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
}
}

if err := s.broadcastObject(ctx, blobSidecar, topic); err != nil {
if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
log.WithError(err).Error("Failed to broadcast blob sidecar")
tracing.AnnotateError(span, err)
}

@@ -299,7 +293,7 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
}

digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, LcOptimisticToTopic(digest)); err != nil {
if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client optimistic update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)

@@ -333,7 +327,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
}

forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
if err := s.broadcastObject(ctx, update, LcFinalityToTopic(forkDigest)); err != nil {
if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
log.WithError(err).Debug("Failed to broadcast light client finality update")
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)

@@ -392,14 +386,13 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)

// Build the topic corresponding to subnet column subnet and this fork digest.
topic := DataColumnSubnetTopic(forkDigest, subnet)
topic := dataColumnSubnetToTopic(subnet, forkDigest)

// Compute the wrapped subnet index.
wrappedSubIdx := subnet + dataColumnSubnetVal

// Find peers if needed.
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic); err != nil {
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
tracing.AnnotateError(span, err)
log.WithError(err).Error("Cannot find peers if needed")
return

@@ -494,16 +487,20 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
func (s *Service) findPeersIfNeeded(
ctx context.Context,
wrappedSubIdx uint64,
topic string,
topicFormat string,
forkDigest [fieldparams.VersionLength]byte,
subnet uint64,
) error {
// Sending a data column sidecar to only one peer is not ideal,
// but it ensures at least one peer receives it.
s.subnetLocker(wrappedSubIdx).Lock()
defer s.subnetLocker(wrappedSubIdx).Unlock()

if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
// No peers found, attempt to find peers with this subnet.
if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
return errors.Wrap(err, "find peers with subnet")
}

return nil
}

@@ -528,10 +525,34 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
iid := int64(id)
span = trace.AddMessageSendEvent(span, iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
}
if err := s.PublishToTopic(ctx, topic, buf.Bytes()); err != nil {
if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
err := errors.Wrap(err, "could not publish message")
tracing.AnnotateError(span, err)
return err
}
return nil
}

func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func lcOptimisticToTopic(forkDigest [4]byte) string {
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest)
}

func lcFinalityToTopic(forkDigest [4]byte) string {
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
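With the dialer removed, the broadcaster builds bare subnet topics through small helpers such as `attestationToTopic` and appends the encoder's protocol suffix exactly once, inside `broadcastObject`. A minimal sketch of that split; the format string is inferred from the test expectations later in this diff, and the suffix value is an assumption rather than something stated in the source:

```go
package main

import "fmt"

// Format inferred from the test expectations in this diff:
// a 4-byte fork digest rendered as hex, then the subnet id.
const attestationSubnetTopicFormat = "/eth2/%x/beacon_attestation_%d"

// protocolSuffix stands in for s.Encoding().ProtocolSuffix(); the exact value
// depends on the configured encoder (assumed here, not taken from the diff).
const protocolSuffix = "/ssz_snappy"

// attestationToTopic mirrors the helper added in the diff: it builds the bare
// gossip topic without the encoding suffix.
func attestationToTopic(subnet uint64, forkDigest [4]byte) string {
	return fmt.Sprintf(attestationSubnetTopicFormat, forkDigest, subnet)
}

func main() {
	topic := attestationToTopic(2, [4]byte{})
	fmt.Println(topic)                  // /eth2/00000000/beacon_attestation_2
	fmt.Println(topic + protocolSuffix) // full topic as published by broadcastObject
}
```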
@@ -30,7 +30,6 @@ import (
"github.com/OffchainLabs/prysm/v7/testing/require"
"github.com/OffchainLabs/prysm/v7/testing/util"
"github.com/OffchainLabs/prysm/v7/time/slots"
"github.com/ethereum/go-ethereum/p2p/enode"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
"google.golang.org/protobuf/proto"

@@ -110,7 +109,6 @@ func TestService_Attestation_Subnet(t *testing.T) {
if gtm := GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()]; gtm != AttestationSubnetTopicFormat {
t.Errorf("Constant is out of date. Wanted %s, got %s", AttestationSubnetTopicFormat, gtm)
}
s := Service{}

tests := []struct {
att *ethpb.Attestation

@@ -123,7 +121,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 2,
},
},
topic: "/eth2/00000000/beacon_attestation_2" + s.Encoding().ProtocolSuffix(),
topic: "/eth2/00000000/beacon_attestation_2",
},
{
att: &ethpb.Attestation{

@@ -132,7 +130,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 10,
},
},
topic: "/eth2/00000000/beacon_attestation_21" + s.Encoding().ProtocolSuffix(),
topic: "/eth2/00000000/beacon_attestation_21",
},
{
att: &ethpb.Attestation{

@@ -141,12 +139,12 @@ func TestService_Attestation_Subnet(t *testing.T) {
Slot: 529,
},
},
topic: "/eth2/00000000/beacon_attestation_8" + s.Encoding().ProtocolSuffix(),
topic: "/eth2/00000000/beacon_attestation_8",
},
}
for _, tt := range tests {
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(100, tt.att.Data.CommitteeIndex, tt.att.Data.Slot)
assert.Equal(t, tt.topic, AttestationSubnetTopic([4]byte{}, subnet), "Wrong topic")
assert.Equal(t, tt.topic, attestationToTopic(subnet, [4]byte{} /* fork digest */), "Wrong topic")
}
}
@@ -175,12 +173,14 @@ func TestService_BroadcastAttestation(t *testing.T) {
msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
subnet := uint64(5)

GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic := AttestationSubnetTopic(digest, subnet)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
@@ -226,7 +226,6 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// Setup bootnode.
cfg := &Config{PingInterval: testPingInterval, DB: db}
cfg.UDPPort = uint(port)
cfg.TCPPort = uint(port)
_, pkey := createAddrAndPrivKey(t)
ipAddr := net.ParseIP("127.0.0.1")
genesisTime := time.Now()

@@ -252,9 +251,8 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {

var listeners []*listenerWrapper
var hosts []host.Host
var configs []*Config
// setup other nodes.
baseCfg := &Config{
cfg = &Config{
Discv5BootStrapAddrs: []string{bootNode.String()},
MaxPeers: 2,
PingInterval: testPingInterval,

@@ -263,21 +261,11 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
// Setup 2 different hosts
for i := uint(1); i <= 2; i++ {
h, pkey, ipAddr := createHost(t, port+i)

// Create a new config for each service to avoid shared mutations
cfg := &Config{
Discv5BootStrapAddrs: baseCfg.Discv5BootStrapAddrs,
MaxPeers: baseCfg.MaxPeers,
PingInterval: baseCfg.PingInterval,
DB: baseCfg.DB,
UDPPort: uint(port + i),
TCPPort: uint(port + i),
}

cfg.UDPPort = uint(port + i)
cfg.TCPPort = uint(port + i)
if len(listeners) > 0 {
cfg.Discv5BootStrapAddrs = append(cfg.Discv5BootStrapAddrs, listeners[len(listeners)-1].Self().String())
}

s := &Service{
cfg: cfg,
genesisTime: genesisTime,

@@ -290,22 +278,18 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
close(s.custodyInfoSet)

listener, err := s.startDiscoveryV5(ipAddr, pkey)
assert.NoError(t, err, "Could not start discovery for node")

// Set listener for the service
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))

// Set subnet for 2nd peer
// Set for 2nd peer
if i == 2 {
s.dv5Listener = listener
s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
bitV := bitfield.NewBitvector64()
bitV.SetBitAt(subnet, true)
err := s.updateSubnetRecordWithMetadata(bitV)
require.NoError(t, err)
}
assert.NoError(t, err, "Could not start discovery for node")
listeners = append(listeners, listener)
hosts = append(hosts, h)
configs = append(configs, cfg)
}
defer func() {
// Close down all peers.

@@ -340,7 +324,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
pubsub: ps1,
dv5Listener: listeners[0],
joinedTopics: map[string]*pubsub.Topic{},
cfg: configs[0],
cfg: cfg,
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),

@@ -356,7 +340,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
pubsub: ps2,
dv5Listener: listeners[1],
joinedTopics: map[string]*pubsub.Topic{},
cfg: configs[1],
cfg: cfg,
genesisTime: time.Now(),
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
subnetsLock: make(map[uint64]*sync.RWMutex),
@@ -369,12 +353,14 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
go p2.listenForNewNodes()

msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
topic := AttestationSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic := AttestationSubnetTopic(digest, subnet)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
// We don't use our internal subscribe method
// due to using floodsub over here.
tpHandle, err := p2.JoinTopic(topic)

@@ -445,12 +431,14 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
msg := util.HydrateSyncCommittee(&ethpb.SyncCommitteeMessage{})
subnet := uint64(5)

GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = SyncCommitteeSubnetTopicFormat
topic := SyncCommitteeSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic := SyncCommitteeSubnetTopic(digest, subnet)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)
@@ -520,12 +508,14 @@ func TestService_BroadcastBlob(t *testing.T) {
}
subnet := uint64(0)

GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = BlobSubnetTopicFormat
topic := BlobSubnetTopicFormat
GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = topic
digest, err := p.currentForkDigest()
require.NoError(t, err)
topic := BlobSubnetTopic(digest, subnet)
topic = fmt.Sprintf(topic, digest, subnet)

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

@@ -585,9 +575,10 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
topic := LcOptimisticToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

@@ -660,9 +651,10 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
require.NoError(t, err)

GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
topic := LcFinalityToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

// External peer subscribes to the topic.
topic += p.Encoding().ProtocolSuffix()
sub, err := p2.SubscribeToTopic(topic)
require.NoError(t, err)

@@ -710,6 +702,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
const (
port = 2000
columnIndex = 12
topicFormat = DataColumnSubnetTopicFormat
)

ctx := t.Context()
@@ -767,17 +760,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
require.NoError(t, err)

subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
topic := DataColumnSubnetTopic(digest, subnet)

crawler, err := NewGossipPeerCrawler(t.Context(), service, listener, 1*time.Second, 1*time.Second, 10,
func(n *enode.Node) bool { return true },
service.Peers().Scorers().Score)
require.NoError(t, err)
err = crawler.Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
return []string{topic}, nil
})
require.NoError(t, err)
service.gossipDialer = NewGossipPeerDialer(t.Context(), crawler, service.PubSub().ListPeers, service.DialPeers)
topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()

_, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
verifiedRoSidecar := verifiedRoSidecars[0]
@@ -28,7 +28,6 @@ const (
type Config struct {
NoDiscovery bool
EnableUPnP bool
EnableAutoNAT bool
StaticPeerID bool
DisableLivenessCheck bool
StaticPeers []string
@@ -369,11 +369,11 @@ func (s *Service) listenForNewNodes() {
}
}

// findAndDialPeers ensures that our node is connected to enough peers.
// If the threshold is met, then this function immediately returns.
// FindAndDialPeersWithSubnets ensures that our node is connected to enough peers.
// If, the threshold is met, then this function immediately returns.
// Otherwise, it searches for new peers and dials them.
// If `ctx` is canceled while searching for peers, search is stopped, but newly
// found peers are still dialed. In this case, the function returns an error.
// If `ctx` is canceled while searching for peers, search is stopped, but new found peers are still dialed.
// In this case, the function returns an error.
func (s *Service) findAndDialPeers(ctx context.Context) error {
// Restrict dials if limit is applied.
maxConcurrentDials := math.MaxInt

@@ -404,7 +404,8 @@ func (s *Service) findAndDialPeers(ctx context.Context) error {
return err
}

dialedPeerCount := s.DialPeers(s.ctx, maxConcurrentDials, peersToDial)
dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)

if dialedPeerCount > missingPeerCount {
missingPeerCount = 0
continue

@@ -553,7 +554,6 @@ func (s *Service) createListener(
Bootnodes: bootNodes,
PingInterval: s.cfg.PingInterval,
NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
V5RespTimeout: 300 * time.Millisecond,
}

listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
@@ -1,252 +0,0 @@
package p2p

import (
"context"
"math"
"slices"
"sync"
"time"

"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/libp2p/go-libp2p/core/peer"
)

const dialInterval = 1 * time.Second

// GossipPeerDialer maintains minimum peer counts for gossip topics by periodically
// dialing new peers discovered by a crawler. It runs a background loop that checks each
// topic's peer count and dials new peers when below the target threshold.
type GossipPeerDialer struct {
ctx context.Context

listPeers func(topic string) []peer.ID
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint

crawler gossipcrawler.Crawler
topicsProvider gossipcrawler.SubnetTopicsProvider

once sync.Once
}

// NewGossipPeerDialer creates a new GossipPeerDialer instance.
//
// Parameters:
// - ctx: Parent context that controls the lifecycle of the dialer. When cancelled,
// the background dial loop will terminate.
// - crawler: Source of peer candidates for each topic. The crawler maintains a registry
// of peers discovered through DHT crawling, indexed by the topics they subscribe to.
// - listPeers: Function that returns the current peers connected for a given topic.
// Used to determine how many additional peers need to be dialed.
// - dialPeers: Function that dials the given enode.Node peers with a concurrency limit.
// Returns the number of successful dials.
//
// The dialer must be started with Start() before it begins maintaining peer counts.
func NewGossipPeerDialer(
ctx context.Context,
crawler gossipcrawler.Crawler,
listPeers func(topic string) []peer.ID,
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint,
) *GossipPeerDialer {
return &GossipPeerDialer{
ctx: ctx,
listPeers: listPeers,
dialPeers: dialPeers,
crawler: crawler,
}
}

// Start begins the background dial loop that maintains peer counts for all topics.
//
// The provider function is called on each tick to get the current list of topics that
// need peer maintenance. This allows the set of topics to change dynamically as the node
// subscribes/unsubscribes from subnets.
//
// Start is idempotent - calling it multiple times has no effect after the first call.
// Only the provider from the first call will be used; subsequent calls are ignored.
//
// The dial loop runs every dialInterval (1 second) and for each topic:
// 1. Checks current peer count via listPeers()
// 2. If below the per-topic min peer count, requests candidates from the crawler
// 3. Deduplicates peers across all topics to avoid redundant dials
// 4. Dials missing peers with rate limiting if enabled
//
// Returns nil always (error return preserved for interface compatibility).
func (g *GossipPeerDialer) Start(provider gossipcrawler.SubnetTopicsProvider) error {
g.once.Do(func() {
g.topicsProvider = provider
go g.dialLoop()
})

return nil
}

func (g *GossipPeerDialer) dialLoop() {
ticker := time.NewTicker(dialInterval)
defer ticker.Stop()

for {
select {
case <-ticker.C:
peersToDial := g.selectPeersForTopics()
if len(peersToDial) == 0 {
continue
}
g.dialPeersWithRatelimiting(peersToDial)

case <-g.ctx.Done():
return
}
}
}

// selectPeersForTopics builds a bidirectional mapping of topics to peers and selects
// peers to dial using a greedy algorithm that prioritizes peers serving multiple topics.
// When a peer is selected, the needed count is decremented for ALL topics that peer serves,
// avoiding redundant dials when one peer can satisfy multiple topic requirements.
func (g *GossipPeerDialer) selectPeersForTopics() []*enode.Node {
topicsWithMinPeers := g.topicsProvider()

// Calculate how many peers each topic still needs.
neededByTopic := make(map[string]int)
for topic, minPeers := range topicsWithMinPeers {
currentCount := len(g.listPeers(topic))
if needed := minPeers - currentCount; needed > 0 {
neededByTopic[topic] = needed
}
}

if len(neededByTopic) == 0 {
return nil
}

peerToTopics := make(map[enode.ID][]string)
nodeByID := make(map[enode.ID]*enode.Node)

for topic := range neededByTopic {
candidates := g.crawler.PeersForTopic(topic)
for _, node := range candidates {
id := node.ID()
if _, exists := nodeByID[id]; !exists {
nodeByID[id] = node
}
peerToTopics[id] = append(peerToTopics[id], topic)
}
}

// Build candidate list sorted by topic count (descending).
// Peers serving more topics are prioritized.
type candidate struct {
node *enode.Node
topics []string
}
candidates := make([]candidate, 0, len(peerToTopics))
for id, topics := range peerToTopics {
candidates = append(candidates, candidate{node: nodeByID[id], topics: topics})
}

// sort candidates by topic count (descending)
slices.SortFunc(candidates, func(a, b candidate) int {
return len(b.topics) - len(a.topics)
})

// Greedy selection with cross-topic accounting.
var selected []*enode.Node
for _, c := range candidates {
// Check if this peer serves any topic we still need.
servesNeededTopic := false
for _, topic := range c.topics {
if neededByTopic[topic] > 0 {
servesNeededTopic = true
break
}
}

if !servesNeededTopic {
continue
}

// Select this peer and decrement needed count for ALL topics it serves.
selected = append(selected, c.node)
for _, topic := range c.topics {
if neededByTopic[topic] > 0 {
neededByTopic[topic]--
}
}
}

return selected
}

// DialPeersForTopicBlocking blocks until the specified topic has at least nPeers connected,
// or until the context is cancelled.
//
// This method is useful when you need to ensure a minimum number of peers are connected
// for a specific topic before proceeding (e.g., before publishing a message).
//
// The method polls in a loop:
// 1. Check if current peer count >= nPeers, return nil if satisfied
// 2. Get peer candidates from crawler for this topic
// 3. Dial candidates with rate limiting
// 4. Wait 100ms for connections to establish in pubsub layer
// 5. Repeat until target reached or context cancelled
//
// Parameters:
// - ctx: Context to cancel the blocking operation. Takes precedence for cancellation.
// - topic: The gossipsub topic to ensure peers for.
// - nPeers: Minimum number of peers required before returning.
//
// Returns:
// - nil: Successfully reached the target peer count.
// - ctx.Err(): The provided context was cancelled.
// - g.ctx.Err(): The dialer's parent context was cancelled.
//
// Note: This may block indefinitely if the crawler cannot provide enough peers
// and the context has no deadline.
func (g *GossipPeerDialer) DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error {
for {
peers := g.listPeers(topic)
if len(peers) >= nPeers {
return nil
}

newPeers := g.peersForTopic(topic, nPeers)
if len(newPeers) > 0 {
g.dialPeersWithRatelimiting(newPeers)
}

select {
case <-ctx.Done():
return ctx.Err()
// some wait here is good after dialing as connections take some time to show up in pubsub
case <-time.After(100 * time.Millisecond):
case <-g.ctx.Done():
return g.ctx.Err()
}
}
}

func (g *GossipPeerDialer) peersForTopic(topic string, targetCount int) []*enode.Node {
peers := g.listPeers(topic)
peerCount := len(peers)
if peerCount >= targetCount {
return nil
}
missing := targetCount - peerCount
newPeers := g.crawler.PeersForTopic(topic)
if len(newPeers) > missing {
newPeers = newPeers[:missing]
}

return newPeers
}

func (g *GossipPeerDialer) dialPeersWithRatelimiting(peers []*enode.Node) {
// Dial new peers in batches.
maxConcurrentDials := math.MaxInt
if flags.MaxDialIsActive() {
maxConcurrentDials = flags.Get().MaxConcurrentDials
}
g.dialPeers(g.ctx, maxConcurrentDials, peers)
}
@@ -1,523 +0,0 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"net"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGossipPeerDialer_Start(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
provider gossipcrawler.SubnetTopicsProvider
|
||||
expectedConnects int
|
||||
expectStartErr bool
|
||||
}{
|
||||
{
|
||||
name: "dials unique peers across topics",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30101)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30102)
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
"topic/b": {nodeA},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 2, "topic/b": 2}
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
{
|
||||
name: "uses per-topic min peer counts",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodes := make([]*enode.Node, 5)
|
||||
for i := range nodes {
|
||||
nodes[i] = newTestNode(t, "127.0.0.1", uint16(30110+i))
|
||||
}
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
// topic/mesh has 3 available peers, minPeers=2 -> should dial 2
|
||||
"topic/mesh": {nodes[0], nodes[1], nodes[2]},
|
||||
// topic/fanout has 3 available peers, minPeers=1 -> should dial 1
|
||||
"topic/fanout": {nodes[3], nodes[4]},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/mesh": 2,
|
||||
"topic/fanout": 1,
|
||||
}
|
||||
},
|
||||
// Total: 2 from mesh + 1 from fanout = 3 peers dialed
|
||||
expectedConnects: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
listPeers := func(topic string) []peer.ID { return nil }
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), tt.newCrawler(t), listPeers, md.DialPeers)
|
||||
|
||||
err := dialer.Start(tt.provider)
|
||||
if tt.expectStartErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return md.dialCount() >= tt.expectedConnects
|
||||
}, 2*time.Second, 20*time.Millisecond)
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_DialPeersForTopicBlocking(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers int
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
targetPeers int
|
||||
ctx func() (context.Context, context.CancelFunc)
|
||||
expectedConnects int
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "returns immediately when enough peers",
|
||||
connectedPeers: 1,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
return &mockCrawler{}
|
||||
},
|
||||
targetPeers: 1,
|
||||
ctx: func() (context.Context, context.CancelFunc) { return context.WithCancel(context.Background()) },
|
||||
expectedConnects: 0,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "dials when peers are missing",
|
||||
connectedPeers: 0,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30201)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30202)
|
||||
return &mockCrawler{
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
},
|
||||
}
|
||||
},
|
||||
targetPeers: 2,
|
||||
ctx: func() (context.Context, context.CancelFunc) {
|
||||
return context.WithTimeout(context.Background(), 1*time.Second)
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
var mu sync.Mutex
|
||||
connected := make([]peer.ID, 0)
|
||||
for i := 0; i < tt.connectedPeers; i++ {
|
||||
connected = append(connected, peer.ID(string(rune(i))))
|
||||
}
|
||||
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return connected
|
||||
}
|
||||
|
||||
dialPeers := func(ctx context.Context, max int, nodes []*enode.Node) uint {
|
||||
cnt := md.DialPeers(ctx, max, nodes)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
for range nodes {
|
||||
// Just add a dummy peer ID to simulate connection success
|
||||
connected = append(connected, peer.ID("dummy"))
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
crawler := tt.newCrawler(t)
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, dialPeers)
|
||||
topic := "topic/a"
|
||||
|
||||
ctx, cancel := tt.ctx()
|
||||
defer cancel()
|
||||
|
||||
err := dialer.DialPeersForTopicBlocking(ctx, topic, tt.targetPeers)
|
||||
if tt.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_peersForTopic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connected int
|
||||
targetCount int
|
||||
buildPeers func(t *testing.T) ([]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "returns nil when enough peers already connected",
|
||||
connected: 1,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
return []*enode.Node{newTestNode(t, "127.0.0.1", 30301)}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns crawler peers when none connected",
|
||||
connected: 0,
|
||||
targetCount: 2,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30311)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30312)
|
||||
return []*enode.Node{nodeA, nodeB}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "truncates peers when more than needed",
|
||||
connected: 0,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30321)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30322)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30323)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only returns missing peers",
|
||||
connected: 1,
|
||||
targetCount: 3,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30331)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30332)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30333)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
peers := make([]peer.ID, tt.connected)
|
||||
for i := 0; i < tt.connected; i++ {
|
||||
peers[i] = peer.ID(string(rune(i))) // Fake peer ID
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: map[string][]*enode.Node{"topic/test": crawlerPeers},
|
||||
consume: false,
|
||||
}
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
|
||||
got := dialer.peersForTopic("topic/test", tt.targetCount)
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got))
|
||||
|
||||
for i := range expected {
|
||||
require.Equal(t, expected[i], got[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_selectPeersForTopics(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers map[string]int // topic -> connected peer count
|
||||
topicsProvider func() map[string]int
|
||||
buildPeers func(t *testing.T) (map[string][]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "prioritizes multi-topic peer over single-topic peers",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves all 3 topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30401)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30402)
|
||||
// Peer Z serves only topic/b
|
||||
nodeZ := newTestNode(t, "127.0.0.1", 30403)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX, nodeZ},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only nodeX should be dialed (satisfies all 3 topics)
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cross-topic decrement works correctly",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // Need 2 peers
|
||||
"topic/b": 1, // Need 1 peer
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves both topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30411)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30412)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// nodeX covers topic/b fully, and 1 of 2 for topic/a
|
||||
// nodeY covers remaining 1 for topic/a
|
||||
return crawlerPeers, []*enode.Node{nodeX, nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no redundant dials when one peer satisfies all",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30421)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only 1 dial needed for all 3 topics
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "skips topics with enough peers already",
|
||||
connectedPeers: map[string]int{
|
||||
"topic/a": 2, // Already has 2
|
||||
},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // min 2, already have 2
|
||||
"topic/b": 1, // min 1, have 0
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30431)
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30432)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeY},
|
||||
}
|
||||
// Only nodeY should be dialed (topic/a already satisfied)
|
||||
return crawlerPeers, []*enode.Node{nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns nil when all topics satisfied",
|
||||
connectedPeers: map[string]int{"topic/a": 2, "topic/b": 1},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2,
|
||||
"topic/b": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30441)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// No dials needed
|
||||
return crawlerPeers, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles empty crawler response",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 1}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
return map[string][]*enode.Node{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
count := tt.connectedPeers[topic]
|
||||
peers := make([]peer.ID, count)
|
||||
for i := range count {
|
||||
peers[i] = peer.ID(topic + string(rune(i)))
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: crawlerPeers,
|
||||
consume: false,
|
||||
}
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
dialer.topicsProvider = tt.topicsProvider
|
||||
|
||||
got := dialer.selectPeersForTopics()
|
||||
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got), "expected %d peers, got %d", len(expected), len(got))
|
||||
|
||||
// Verify all expected nodes are present (order may vary for equal topic counts)
|
||||
expectedIDs := make(map[enode.ID]struct{})
|
||||
for _, n := range expected {
|
||||
expectedIDs[n.ID()] = struct{}{}
|
||||
}
|
||||
for _, n := range got {
|
||||
_, ok := expectedIDs[n.ID()]
|
||||
require.True(t, ok, "unexpected peer %s in result", n.ID())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type mockCrawler struct {
|
||||
mu sync.Mutex
|
||||
peers map[string][]*enode.Node
|
||||
consume bool
|
||||
}
|
||||
|
||||
func (m *mockCrawler) Start(gossipcrawler.TopicExtractor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockCrawler) Stop() {}
|
||||
func (m *mockCrawler) RemovePeerByPeerId(peer.ID) {}
|
||||
func (m *mockCrawler) RemoveTopic(string) {}
|
||||
func (m *mockCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
nodes := m.peers[topic]
|
||||
if len(nodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
copied := slices.Clone(nodes)
|
||||
if m.consume {
|
||||
m.peers[topic] = nil
|
||||
}
|
||||
return copied
|
||||
}
|
||||
|
||||
type mockDialer struct {
|
||||
mu sync.Mutex
|
||||
dials []*enode.Node
|
||||
}
|
||||
|
||||
func (m *mockDialer) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.dials = append(m.dials, nodes...)
|
||||
return uint(len(nodes))
|
||||
}
|
||||
|
||||
func (m *mockDialer) dialCount() int {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return len(m.dials)
|
||||
}
|
||||
|
||||
func (m *mockDialer) dialedNodes() []*enode.Node {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return slices.Clone(m.dials)
|
||||
}
|
||||
|
||||
func newTestNode(t *testing.T, ip string, tcpPort uint16) *enode.Node {
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
|
||||
return newTestNodeWithPriv(t, priv, ip, tcpPort)
|
||||
}
|
||||
|
||||
func newTestNodeWithPriv(t *testing.T, priv crypto.PrivKey, ip string, tcpPort uint16) *enode.Node {
|
||||
t.Helper()
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
db.Close()
|
||||
})
|
||||
|
||||
convertedKey, err := ecdsa.ConvertFromInterfacePrivKey(priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
localNode.SetStaticIP(net.ParseIP(ip))
|
||||
localNode.Set(enr.TCP(tcpPort))
|
||||
localNode.Set(enr.UDP(tcpPort))
|
||||
|
||||
return localNode.Node()
|
||||
}
|
||||
@@ -1,546 +0,0 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/semaphore"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
type peerNode struct {
|
||||
isPinged bool
|
||||
node *enode.Node
|
||||
peerID peer.ID
|
||||
topics map[string]struct{}
|
||||
}
|
||||
|
||||
type crawledPeers struct {
|
||||
mu sync.RWMutex
|
||||
peerNodeByEnode map[enode.ID]*peerNode
|
||||
peerNodeByPid map[peer.ID]*peerNode
|
||||
peersByTopic map[string]map[*peerNode]struct{}
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) updateStatusToPinged(enodeID enode.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
existingPNode, ok := cp.peerNodeByEnode[enodeID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// we only want to ping a node with a given NodeId once -> not on every sequence number change
|
||||
// as ping is simply a test of a node being reachable and not fake
|
||||
existingPNode.isPinged = true
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) updatePeer(node *enode.Node, topics []string) (bool, error) {
|
||||
if node == nil {
|
||||
return false, errors.New("node is nil")
|
||||
}
|
||||
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
enodeID := node.ID()
|
||||
existingPNode, ok := cp.peerNodeByEnode[enodeID]
|
||||
|
||||
if ok && existingPNode.node == nil {
|
||||
return false, errors.New("enode is nil for enodeId")
|
||||
}
|
||||
|
||||
// we don't want to update enodes with a lower sequence number as they're stale records
|
||||
if ok && existingPNode.node.Seq() >= node.Seq() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !ok {
|
||||
// this is a new peer
|
||||
peerID, err := enodeToPeerID(node)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("converting enode to peer ID: %w", err)
|
||||
}
|
||||
existingPNode = &peerNode{
|
||||
node: node,
|
||||
peerID: peerID,
|
||||
topics: make(map[string]struct{}),
|
||||
}
|
||||
cp.peerNodeByEnode[enodeID] = existingPNode
|
||||
cp.peerNodeByPid[peerID] = existingPNode
|
||||
} else {
|
||||
existingPNode.node = node
|
||||
}
|
||||
|
||||
cp.updateTopicsUnlocked(existingPNode, topics)
|
||||
cp.recordMetricsUnlocked()
|
||||
|
||||
if existingPNode.isPinged || len(topics) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removeTopic(topic string) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
// Get all peers subscribed to this topic
|
||||
peers, ok := cp.peersByTopic[topic]
|
||||
if !ok {
|
||||
return // Topic doesn't exist
|
||||
}
|
||||
|
||||
// Remove the topic from each peer's topic list
|
||||
for pnode := range peers {
|
||||
delete(pnode.topics, topic)
|
||||
// remove the peer if it has no more topics left
|
||||
if len(pnode.topics) == 0 {
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the topic from byTopic map
|
||||
delete(cp.peersByTopic, topic)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removePeerByPeerId(peerID peer.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
pnode, ok := cp.peerNodeByPid[peerID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// Use updateTopicsUnlocked with empty topics to remove the peer
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removePeerByNodeId(enodeID enode.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
pnode, ok := cp.peerNodeByEnode[enodeID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) recordMetricsUnlocked() {
|
||||
gossipCrawlerPeersByEnodeCount.Set(float64(len(cp.peerNodeByEnode)))
|
||||
gossipCrawlerPeersByPidCount.Set(float64(len(cp.peerNodeByPid)))
|
||||
gossipCrawlerTopicsCount.Set(float64(len(cp.peersByTopic)))
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) cleanupPeer(pnode *peerNode) {
|
||||
delete(cp.peerNodeByPid, pnode.peerID)
|
||||
delete(cp.peerNodeByEnode, pnode.node.ID())
|
||||
for t := range pnode.topics {
|
||||
if peers, ok := cp.peersByTopic[t]; ok {
|
||||
delete(peers, pnode)
|
||||
if len(peers) == 0 {
|
||||
delete(cp.peersByTopic, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
pnode.topics = nil // Clear topics to indicate removal.
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removeOldTopicsFromPeer(pnode *peerNode, newTopics map[string]struct{}) {
|
||||
for oldTopic := range pnode.topics {
|
||||
if _, ok := newTopics[oldTopic]; !ok {
|
||||
if peers, ok := cp.peersByTopic[oldTopic]; ok {
|
||||
delete(peers, pnode)
|
||||
if len(peers) == 0 {
|
||||
delete(cp.peersByTopic, oldTopic)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) addNewTopicsToPeer(pnode *peerNode, newTopics map[string]struct{}) {
|
||||
for newTopic := range newTopics {
|
||||
if _, ok := pnode.topics[newTopic]; !ok {
|
||||
if _, ok := cp.peersByTopic[newTopic]; !ok {
|
||||
cp.peersByTopic[newTopic] = make(map[*peerNode]struct{})
|
||||
}
|
||||
cp.peersByTopic[newTopic][pnode] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateTopicsUnlocked updates the topics associated with a peer node.
|
||||
// If the topics slice is empty, the peer is completely removed from the crawled peers.
|
||||
// Otherwise, it updates the peer's topics by removing old topics that are no longer
|
||||
// present and adding new topics. This method assumes the caller holds the lock on cp.mu.
|
||||
// If a topic has no peers after this update, it is removed from the list of topics we track peers for.
|
||||
func (cp *crawledPeers) updateTopicsUnlocked(pnode *peerNode, topics []string) {
|
||||
// If topics is empty, remove the peer completely.
|
||||
if len(topics) == 0 {
|
||||
cp.cleanupPeer(pnode)
|
||||
return
|
||||
}
|
||||
|
||||
newTopics := make(map[string]struct{})
|
||||
for _, t := range topics {
|
||||
newTopics[t] = struct{}{}
|
||||
}
|
||||
|
||||
// Remove old topics that are no longer present.
|
||||
cp.removeOldTopicsFromPeer(pnode, newTopics)
|
||||
|
||||
// Add new topics.
|
||||
cp.addNewTopicsToPeer(pnode, newTopics)
|
||||
|
||||
pnode.topics = newTopics
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) getPeersForTopic(topic string, filter gossipcrawler.PeerFilterFunc) []peerNode {
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
|
||||
peers, ok := cp.peersByTopic[topic]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var peerNodes []peerNode
|
||||
for pnode := range peers {
|
||||
if pnode.node == nil {
|
||||
continue
|
||||
}
|
||||
if pnode.isPinged && filter(pnode.node) {
|
||||
peerNodes = append(peerNodes, *pnode)
|
||||
}
|
||||
}
|
||||
return peerNodes
|
||||
}
|
||||
|
||||
// GossipPeerCrawler discovers and maintains a registry of peers subscribed to gossipsub topics.
|
||||
// It uses discv5 to find peers, extracts their topic subscriptions from ENR records, and verifies
|
||||
// their reachability via ping. Only peers that have been successfully pinged are returned when
|
||||
// querying for peers on a given topic. The crawler runs three background loops: one for discovery,
|
||||
// one for ping verification, and one for periodic cleanup of stale or filtered-out peers.
|
||||
type GossipPeerCrawler struct {
|
||||
ctx context.Context
|
||||
|
||||
crawlInterval, crawlTimeout time.Duration
|
||||
|
||||
crawledPeers *crawledPeers
|
||||
|
||||
// Discovery interface for finding peers
|
||||
dv5 ListenerRebooter
|
||||
|
||||
p2pSvc *Service
|
||||
|
||||
topicExtractor gossipcrawler.TopicExtractor
|
||||
|
||||
peerFilter gossipcrawler.PeerFilterFunc
|
||||
scorer PeerScoreFunc
|
||||
|
||||
pingCh chan enode.Node
|
||||
pingSemaphore *semaphore.Weighted
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// cleanupInterval controls how frequently we sweep crawled peers and prune
|
||||
// those that are no longer useful.
|
||||
const cleanupInterval = 5 * time.Minute
|
||||
|
||||
// PeerScoreFunc calculates a reputation score for a given peer ID.
|
||||
// Higher scores indicate more desirable peers. This function is used by PeersForTopic
|
||||
// to sort returned peers in descending order of quality, allowing callers to prioritize
|
||||
// connections to the most reliable peers.
|
||||
type PeerScoreFunc func(peer.ID) float64
|
||||
|
||||
// NewGossipPeerCrawler creates a new crawler for discovering gossipsub peers.
|
||||
// The crawler uses the provided discv5 listener to discover peers and tracks their
|
||||
// topic subscriptions. Parameters:
|
||||
// - p2pSvc: The P2P service for network operations
|
||||
// - dv5: The discv5 listener used for peer discovery and ping verification
|
||||
// - crawlTimeout: Maximum duration for each crawl iteration
|
||||
// - crawlInterval: The duration between each crawl iteration
|
||||
// - maxConcurrentPings: Limits parallel ping operations to avoid overwhelming the network
|
||||
// - peerFilter: Determines which discovered peers should be tracked
|
||||
// - scorer: Calculates peer quality scores for sorting results
|
||||
//
|
||||
// Returns an error if any required parameter is nil or invalid.
|
||||
func NewGossipPeerCrawler(
|
||||
ctx context.Context,
|
||||
p2pSvc *Service,
|
||||
dv5 ListenerRebooter,
|
||||
crawlTimeout time.Duration,
|
||||
crawlInterval time.Duration,
|
||||
maxConcurrentPings int64,
|
||||
peerFilter gossipcrawler.PeerFilterFunc,
|
||||
scorer PeerScoreFunc,
|
||||
) (*GossipPeerCrawler, error) {
|
||||
if p2pSvc == nil {
|
||||
return nil, errors.New("p2pSvc is nil")
|
||||
}
|
||||
if dv5 == nil {
|
||||
return nil, errors.New("dv5 is nil")
|
||||
}
|
||||
if crawlTimeout <= 0 {
|
||||
return nil, errors.New("crawl timeout must be greater than 0")
|
||||
}
|
||||
if crawlInterval <= 0 {
|
||||
return nil, errors.New("crawl interval must be greater than 0")
|
||||
}
|
||||
if maxConcurrentPings <= 0 {
|
||||
return nil, errors.New("max concurrent pings must be greater than 0")
|
||||
}
|
||||
if peerFilter == nil {
|
||||
return nil, errors.New("peer filter is nil")
|
||||
}
|
||||
if scorer == nil {
|
||||
return nil, errors.New("peer scorer is nil")
|
||||
}
|
||||
|
||||
g := &GossipPeerCrawler{
|
||||
ctx: ctx,
|
||||
crawlInterval: crawlInterval,
|
||||
crawlTimeout: crawlTimeout,
|
||||
p2pSvc: p2pSvc,
|
||||
dv5: dv5,
|
||||
peerFilter: peerFilter,
|
||||
scorer: scorer,
|
||||
}
|
||||
g.pingCh = make(chan enode.Node, 4*maxConcurrentPings)
|
||||
g.pingSemaphore = semaphore.NewWeighted(maxConcurrentPings)
|
||||
g.crawledPeers = &crawledPeers{
|
||||
peerNodeByEnode: make(map[enode.ID]*peerNode),
|
||||
peerNodeByPid: make(map[peer.ID]*peerNode),
|
||||
peersByTopic: make(map[string]map[*peerNode]struct{}),
|
||||
}
|
||||
return g, nil
|
||||
}
|
||||
|
||||
// PeersForTopic returns a list of enode records for peers subscribed to the given topic.
|
||||
// Only peers that have been successfully pinged (verified as reachable) and pass the
|
||||
// configured peer filter are included. Results are sorted in descending order by peer
|
||||
// score, so higher-quality peers appear first. Returns nil if no peers are found for
|
||||
// the topic. The returned slice should not be modified as it contains pointers to
|
||||
// internal enode records.
|
||||
func (g *GossipPeerCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
peerNodes := g.crawledPeers.getPeersForTopic(topic, g.peerFilter)
|
||||
|
||||
slices.SortFunc(peerNodes, func(a, b peerNode) int {
|
||||
scoreA := g.scorer(a.peerID)
|
||||
scoreB := g.scorer(b.peerID)
|
||||
if scoreA > scoreB {
|
||||
return -1
|
||||
}
|
||||
if scoreA < scoreB {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
nodes := make([]*enode.Node, 0, len(peerNodes))
|
||||
for _, pn := range peerNodes {
|
||||
nodes = append(nodes, pn.node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
// RemovePeerByPeerId removes a peer from the crawler's registry by their libp2p peer ID.
|
||||
// This also removes the peer from all topic subscriptions they were associated with.
|
||||
// If the peer is not found, this operation is a no-op.
|
||||
func (g *GossipPeerCrawler) RemovePeerByPeerId(peerID peer.ID) {
|
||||
g.crawledPeers.removePeerByPeerId(peerID)
|
||||
}
|
||||
|
||||
// RemoveTopic removes a topic and all its peer associations from the crawler.
|
||||
// Peers that were only subscribed to this topic are completely removed from the registry.
|
||||
// Peers subscribed to other topics remain tracked for those topics.
|
||||
// If the topic does not exist, this operation is a no-op.
|
||||
func (g *GossipPeerCrawler) RemoveTopic(topic string) {
|
||||
g.crawledPeers.removeTopic(topic)
|
||||
}
|
||||
|
||||
// Start begins the crawler's background operations. It launches three goroutines:
|
||||
// a crawl loop that periodically discovers new peers via discv5, a ping loop that
|
||||
// verifies peer reachability, and a cleanup loop that removes stale or filtered peers.
|
||||
// The provided TopicExtractor is used to determine which gossipsub topics each
|
||||
// discovered peer subscribes to. Start is idempotent; subsequent calls after the
|
||||
// first are no-ops. Returns an error if the topic extractor is nil.
|
||||
func (g *GossipPeerCrawler) Start(te gossipcrawler.TopicExtractor) error {
|
||||
if te == nil {
|
||||
return errors.New("topic extractor is nil")
|
||||
}
|
||||
g.once.Do(func() {
|
||||
g.topicExtractor = te
|
||||
go g.crawlLoop()
|
||||
go g.pingLoop()
|
||||
go g.cleanupLoop()
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *GossipPeerCrawler) pingLoop() {
|
||||
for {
|
||||
select {
|
||||
case node := <-g.pingCh:
|
||||
if err := g.pingSemaphore.Acquire(g.ctx, 1); err != nil {
|
||||
return
|
||||
}
|
||||
go func(node *enode.Node) {
|
||||
defer g.pingSemaphore.Release(1)
|
||||
|
||||
if err := g.dv5.Ping(node); err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Debug("Failed to ping node")
|
||||
g.crawledPeers.removePeerByNodeId(node.ID())
|
||||
return
|
||||
}
|
||||
|
||||
g.crawledPeers.updateStatusToPinged(node.ID())
|
||||
}(&node)
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerCrawler) crawlLoop() {
|
||||
for {
|
||||
g.crawl()
|
||||
select {
|
||||
case <-time.After(g.crawlInterval):
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerCrawler) crawl() {
|
||||
log.Debug("Bitcoin")
|
||||
|
||||
ctx, cancel := context.WithTimeout(g.ctx, g.crawlTimeout)
|
||||
defer cancel()
|
||||
|
||||
iterator := g.dv5.RandomNodes()
|
||||
|
||||
// Ensure iterator unblocks on context cancellation or timeout
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
iterator.Close()
|
||||
}()
|
||||
|
||||
log.Debug("ABCDEFG")
|
||||
|
||||
for iterator.Next() {
|
||||
log.Debug("Ethereum")
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
if node == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !g.peerFilter(node) {
|
||||
g.crawledPeers.removePeerByNodeId(node.ID())
|
||||
continue
|
||||
}
|
||||
|
||||
topics, err := g.topicExtractor(ctx, node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Debug("Failed to extract topics, skipping")
|
||||
continue
|
||||
}
|
||||
|
||||
shouldPing, err := g.crawledPeers.updatePeer(node, topics)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Error("Failed to update crawled peers")
|
||||
}
|
||||
if !shouldPing {
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case g.pingCh <- *node:
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupLoop periodically removes peers that the filter rejects or that
|
||||
// have no topics of interest. It uses the same context lifecycle as other
|
||||
// background loops.
|
||||
func (g *GossipPeerCrawler) cleanupLoop() {
|
||||
ticker := time.NewTicker(cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Initial cleanup to catch any leftovers from startup state
|
||||
g.cleanup()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
g.cleanup()
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup scans the crawled peer set and removes entries that either fail
|
||||
// the current peer filter or have no topics of interest remaining.
|
||||
func (g *GossipPeerCrawler) cleanup() {
|
||||
cp := g.crawledPeers
|
||||
|
||||
// Snapshot current peers to evaluate without holding the lock during
|
||||
// filter and topic extraction.
|
||||
cp.mu.RLock()
|
||||
peers := make([]*peerNode, 0, len(cp.peerNodeByPid))
|
||||
for _, p := range cp.peerNodeByPid {
|
||||
peers = append(peers, p)
|
||||
}
|
||||
cp.mu.RUnlock()
|
||||
|
||||
for _, p := range peers {
|
||||
// Remove peers that no longer pass the filter
|
||||
if !g.peerFilter(p.node) {
|
||||
cp.removePeerByNodeId(p.node.ID())
|
||||
continue
|
||||
}
|
||||
|
||||
// Re-extract topics; if the extractor errors or yields none, drop the peer.
|
||||
topics, err := g.topicExtractor(g.ctx, p.node)
|
||||
if err != nil || len(topics) == 0 {
|
||||
cp.removePeerByNodeId(p.node.ID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// enodeToPeerID converts an enode record to a peer ID.
|
||||
func enodeToPeerID(n *enode.Node) (peer.ID, error) {
|
||||
info, _, err := convertToAddrInfo(n)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("converting enode to addr info: %w", err)
|
||||
}
|
||||
if info == nil {
|
||||
return "", errors.New("peer info is nil")
|
||||
}
|
||||
return info.ID, nil
|
||||
}
|
||||
@@ -1,787 +0,0 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
require2 "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Helpers for crawledPeers tests
|
||||
func newTestCrawledPeers() *crawledPeers {
|
||||
return &crawledPeers{
|
||||
peerNodeByEnode: make(map[enode.ID]*peerNode),
|
||||
peerNodeByPid: make(map[peer.ID]*peerNode),
|
||||
peersByTopic: make(map[string]map[*peerNode]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func addPeerWithTopics(t *testing.T, cp *crawledPeers, node *enode.Node, topics []string, pinged bool) *peerNode {
|
||||
t.Helper()
|
||||
pid, err := enodeToPeerID(node)
|
||||
require.NoError(t, err)
|
||||
p := &peerNode{
|
||||
isPinged: pinged,
|
||||
node: node,
|
||||
peerID: pid,
|
||||
topics: make(map[string]struct{}),
|
||||
}
|
||||
cp.mu.Lock()
|
||||
cp.peerNodeByEnode[p.node.ID()] = p
|
||||
cp.peerNodeByPid[p.peerID] = p
|
||||
cp.updateTopicsUnlocked(p, topics)
|
||||
cp.mu.Unlock()
|
||||
return p
|
||||
}
|
||||
|
||||
func TestUpdateStatusToPinged(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target *enode.Node
|
||||
expectPinged map[enode.ID]bool
|
||||
}{
|
||||
{
|
||||
name: "sets pinged for existing peer",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
|
||||
},
|
||||
target: node1,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "idempotent when already pinged",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, true)
|
||||
},
|
||||
target: node1,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no change when peer missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
|
||||
},
|
||||
target: node2,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.updateStatusToPinged(tc.target.ID())
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
for id, exp := range tc.expectPinged {
|
||||
if p := cp.peerNodeByEnode[id]; p != nil {
|
||||
require.Equal(t, exp, p.isPinged)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveTopic(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
topic1 := "t1"
|
||||
topic2 := "t2"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
topic string
|
||||
check func(*testing.T, *crawledPeers)
|
||||
}{
|
||||
{
|
||||
name: "removes topic from all peers and index",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1", "t2"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
topic: topic1,
|
||||
check: func(t *testing.T, cp *crawledPeers) {
|
||||
_, ok := cp.peersByTopic[topic1]
|
||||
require.False(t, ok)
|
||||
for _, p := range cp.peerNodeByPid {
|
||||
_, has := p.topics[topic1]
|
||||
require.False(t, has)
|
||||
}
|
||||
// Ensure other topics remain
|
||||
_, ok = cp.peersByTopic[topic2]
|
||||
require.True(t, ok)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no-op when topic missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t2"}, true)
|
||||
},
|
||||
topic: topic1,
|
||||
check: func(t *testing.T, cp *crawledPeers) {
|
||||
_, ok := cp.peersByTopic[topic2]
|
||||
require.True(t, ok)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removeTopic(tc.topic)
|
||||
tc.check(t, cp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeer(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target enode.ID
|
||||
wantTopics int
|
||||
}{
|
||||
{
|
||||
name: "removes existing peer and prunes empty topic",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: node1.ID(),
|
||||
wantTopics: 0,
|
||||
},
|
||||
{
|
||||
name: "removes only targeted peer; keeps topic for other",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
target: node1.ID(),
|
||||
wantTopics: 1, // byTopic should still have t1 with one peer
|
||||
},
|
||||
{
|
||||
name: "no-op when peer missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: node2.ID(),
|
||||
wantTopics: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removePeerByNodeId(tc.target)
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
require.Len(t, cp.peersByTopic, tc.wantTopics)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeerId(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
pid1, err := enodeToPeerID(node1)
|
||||
require.NoError(t, err)
|
||||
pid2, err := enodeToPeerID(node2)
|
||||
require.NoError(t, err)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target peer.ID
|
||||
wantTopics int
|
||||
wantPeers int
|
||||
}{
|
||||
{
|
||||
name: "removes existing peer by id and prunes topic",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: pid1,
|
||||
wantTopics: 0,
|
||||
wantPeers: 0,
|
||||
},
|
||||
{
|
||||
name: "removes only targeted peer id; keeps topic for other",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
target: pid1,
|
||||
wantTopics: 1,
|
||||
wantPeers: 1,
|
||||
},
|
||||
{
|
||||
name: "no-op when peer id missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: pid2,
|
||||
wantTopics: 1,
|
||||
wantPeers: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removePeerByPeerId(tc.target)
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
require.Len(t, cp.peersByTopic, tc.wantTopics)
|
||||
require.Len(t, cp.peerNodeByPid, tc.wantPeers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateCrawledIfNewer(t *testing.T) {
|
||||
newCrawler := func() (*crawledPeers, *GossipPeerCrawler, func()) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
g := &GossipPeerCrawler{
|
||||
ctx: ctx,
|
||||
pingCh: make(chan enode.Node, 8),
|
||||
}
|
||||
cp := newTestCrawledPeers()
|
||||
return cp, g, cancel
|
||||
}
|
||||
|
||||
// Helper: local node that will cause enodeToPeerID to fail (no TCP/UDP multiaddrs)
|
||||
newNodeNoPorts := func(t *testing.T) *enode.Node {
|
||||
_, privKey := createAddrAndPrivKey(t)
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
ln := enode.NewLocalNode(db, privKey)
|
||||
// Do not set TCP/UDP; keep only IP
|
||||
ln.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
return ln.Node()
|
||||
}
|
||||
|
||||
// Ensure both A nodes have the same enode.ID but differing seq
|
||||
ln := createTestNodeRandom(t)
|
||||
nodeA1 := ln.Node()
|
||||
setNodeSeq(ln, nodeA1.Seq()+1)
|
||||
nodeA2 := ln.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
arrange func(*crawledPeers)
|
||||
invokeNode *enode.Node
|
||||
invokeTopics []string
|
||||
expectedShouldPing bool
|
||||
expectErr bool
|
||||
assert func(*testing.T, *crawledPeers, <-chan enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "new peer with topics adds peer and pings",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: []string{"a"},
|
||||
expectedShouldPing: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Len(t, cp.peerNodeByEnode, 1)
|
||||
require.Len(t, cp.peerNodeByPid, 1)
|
||||
require.Contains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new peer with empty topics is removed",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: nil,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
require.Empty(t, cp.peersByTopic)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer lower seq is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA2, []string{"x"}, false) // higher seq exists
|
||||
},
|
||||
invokeNode: nodeA1, // lower seq
|
||||
invokeTopics: []string{"a", "b"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
require.NotContains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer equal seq is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: []string{"a"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
require.NotContains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer higher seq updates topics and pings",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: []string{"a"},
|
||||
expectedShouldPing: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.NotContains(t, cp.peersByTopic, "x")
|
||||
require.Contains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer higher seq but empty topics removes peer",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: nil,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "corrupted existing entry with nil node is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
pid, _ := enodeToPeerID(nodeA1)
|
||||
cp.mu.Lock()
|
||||
pn := &peerNode{node: nil, peerID: pid, topics: map[string]struct{}{"x": {}}}
|
||||
cp.peerNodeByEnode[nodeA1.ID()] = pn
|
||||
cp.peerNodeByPid[pid] = pn
|
||||
cp.peersByTopic["x"] = map[*peerNode]struct{}{pn: {}}
|
||||
cp.mu.Unlock()
|
||||
},
|
||||
expectErr: true,
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: []string{"a"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new peer with no ports causes enodeToPeerID error; no add",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: newNodeNoPorts(t),
|
||||
invokeTopics: []string{"a"},
|
||||
expectErr: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
require.Empty(t, cp.peersByTopic)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp, g, cancel := newCrawler()
|
||||
defer cancel()
|
||||
tc.arrange(cp)
|
||||
shouldPing, err := cp.updatePeer(tc.invokeNode, tc.invokeTopics)
|
||||
if tc.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, shouldPing, tc.expectedShouldPing)
|
||||
tc.assert(t, cp, g.pingCh)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeersForTopic(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
newCrawler := func(filter gossipcrawler.PeerFilterFunc) (*GossipPeerCrawler, *crawledPeers) {
|
||||
g := &GossipPeerCrawler{
|
||||
peerFilter: filter,
|
||||
scorer: func(peer.ID) float64 { return 0 },
|
||||
crawledPeers: newTestCrawledPeers(),
|
||||
}
|
||||
return g, g.crawledPeers
|
||||
}
|
||||
|
||||
// Prepare nodes
|
||||
ln1 := createTestNodeRandom(t)
|
||||
ln2 := createTestNodeRandom(t)
|
||||
ln3 := createTestNodeRandom(t)
|
||||
n1, n2, n3 := ln1.Node(), ln2.Node(), ln3.Node()
|
||||
topic := "top"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
filter gossipcrawler.PeerFilterFunc
|
||||
setup func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers)
|
||||
wantIDs []enode.ID
|
||||
}{
|
||||
{
|
||||
name: "no peers for topic returns empty",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {},
|
||||
wantIDs: nil,
|
||||
},
|
||||
{
|
||||
name: "excludes unpinged peers",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
// Add one pinged and one not pinged on same topic
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
addPeerWithTopics(t, cp, n2, []string{string(topic)}, false)
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "applies peer filter to exclude",
|
||||
filter: func(n *enode.Node) bool { return n.ID() != n2.ID() },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "ignores peerNode with nil node",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
// Add n2 then set its node to nil to simulate corrupted entry
|
||||
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
cp.mu.Lock()
|
||||
p2.node = nil
|
||||
cp.mu.Unlock()
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "sorted by score descending",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
// Add three pinged peers
|
||||
p1 := addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
p3 := addPeerWithTopics(t, cp, n3, []string{string(topic)}, true)
|
||||
// Provide a deterministic scoring function
|
||||
scores := map[peer.ID]float64{
|
||||
p1.peerID: 3.0,
|
||||
p2.peerID: 2.0,
|
||||
p3.peerID: 1.0,
|
||||
}
|
||||
g.scorer = func(id peer.ID) float64 { return scores[id] }
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID(), n2.ID(), n3.ID()},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
g, cp := newCrawler(tc.filter)
|
||||
tc.setup(t, g, cp)
|
||||
got := g.PeersForTopic(topic)
|
||||
var gotIDs []enode.ID
|
||||
for _, n := range got {
|
||||
gotIDs = append(gotIDs, n.ID())
|
||||
}
|
||||
if tc.wantIDs == nil {
|
||||
require.Empty(t, gotIDs)
|
||||
return
|
||||
}
|
||||
require.Equal(t, tc.wantIDs, gotIDs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawler_AddsAndPingsPeer(t *testing.T) {
|
||||
// Create a test node with valid ENR entries (IP/TCP/UDP)
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
|
||||
// Prepare a mock iterator returning our single node
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
// Prepare a mock listener with successful Ping
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
// Inject a permissive peer filter
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
|
||||
|
||||
// Create crawler with small intervals
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 4, filter, scorer)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assign a simple topic extractor
|
||||
topic := "test/topic"
|
||||
topicExtractor := func(ctx context.Context, n *enode.Node) ([]string, error) {
|
||||
return []string{topic}, nil
|
||||
}
|
||||
|
||||
// Run ping loop in background and perform a single crawl
|
||||
require.NoError(t, g.Start(topicExtractor))
|
||||
|
||||
// Verify that the peer has been indexed under the topic and marked as pinged
|
||||
require2.Eventually(t, func() bool {
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
|
||||
peers := g.crawledPeers.peersByTopic[topic]
|
||||
if len(peers) == 0 {
|
||||
return false
|
||||
}
|
||||
// Fetch the single peerNode and check status
|
||||
for pn := range peers {
|
||||
if pn == nil {
|
||||
return false
|
||||
}
|
||||
return pn.isPinged
|
||||
}
|
||||
return false
|
||||
}, 2*time.Second, 10*time.Millisecond)
|
||||
}
|
||||
|
||||
func TestCrawler_SkipsPeer_WhenFilterRejects(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
// Reject all peers via injected filter
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return false })
|
||||
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
|
||||
if err != nil {
|
||||
t.Fatalf("NewGossipPeerCrawler error: %v", err)
|
||||
}
|
||||
|
||||
topic := "test/topic"
|
||||
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic}, nil }
|
||||
|
||||
g.crawl()
|
||||
|
||||
// Verify no peers are indexed, because filter rejected the node
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
if len(g.crawledPeers.peerNodeByEnode) != 0 || len(g.crawledPeers.peerNodeByPid) != 0 || len(g.crawledPeers.peersByTopic) != 0 {
|
||||
t.Fatalf("expected no peers indexed, got byEnode=%d byPeerId=%d byTopic=%d",
|
||||
len(g.crawledPeers.peerNodeByEnode), len(g.crawledPeers.peerNodeByPid), len(g.crawledPeers.peersByTopic))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawler_RemoveTopic_RemovesTopicFromIndexes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
|
||||
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
|
||||
if err != nil {
|
||||
t.Fatalf("NewGossipPeerCrawler error: %v", err)
|
||||
}
|
||||
|
||||
topic1 := "test/topic1"
|
||||
topic2 := "test/topic2"
|
||||
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic1, topic2}, nil }
|
||||
|
||||
// Single crawl to index topics
|
||||
g.crawl()
|
||||
|
||||
// Remove one topic and assert it is pruned from all indexes
|
||||
g.RemoveTopic(topic1)
|
||||
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
|
||||
if _, ok := g.crawledPeers.peersByTopic[topic1]; ok {
|
||||
t.Fatalf("expected topic1 to be removed from byTopic")
|
||||
}
|
||||
|
||||
// Ensure peer still exists and retains topic2
|
||||
for _, pn := range g.crawledPeers.peerNodeByEnode {
|
||||
if _, has1 := pn.topics[topic1]; has1 {
|
||||
t.Fatalf("expected topic1 to be removed from peer topics")
|
||||
}
|
||||
if _, has2 := pn.topics[topic2]; !has2 {
|
||||
t.Fatalf("expected topic2 to remain for peer")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawledPeersMetrics(t *testing.T) {
|
||||
localNode1 := createTestNodeRandom(t)
|
||||
node1 := localNode1.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
pid1, err := enodeToPeerID(node1)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("updatePeer records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add first peer with two topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Add second peer with one overlapping topic
|
||||
_, err = cp.updatePeer(node2, []string{"topic1", "topic3"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(3), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removePeerByPeerId records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove first peer by peer ID
|
||||
cp.removePeerByPeerId(pid1)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removePeerByNodeId records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove first peer by enode ID
|
||||
cp.removePeerByNodeId(node1.ID())
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removeTopic records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers with overlapping topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove topic1 - this should also remove node2 which only had topic1
|
||||
cp.removeTopic("topic1")
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("updatePeer with empty topics removes peer and records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add peer with topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Increment sequence number to ensure update is processed
|
||||
setNodeSeq(localNode1, node1.Seq()+1)
|
||||
node1Updated := localNode1.Node()
|
||||
|
||||
// Update with empty topics - should remove the peer
|
||||
_, err = cp.updatePeer(node1Updated, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
load("@prysm//tools/go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["interface.go"],
|
||||
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler",
|
||||
visibility = [
|
||||
"//visibility:public",
|
||||
],
|
||||
deps = [
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -1,35 +0,0 @@
|
||||
package gossipcrawler

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
)

// TopicExtractor is a function that can determine the set of topics a current or potential peer
// is subscribed to based on key/value pairs from the ENR record.
type TopicExtractor func(ctx context.Context, node *enode.Node) ([]string, error)

// PeerFilterFunc defines the filtering interface used by the crawler to decide if a node
// is a valid candidate to index in the crawler.
type PeerFilterFunc func(*enode.Node) bool

type Crawler interface {
	Start(te TopicExtractor) error
	RemovePeerByPeerId(peerID peer.ID)
	RemoveTopic(topic string)
	PeersForTopic(topic string) []*enode.Node
}

// SubnetTopicsProvider returns the set of gossipsub topics the node
// should currently maintain peer connections for along with the minimum number of peers required
// for each topic.
type SubnetTopicsProvider func() map[string]int

// GossipDialer controls dialing peers for gossipsub topics based
// on a provided SubnetTopicsProvider and the p2p crawler.
type GossipDialer interface {
	Start(provider SubnetTopicsProvider) error
	DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error
}
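For illustration, here is a minimal sketch of a TopicExtractor implementation that reads an attestation-subnet bitfield out of a node's ENR; the "attnets" key name and the topic format string are assumptions for the example, not part of the interface above.

package example

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// attnetsTopicExtractor builds a closure with the TopicExtractor signature. It loads the raw
// "attnets" bitfield from the node's ENR and maps every set bit to an attestation subnet topic
// for the supplied fork digest.
func attnetsTopicExtractor(digest [4]byte) func(ctx context.Context, node *enode.Node) ([]string, error) {
	return func(_ context.Context, node *enode.Node) ([]string, error) {
		var raw []byte
		if err := node.Record().Load(enr.WithEntry("attnets", &raw)); err != nil {
			return nil, err // entry missing or malformed
		}
		var topics []string
		for i := 0; i < len(raw)*8; i++ {
			if raw[i/8]&(1<<(i%8)) != 0 {
				topics = append(topics, fmt.Sprintf("/eth2/%x/beacon_attestation_%d/ssz_snappy", digest, i))
			}
		}
		return topics, nil
	}
}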
@@ -225,10 +225,6 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
|
||||
return
|
||||
}
|
||||
|
||||
if s.crawler != nil {
|
||||
s.crawler.RemovePeerByPeerId(peerID)
|
||||
}
|
||||
|
||||
priorState, err := s.peers.ConnectionState(peerID)
|
||||
if err != nil {
|
||||
// Can happen if the peer has already disconnected, so...
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -98,13 +98,11 @@ type (
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
GossipDialer() gossipcrawler.GossipDialer
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshPersistentSubnets()
|
||||
DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
|
||||
FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
Crawler() gossipcrawler.Crawler
|
||||
}
|
||||
|
||||
// Sender abstracts the sending functionality from libp2p.
|
||||
|
||||
@@ -97,20 +97,6 @@ var (
|
||||
Help: "The number of data column sidecar message broadcast attempts.",
|
||||
})
|
||||
|
||||
	// Gossip Peer Crawler Metrics
	gossipCrawlerPeersByEnodeCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_peers_by_enode_count",
		Help: "The number of peers tracked by enode ID in the gossip peer crawler.",
	})
	gossipCrawlerPeersByPidCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_peers_by_pid_count",
		Help: "The number of peers tracked by peer ID in the gossip peer crawler.",
	})
	gossipCrawlerTopicsCount = promauto.NewGauge(prometheus.GaugeOpts{
		Name: "p2p_gossip_crawler_topics_count",
		Help: "The number of topics tracked in the gossip peer crawler.",
	})
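As a rough sketch of how gauges like these are typically kept in sync with the crawler's indexes (the index struct below is a hypothetical stand-in, not the crawler's real type):

package example

import "github.com/prometheus/client_golang/prometheus"

// crawlerIndex is a hypothetical stand-in for the crawler's internal lookup maps.
type crawlerIndex struct {
	byEnode map[string]struct{}
	byPid   map[string]struct{}
	byTopic map[string][]string
}

// recordMetrics sets each gauge to the current size of the corresponding index,
// typically called after every mutation while the index lock is still held.
func recordMetrics(idx *crawlerIndex, byEnode, byPid, topics prometheus.Gauge) {
	byEnode.Set(float64(len(idx.byEnode)))
	byPid.Set(float64(len(idx.byPid)))
	topics.Set(float64(len(idx.byTopic)))
}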
|
||||
// Gossip Tracer Metrics
|
||||
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "p2p_pubsub_topic_active",
|
||||
|
||||
@@ -4,17 +4,20 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
ecdsaprysm "github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
mplex "github.com/libp2p/go-libp2p-mplex"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
gomplex "github.com/libp2p/go-mplex"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -107,6 +110,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
libp2p.ConnectionGater(s),
|
||||
libp2p.Transport(libp2ptcp.NewTCPTransport),
|
||||
libp2p.DefaultMuxers,
|
||||
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
|
||||
libp2p.Security(noise.ID, noise.New),
|
||||
libp2p.Ping(false), // Disable Ping Service.
|
||||
}
|
||||
@@ -158,10 +162,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
|
||||
}
|
||||
|
||||
if cfg.EnableAutoNAT {
|
||||
options = append(options, libp2p.EnableAutoNATv2())
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
@@ -217,3 +217,8 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
|
||||
return cfg.Apply(libp2p.Identity(ifaceKey))
|
||||
}
|
||||
}
|
||||
|
||||
// Configures stream timeouts on mplex.
|
||||
func configureMplex() {
|
||||
gomplex.ResetStreamTimeout = 5 * time.Second
|
||||
}
|
||||
|
||||
@@ -132,6 +132,7 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
|
||||
}
|
||||
|
||||
func TestSetConnManagerOption(t *testing.T) {
|
||||
@@ -187,30 +188,6 @@ func checkLimit(t *testing.T, cm connmgr.ConnManager, expected int) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildOptions_EnableAutoNAT(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p2pCfg := &Config{
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
EnableAutoNAT: true,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
}
|
||||
svc := &Service{cfg: p2pCfg}
|
||||
var err error
|
||||
svc.privKey, err = privKey(svc.cfg)
|
||||
require.NoError(t, err)
|
||||
ipAddr := network.IPAddr()
|
||||
opts, err := svc.buildOptions(ipAddr, svc.privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that options were built without error when EnableAutoNAT is true.
|
||||
// The actual AutoNAT v2 behavior is tested by libp2p itself.
|
||||
var cfg libp2p.Config
|
||||
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestMultiAddressBuilderWithID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
|
||||
assert.Equal(t, address, resAddress, "Unexpected address")
|
||||
|
||||
resDirection, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress2, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
|
||||
assert.Equal(t, address2, resAddress2, "Unexpected address")
|
||||
|
||||
resDirection2, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,13 +6,11 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
|
||||
@@ -30,7 +28,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -64,10 +61,6 @@ var (
|
||||
// for the current peer limit status for the time period
|
||||
// defined below.
|
||||
pollingPeriod = 6 * time.Second
|
||||
|
||||
crawlTimeout = 5 * time.Second
|
||||
crawlInterval = 1 * time.Second
|
||||
maxConcurrentDials = int64(256)
|
||||
)
|
||||
|
||||
// Service for managing peer to peer (p2p) networking.
|
||||
@@ -102,8 +95,6 @@ type Service struct {
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
custodyInfoSet chan struct{}
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
crawler gossipcrawler.Crawler
|
||||
gossipDialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
type custodyInfo struct {
|
||||
@@ -163,6 +154,8 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
return nil, errors.Wrapf(err, "failed to build p2p options")
|
||||
}
|
||||
|
||||
// Sets mplex timeouts
|
||||
configureMplex()
|
||||
h, err := libp2p.New(opts...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create p2p host")
|
||||
@@ -170,7 +163,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
|
||||
s.host = h
|
||||
|
||||
// Gossip registration is done before we add in any new peers
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
// object.
|
||||
@@ -248,25 +241,6 @@ func (s *Service) Start() {
|
||||
|
||||
s.dv5Listener = listener
|
||||
go s.listenForNewNodes()
|
||||
crawler, err := NewGossipPeerCrawler(
|
||||
s.ctx,
|
||||
s,
|
||||
s.dv5Listener,
|
||||
crawlTimeout,
|
||||
crawlInterval,
|
||||
maxConcurrentDials,
|
||||
s.filterPeer,
|
||||
s.Peers().Scorers().Score,
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Failed to create peer crawler")
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
s.crawler = crawler
|
||||
// Initialise the gossipsub dialer which will be started
|
||||
// once the sync service is ready to provide subnet topics.
|
||||
s.gossipDialer = NewGossipPeerDialer(s.ctx, s.crawler, s.PubSub().ListPeers, s.DialPeers)
|
||||
}
|
||||
|
||||
s.started = true
|
||||
@@ -285,14 +259,6 @@ func (s *Service) Start() {
|
||||
// current epoch.
|
||||
s.RefreshPersistentSubnets()
|
||||
|
||||
if s.cfg.EnableAutoNAT {
|
||||
if err := s.subscribeReachabilityEvents(); err != nil {
|
||||
log.WithError(err).Error("Failed to subscribe to AutoNAT v2 reachability events")
|
||||
} else {
|
||||
log.Info("AutoNAT v2 enabled for address reachability detection")
|
||||
}
|
||||
}
|
||||
|
||||
// Periodic functions.
|
||||
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
|
||||
ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
|
||||
@@ -345,25 +311,12 @@ func (s *Service) Start() {
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
s.started = false
|
||||
|
||||
if s.dv5Listener != nil {
|
||||
s.dv5Listener.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Crawler returns the p2p service's peer crawler.
|
||||
func (s *Service) Crawler() gossipcrawler.Crawler {
|
||||
return s.crawler
|
||||
}
|
||||
|
||||
// GossipDialer returns the dialer responsible for maintaining
|
||||
// peer counts per gossipsub topic, if discovery is enabled.
|
||||
func (s *Service) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return s.gossipDialer
|
||||
}
|
||||
|
||||
// Status of the p2p service. Will return an error if the service is considered unhealthy to
|
||||
// indicate that this node should not serve traffic until the issue has been resolved.
|
||||
func (s *Service) Status() error {
|
||||
@@ -604,118 +557,3 @@ func (s *Service) downscorePeer(peerID peer.ID, reason string) {
|
||||
newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
func (s *Service) subscribeReachabilityEvents() error {
|
||||
sub, err := s.host.EventBus().Subscribe(new(event.EvtHostReachableAddrsChanged))
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscribing to reachability events: %w", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := sub.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close reachability event subscription")
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case ev := <-sub.Out():
|
||||
if event, ok := ev.(event.EvtHostReachableAddrsChanged); ok {
|
||||
log.WithFields(logrus.Fields{
|
||||
"reachable": multiaddrsToStrings(event.Reachable),
|
||||
"unreachable": multiaddrsToStrings(event.Unreachable),
|
||||
"unknown": multiaddrsToStrings(event.Unknown),
|
||||
}).Info("Address reachability changed")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
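The goroutine above follows a common libp2p event-bus pattern: drain the subscription until the context is canceled, then close it. A generic helper capturing just that pattern (illustrative, not part of the service) could look like:

package example

import (
	"context"

	"github.com/libp2p/go-libp2p/core/event"
)

// drainSubscription reads events from sub and passes each one to handle until ctx is
// canceled or the subscription channel is closed. The subscription is closed on exit.
func drainSubscription(ctx context.Context, sub event.Subscription, handle func(interface{})) {
	defer func() {
		_ = sub.Close()
	}()
	for {
		select {
		case <-ctx.Done():
			return
		case ev, ok := <-sub.Out():
			if !ok {
				return
			}
			handle(ev)
		}
	}
}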
|
||||
func multiaddrsToStrings(addrs []multiaddr.Multiaddr) []string {
|
||||
strs := make([]string, len(addrs))
|
||||
for i, a := range addrs {
|
||||
strs[i] = a.String()
|
||||
}
|
||||
return strs
|
||||
}
|
||||
|
||||
func AttestationSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return attestationSubnets(record)
|
||||
}
|
||||
|
||||
func SyncSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return syncSubnets(record)
|
||||
}
|
||||
|
||||
func DataColumnSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return dataColumnSubnets(nodeID, record)
|
||||
}
|
||||
|
||||
func DataColumnSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncCommitteeSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttestationSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlobSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcOptimisticToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcFinalityToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlockSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlockSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AggregateAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AggregateAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func VoluntaryExitSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ExitSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func ProposerSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ProposerSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttesterSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttesterSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncContributionAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncContributionAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlsToExecutionChangeSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlsToExecutionChangeSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
@@ -36,8 +35,7 @@ func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
|
||||
require.NoError(t, err, "Failed to p2p listen")
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen),
|
||||
libp2p.Security(noise.ID, noise.New)}...)
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
|
||||
require.NoError(t, err)
|
||||
return h, pkey, ipAddr
|
||||
}
|
||||
@@ -402,58 +400,3 @@ func TestService_connectWithPeer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_SubscribeReachabilityEvents(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := t.Context()
|
||||
|
||||
h, _, _ := createHost(t, 0)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create service with the host
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
host: h,
|
||||
cfg: &Config{EnableAutoNAT: true},
|
||||
}
|
||||
|
||||
// Get an emitter for the reachability event
|
||||
emitter, err := h.EventBus().Emitter(new(event.EvtHostReachableAddrsChanged))
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
if err := emitter.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
// Subscribe to reachability events
|
||||
require.NoError(t, s.subscribeReachabilityEvents())
|
||||
|
||||
// Create test multiaddrs for each reachability state
|
||||
reachableAddr, err := multiaddr.NewMultiaddr("/ip4/192.168.1.1/tcp/9000")
|
||||
require.NoError(t, err)
|
||||
unreachableAddr, err := multiaddr.NewMultiaddr("/ip4/10.0.0.1/tcp/9001")
|
||||
require.NoError(t, err)
|
||||
unknownAddr, err := multiaddr.NewMultiaddr("/ip4/172.16.0.1/tcp/9002")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Emit a reachability event with all address types
|
||||
err = emitter.Emit(event.EvtHostReachableAddrsChanged{
|
||||
Reachable: []multiaddr.Multiaddr{reachableAddr},
|
||||
Unreachable: []multiaddr.Multiaddr{unreachableAddr},
|
||||
Unknown: []multiaddr.Multiaddr{unknownAddr},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the event to be processed
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify the log message contains all addresses
|
||||
require.LogsContain(t, hook, "Address reachability changed")
|
||||
require.LogsContain(t, hook, "/ip4/192.168.1.1/tcp/9000")
|
||||
require.LogsContain(t, hook, "/ip4/10.0.0.1/tcp/9001")
|
||||
require.LogsContain(t, hook, "/ip4/172.16.0.1/tcp/9002")
|
||||
}
|
||||
|
||||
@@ -3,6 +3,9 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -11,16 +14,19 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
pb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -55,9 +61,223 @@ const dataColumnSubnetVal = 150
|
||||
|
||||
const errSavingSequenceNumber = "saving sequence number after updating subnets: %w"
|
||||
|
||||
// DialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
func (s *Service) nodeFilter(topic string, indices map[uint64]int) (func(node *enode.Node) (map[uint64]bool, error), error) {
	switch {
	case strings.Contains(topic, GossipAttestationMessage):
		return s.filterPeerForAttSubnet(indices), nil
	case strings.Contains(topic, GossipSyncCommitteeMessage):
		return s.filterPeerForSyncSubnet(indices), nil
	case strings.Contains(topic, GossipBlobSidecarMessage):
		return s.filterPeerForBlobSubnet(indices), nil
	case strings.Contains(topic, GossipDataColumnSidecarMessage):
		return s.filterPeerForDataColumnsSubnet(indices), nil
	default:
		return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
	}
}
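The dispatch above keys off substrings of the gossip topic name. A self-contained sketch of the same idea, with placeholder topic fragments instead of the package's Gossip* constants:

package example

import (
	"fmt"
	"strings"
)

// subnetKind reports which family of subnet filter should handle a topic,
// mirroring the switch in nodeFilter (the fragments here are placeholders).
func subnetKind(topic string) (string, error) {
	switch {
	case strings.Contains(topic, "beacon_attestation"):
		return "attestation", nil
	case strings.Contains(topic, "sync_committee"):
		return "sync", nil
	case strings.Contains(topic, "blob_sidecar"):
		return "blob", nil
	case strings.Contains(topic, "data_column_sidecar"):
		return "data_column", nil
	default:
		return "", fmt.Errorf("no subnet exists for provided topic: %s", topic)
	}
}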
|
||||
// FindAndDialPeersWithSubnets ensures that our node is connected to at least `minimumPeersPerSubnet`
// peers for each subnet listed in `subnets`.
// If the threshold is met for all subnets, this function returns immediately.
// Otherwise, it searches for new peers for the defective subnets and dials them.
// If `ctx` is canceled while searching for peers, the search is stopped, but newly found peers are still dialed.
// In this case, the function returns an error.
|
||||
func (s *Service) FindAndDialPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindAndDialPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
// Return early if the discovery listener isn't set.
|
||||
if s.dv5Listener == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
|
||||
defectiveSubnets := s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
for len(defectiveSubnets) > 0 {
|
||||
// Stop the search/dialing loop if the context is canceled.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peersToDial, err := func() ([]*enode.Node, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
|
||||
defer cancel()
|
||||
|
||||
peersToDial, err := s.findPeersWithSubnets(ctx, topicFormat, digest, minimumPeersPerSubnet, defectiveSubnets)
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dial new peers in batches.
|
||||
s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
|
||||
defectiveSubnets = s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
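Stripped of the Prysm-specific pieces, the function above is a bounded retry loop: recompute what is missing, search, dial, and repeat until nothing is missing or the context is canceled. A generic sketch with placeholder callbacks:

package example

import "context"

// dialUntilSatisfied loops until no subnet is defective or ctx is canceled. The callbacks
// stand in for defectiveSubnets, findPeersWithSubnets and dialPeers respectively.
func dialUntilSatisfied(
	ctx context.Context,
	defective func() map[uint64]int,
	find func(ctx context.Context, missing map[uint64]int) ([]string, error),
	dial func(ctx context.Context, peers []string),
) error {
	for missing := defective(); len(missing) > 0; missing = defective() {
		if err := ctx.Err(); err != nil {
			return err
		}
		peers, err := find(ctx, missing)
		if err != nil {
			return err
		}
		dial(ctx, peers)
	}
	return nil
}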
|
||||
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
|
||||
// It decrements the defective count for each subnet the node satisfies and removes subnets
|
||||
// that are fully satisfied (count reaches 0).
|
||||
func updateDefectiveSubnets(
|
||||
nodeSubnets map[uint64]bool,
|
||||
defectiveSubnets map[uint64]int,
|
||||
) {
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
defectiveSubnets[subnet]--
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
}
|
||||
}
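A small worked example of the decrement-and-delete behaviour, with hypothetical values:

package example

import "fmt"

func exampleUpdateDefective() {
	// Subnet 3 still needs 2 peers; subnet 7 needs 1.
	defective := map[uint64]int{3: 2, 7: 1}
	// The newly found node advertises subnets 3 and 7.
	nodeSubnets := map[uint64]bool{3: true, 7: true}

	for subnet := range defective {
		if !nodeSubnets[subnet] {
			continue
		}
		defective[subnet]--
		if defective[subnet] == 0 {
			delete(defective, subnet)
		}
	}

	fmt.Println(defective) // map[3:1]: subnet 7 is satisfied, subnet 3 still needs one peer.
}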
|
||||
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
|
||||
// until enough peers are found or the context is canceled.
|
||||
// It returns new peers found during the search.
|
||||
func (s *Service) findPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
defectiveSubnetsOrigin map[uint64]int,
|
||||
) ([]*enode.Node, error) {
|
||||
// Copy the defective subnets map to avoid modifying the original map.
|
||||
defectiveSubnets := make(map[uint64]int, len(defectiveSubnetsOrigin))
|
||||
maps.Copy(defectiveSubnets, defectiveSubnetsOrigin)
|
||||
|
||||
// Create a discovery iterator to find new peers.
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
|
||||
// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
|
||||
// So it is important to close the iterator when the context is done to ensure
|
||||
// that the search does not hang indefinitely.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
iterator.Close()
|
||||
}()
|
||||
|
||||
// Retrieve the filter function that will be used to filter nodes based on the defective subnets.
|
||||
filter, err := s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
// Crawl the network for peers subscribed to the defective subnets.
|
||||
nodeByNodeID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for len(defectiveSubnets) > 0 && iterator.Next() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, err
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// Treat nodes already present in nodeByNodeID that reappear with a higher seq number as new peers.
// Skip peers that do not match the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// This means the existing peer with the lower sequence number is no longer valid.
delete(nodeByNodeID, existing.ID())
// Note: we choose not to roll back changes to the defective subnets map here; instead,
// s.defectiveSubnets is called again after dialing peers. This case should rarely happen
// and is handled by a second iteration in FindAndDialPeersWithSubnets.
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"nodeID": node.ID(),
|
||||
"topicFormat": topicFormat,
|
||||
}).Debug("Could not get needed subnets from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if len(nodeSubnets) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// We found a new peer. Modify the defective subnets map
|
||||
// and the filter accordingly.
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}
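The deduplication rule in the loop above (when a node ID is seen twice, keep the record with the higher ENR sequence number) can be isolated as a small helper. A sketch using a simplified record type instead of *enode.Node:

package example

// record is a simplified stand-in for an ENR-backed node: an identity plus a sequence number.
type record struct {
	id  string
	seq uint64
}

// dedupeBySeq keeps, for each id, only the record with the highest sequence number,
// mirroring how findPeersWithSubnets treats re-discovered nodes.
func dedupeBySeq(records []record) map[string]record {
	byID := make(map[string]record, len(records))
	for _, r := range records {
		if existing, ok := byID[r.id]; ok && existing.seq >= r.seq {
			continue // keep the existing record; this one is stale or a duplicate
		}
		byID[r.id] = r
	}
	return byID
}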
|
||||
// defectiveSubnets returns a map of subnets that have fewer than the minimum peer count.
|
||||
func (s *Service) defectiveSubnets(
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) map[uint64]int {
|
||||
missingCountPerSubnet := make(map[uint64]int, len(subnets))
|
||||
for subnet := range subnets {
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + s.Encoding().ProtocolSuffix()
|
||||
peers := s.pubsub.ListPeers(topic)
|
||||
peerCount := len(peers)
|
||||
if peerCount < minimumPeersPerSubnet {
|
||||
missingCountPerSubnet[subnet] = minimumPeersPerSubnet - peerCount
|
||||
}
|
||||
}
|
||||
|
||||
return missingCountPerSubnet
|
||||
}
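Conceptually the computation is just "minimum minus current peer count, omitting satisfied subnets". A standalone sketch with a hypothetical peer-count callback in place of pubsub.ListPeers:

package example

// missingPerSubnet reports how many additional peers each subnet needs, given a callback
// returning the current peer count for a subnet (countPeers is hypothetical).
func missingPerSubnet(subnets map[uint64]bool, minimum int, countPeers func(subnet uint64) int) map[uint64]int {
	missing := make(map[uint64]int, len(subnets))
	for subnet := range subnets {
		if have := countPeers(subnet); have < minimum {
			missing[subnet] = minimum - have
		}
	}
	return missing
}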
|
||||
// dialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
|
||||
// In case of a dial failure, it logs the error but continues dialing other peers.
|
||||
func (s *Service) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
var mut sync.Mutex
|
||||
|
||||
counter := uint(0)
|
||||
@@ -99,13 +319,75 @@ func (s *Service) DialPeers(ctx context.Context, maxConcurrentDials int, nodes [
|
||||
return counter
|
||||
}
|
||||
|
||||
// filterPeerForAttSubnet returns a method that filters peers specifically for a particular attestation subnet.
|
||||
func (s *Service) filterPeerForAttSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "attestation subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular sync subnet.
|
||||
func (s *Service) filterPeerForSyncSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := syncSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "sync subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular blob subnet.
// All peers are expected to be subscribed to all blob subnets.
|
||||
func (s *Service) filterPeerForBlobSubnet(indices map[uint64]int) func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
result := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
result[i] = true
|
||||
}
|
||||
|
||||
return func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular data column subnet.
|
||||
func (s *Service) filterPeerForDataColumnsSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := dataColumnSubnets(node.ID(), node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// We use a lower peer threshold for broadcasting an object than for searching
// for a subnet, so that even in the event of poor peer connectivity we can
// still broadcast an attestation.
|
||||
func (s *Service) hasPeerWithTopic(topic string) bool {
|
||||
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
|
||||
topic := subnetTopic + s.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.pubsub.ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
@@ -430,3 +712,16 @@ func byteCount(bitCount int) int {
|
||||
}
|
||||
return numOfBytes
|
||||
}
|
||||
|
||||
// intersect intersects two maps and returns a new map containing only the keys
// that are present in both maps.
|
||||
func intersect(left map[uint64]int, right map[uint64]bool) map[uint64]bool {
|
||||
result := make(map[uint64]bool, min(len(left), len(right)))
|
||||
for i := range left {
|
||||
if right[i] {
|
||||
result[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
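Usage is straightforward: the left map carries the "still needed" counts and the right map the subnets a node advertises; the result is the overlap. A short hypothetical call:

package example

import "fmt"

func exampleIntersect() {
	needed := map[uint64]int{1: 2, 4: 1}            // subnets we still need peers for
	advertised := map[uint64]bool{1: true, 9: true} // subnets the node advertises

	overlap := make(map[uint64]bool, len(needed))
	for i := range needed {
		if advertised[i] {
			overlap[i] = true
		}
	}
	fmt.Println(overlap) // map[1:true]: the node only helps with subnet 1.
}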
|
||||
@@ -3,15 +3,14 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
@@ -25,7 +24,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
require2 "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
@@ -124,21 +122,6 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
// Start the service.
|
||||
service.Start()
|
||||
|
||||
// start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
|
||||
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(i, true)
|
||||
@@ -146,7 +129,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
service.dv5Listener.LocalNode().Set(entry)
|
||||
|
||||
// Join and subscribe to the subnet, needed by libp2p.
|
||||
topicName := AttestationSubnetTopic(bootNodeForkDigest, i)
|
||||
topicName := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, i) + "/ssz_snappy"
|
||||
topic, err := service.pubsub.Join(topicName)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -186,65 +169,23 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
service.Start()
|
||||
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
|
||||
// start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
var topics []string
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
defer func() {
|
||||
err := service.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
builder := func(idx uint64) string {
|
||||
return AttestationSubnetTopic(bootNodeForkDigest, idx)
|
||||
}
|
||||
defectiveSubnetsCount := defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, subnetCount, defectiveSubnetsCount)
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
defectiveSubnets := service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, subnetCount, len(defectiveSubnets))
|
||||
|
||||
var topicsToDial []string
|
||||
for s := range subnets {
|
||||
topicsToDial = append(topicsToDial, builder(s))
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, topic := range topicsToDial {
|
||||
err = service.GossipDialer().DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
err = service.FindAndDialPeersWithSubnets(ctxWithTimeOut, AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.NoError(t, err)
|
||||
|
||||
defectiveSubnetsCount = defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, 0, defectiveSubnetsCount)
|
||||
}
|
||||
|
||||
func defectiveSubnets(service *Service, topics []string, minimumPeersPerSubnet int) int {
|
||||
count := 0
|
||||
for _, topic := range topics {
|
||||
peers := service.pubsub.ListPeers(topic)
|
||||
if len(peers) < minimumPeersPerSubnet {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
defectiveSubnets = service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, 0, len(defectiveSubnets))
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
|
||||
@@ -640,6 +581,7 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -800,13 +742,47 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
s := createTestService(t, db)
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -816,6 +792,7 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -968,7 +945,23 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
s := createTestService(t, db)
|
||||
// Create test P2P instance
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create mock service
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Mark specific node versions as "bad" to simulate filterPeer failures
|
||||
for _, node := range tt.nodes {
|
||||
@@ -986,11 +979,30 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1034,6 +1046,7 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
@@ -1054,7 +1067,21 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
service := createTestService(t, db)
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIteratorForSubnets{
|
||||
@@ -1078,80 +1105,22 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
crawler := startTestCrawler(t, service, service.dv5Listener.(*testp2p.MockListener))
|
||||
|
||||
// Verification using verifyCrawlerPeers with a custom eval function
|
||||
verifyCrawlerPeers(t, crawler, service, map[uint64]int{1: 1}, 1, "only node2 should remain", func(t *testing.T, result []*enode.Node) {
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID())
|
||||
})
|
||||
}
|
||||
|
||||
func createTestService(t *testing.T, d db.Database) *Service {
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: d,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func startTestCrawler(t *testing.T, s *Service, listener *testp2p.MockListener) *GossipPeerCrawler {
|
||||
digest, err := s.currentForkDigest()
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
crawler, err := NewGossipPeerCrawler(t.Context(), s, listener,
|
||||
1*time.Second, 100*time.Millisecond, 10, gossipcrawler.PeerFilterFunc(s.filterPeer),
|
||||
s.Peers().Scorers().Score)
|
||||
|
||||
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
s.crawler = crawler
|
||||
require.NoError(t, crawler.Start(func(ctx context.Context, n *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(n.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(digest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
}))
|
||||
return crawler
|
||||
}
|
||||
|
||||
func verifyCrawlerPeers(t *testing.T, crawler *GossipPeerCrawler, s *Service, subnets map[uint64]int, expectedCount int, description string, eval func(t *testing.T, result []*enode.Node)) {
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
topics = append(topics, AttestationSubnetTopic(digest, subnet))
|
||||
}
|
||||
|
||||
var results []*enode.Node
|
||||
require2.Eventually(t, func() bool {
|
||||
results = results[:0]
|
||||
seen := make(map[enode.ID]struct{})
|
||||
for _, topic := range topics {
|
||||
peers := crawler.PeersForTopic(topic)
|
||||
for _, peer := range peers {
|
||||
if _, ok := seen[peer.ID()]; !ok {
|
||||
seen[peer.ID()] = struct{}{}
|
||||
results = append(results, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(results) == expectedCount
|
||||
}, 1*time.Second, 100*time.Millisecond, description)
|
||||
|
||||
if eval != nil {
|
||||
eval(t, results)
|
||||
}
|
||||
require.Equal(t, 1, len(result))
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
|
||||
}
|
||||
|
||||
@@ -21,9 +21,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/gossipcrawler:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -41,16 +41,6 @@ func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID)
|
||||
|
||||
}
|
||||
|
||||
// Crawler -- fake.
|
||||
func (*FakeP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer -- fake.
|
||||
func (*FakeP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler -- fake.
|
||||
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
}
|
||||
@@ -80,6 +70,11 @@ func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*FakeP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
@@ -98,11 +93,6 @@ func (*FakeP2P) Peers() *peers.Status {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DialPeers -- fake.
|
||||
func (*FakeP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// PublishToTopic -- fake.
|
||||
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
return nil
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@@ -19,7 +19,6 @@ type MockPeerManager struct {
|
||||
BHost host.Host
|
||||
DiscoveryAddr []multiaddr.Multiaddr
|
||||
FailDiscoveryAddr bool
|
||||
Dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
@@ -47,11 +46,6 @@ func (m MockPeerManager) NodeID() enode.ID {
|
||||
return enode.ID{}
|
||||
}
|
||||
|
||||
// GossipDialer returns the configured dialer mock, if any.
|
||||
func (m MockPeerManager) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return m.Dialer
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
@@ -63,15 +57,10 @@ func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
// RefreshPersistentSubnets .
|
||||
func (*MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// DialPeers
|
||||
func (p *MockPeerManager) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
// FindAndDialPeersWithSubnet .
|
||||
func (*MockPeerManager) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
// Crawler.
|
||||
func (*MockPeerManager) Crawler() gossipcrawler.Crawler {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -66,7 +66,6 @@ type TestP2P struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
enr *enr.Record
|
||||
dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -184,7 +183,11 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
if _, err := p.Encoding().EncodeGossip(buf, castedMsg); err != nil {
|
||||
p.t.Fatalf("Failed to encode message: %v", err)
|
||||
}
|
||||
topicHandle, err := ps.Join(topic)
|
||||
digest, err := p.ForkDigest()
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
topicHandle, err := ps.Join(fmt.Sprintf(topic, digest) + p.Encoding().ProtocolSuffix())
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
@@ -276,9 +279,6 @@ func (p *TestP2P) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub
|
||||
// LeaveTopic closes topic and removes corresponding handler from list of joined topics.
|
||||
// This method will return error if there are outstanding event handlers or subscriptions.
|
||||
func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if t, ok := p.joinedTopics[topic]; ok {
|
||||
if err := t.Close(); err != nil {
|
||||
return err
|
||||
@@ -419,8 +419,9 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
return p.peers
|
||||
}
|
||||
|
||||
func (p *TestP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*TestP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
@@ -557,40 +558,3 @@ func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
|
||||
return custodyGroupCount
|
||||
}
|
||||
|
||||
// MockCrawler is a minimal mock implementation of PeerCrawler for testing
|
||||
type MockCrawler struct{}
|
||||
|
||||
// Start does nothing as this is a mock
|
||||
func (m *MockCrawler) Start(gossipcrawler.TopicExtractor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop does nothing as this is a mock
|
||||
func (m *MockCrawler) Stop() {}
|
||||
|
||||
// SetTopicExtractor does nothing as this is a mock
|
||||
func (m *MockCrawler) SetTopicExtractor(extractor func(context.Context, *enode.Node) ([]string, error)) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTopic does nothing as this is a mock
|
||||
func (m *MockCrawler) RemoveTopic(topic string) {}
|
||||
|
||||
// RemovePeerID does nothing as this is a mock
|
||||
func (m *MockCrawler) RemovePeerByPeerId(pid peer.ID) {}
|
||||
|
||||
// PeersForTopic returns empty list as this is a mock
|
||||
func (m *MockCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
return []*enode.Node{}
|
||||
}
|
||||
|
||||
// Crawler returns a mock crawler implementation for testing.
|
||||
func (*TestP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer returns nil for tests that do not exercise dialer behaviour.
|
||||
func (p *TestP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return p.dialer
|
||||
}
|
||||
|
||||
@@ -204,6 +204,9 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientOptimisticUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyOptimisticUpdateDeneb(), nil
|
||||
},
|
||||
}
|
||||
|
||||
// Reset our light client finality update map.
|
||||
@@ -223,5 +226,8 @@ func InitializeDataMaps() {
|
||||
bytesutil.ToBytes4(params.BeaconConfig().ElectraForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
bytesutil.ToBytes4(params.BeaconConfig().FuluForkVersion): func() (interfaces.LightClientFinalityUpdate, error) {
|
||||
return lightclientConsensusTypes.NewEmptyFinalityUpdateElectra(), nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -130,6 +130,10 @@ func (s *Server) SubmitAttestationsV2(w http.ResponseWriter, r *http.Request) {
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitAttestationsV2")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
@@ -238,22 +242,14 @@ func (s *Server) handleAttestationsElectra(
|
||||
},
|
||||
})
|
||||
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(ctx, singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get target state for attestation")
|
||||
}
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(ctx, targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get committee for attestation")
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
wantedEpoch := slots.ToEpoch(att.Data.Slot)
|
||||
// Broadcast first using CommitteeId directly (fast path)
|
||||
// This matches gRPC behavior and avoids blocking on state fetching
|
||||
wantedEpoch := slots.ToEpoch(singleAtt.Data.Slot)
|
||||
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not get head validator indices")
|
||||
}
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), att.GetCommitteeIndex(), att.Data.Slot)
|
||||
subnet := corehelpers.ComputeSubnetFromCommitteeAndSlot(uint64(len(vals)), singleAtt.CommitteeId, singleAtt.Data.Slot)
|
||||
if err = s.Broadcaster.BroadcastAttestation(ctx, subnet, singleAtt); err != nil {
|
||||
failedBroadcasts = append(failedBroadcasts, &server.IndexedError{
|
||||
Index: i,
|
||||
@@ -264,17 +260,35 @@ func (s *Server) handleAttestationsElectra(
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
// Save to pool after broadcast (slow path - requires state fetching)
|
||||
// Run in goroutine to avoid blocking the HTTP response
|
||||
go func() {
|
||||
for _, singleAtt := range validAttestations {
|
||||
targetState, err := s.AttestationStateFetcher.AttestationTargetState(context.Background(), singleAtt.Data.Target)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get target state for attestation")
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
committee, err := corehelpers.BeaconCommitteeFromState(context.Background(), targetState, singleAtt.Data.Slot, singleAtt.CommitteeId)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get committee for attestation")
|
||||
continue
|
||||
}
|
||||
att := singleAtt.ToAttestationElectra(committee)
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = s.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err = s.AttestationsPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
if len(failedBroadcasts) > 0 {
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -470,6 +484,10 @@ func (s *Server) SubmitSyncCommitteeSignatures(w http.ResponseWriter, r *http.Re
|
||||
ctx, span := trace.StartSpan(r.Context(), "beacon.SubmitPoolSyncCommitteeSignatures")
|
||||
defer span.End()
|
||||
|
||||
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
|
||||
return
|
||||
}
|
||||
|
||||
var req structs.SubmitSyncCommitteeSignaturesRequest
|
||||
err := json.NewDecoder(r.Body).Decode(&req.Data)
|
||||
switch {
|
||||
@@ -711,6 +729,7 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
v, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/voluntaryexits/mock"
|
||||
p2pMock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -622,6 +623,8 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
HeadFetcher: chainService,
|
||||
ChainInfoFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OperationNotifier: &blockchainmock.MockOperationNotifier{},
|
||||
AttestationStateFetcher: chainService,
|
||||
}
|
||||
@@ -654,6 +657,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -673,6 +677,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("phase0 att post electra", func(t *testing.T) {
|
||||
@@ -793,6 +798,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Source.Epoch)
|
||||
assert.Equal(t, "0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2", hexutil.Encode(broadcaster.BroadcastAttestations[0].GetData().Target.Root))
|
||||
assert.Equal(t, primitives.Epoch(0), broadcaster.BroadcastAttestations[0].GetData().Target.Epoch)
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 1, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
@@ -812,6 +818,7 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusOK, writer.Code)
|
||||
assert.Equal(t, true, broadcaster.BroadcastCalled.Load())
|
||||
assert.Equal(t, 2, broadcaster.NumAttestations())
|
||||
time.Sleep(100 * time.Millisecond) // Wait for async pool save
|
||||
assert.Equal(t, 2, s.AttestationsPool.UnaggregatedAttestationCount())
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
@@ -861,6 +868,27 @@ func TestSubmitAttestationsV2(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Failures[0].Message, "Incorrect attestation signature"))
|
||||
})
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleAtt)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
request.Header.Set(api.VersionHeader, version.String(version.Phase0))
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttestationsV2(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListVoluntaryExits(t *testing.T) {
|
||||
@@ -1057,14 +1085,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
|
||||
t.Run("single", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1089,14 +1122,19 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1120,13 +1158,18 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
})
|
||||
t.Run("invalid", func(t *testing.T) {
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
chainService := &blockchainmock.ChainService{
|
||||
State: st,
|
||||
}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
CoreService: &core.Service{
|
||||
SyncCommitteePool: synccommittee.NewStore(),
|
||||
P2P: broadcaster,
|
||||
HeadFetcher: &blockchainmock.ChainService{
|
||||
State: st,
|
||||
},
|
||||
HeadFetcher: chainService,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1149,7 +1192,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, false, broadcaster.BroadcastCalled.Load())
|
||||
})
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[]")
|
||||
@@ -1166,7 +1215,13 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("no body", func(t *testing.T) {
|
||||
s := &Server{}
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", nil)
|
||||
writer := httptest.NewRecorder()
|
||||
@@ -1179,6 +1234,26 @@ func TestSubmitSyncCommitteeSignatures(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.Equal(t, true, strings.Contains(e.Message, "No data submitted"))
|
||||
})
|
||||
t.Run("syncing", func(t *testing.T) {
|
||||
chainService := &blockchainmock.ChainService{State: st}
|
||||
s := &Server{
|
||||
HeadFetcher: chainService,
|
||||
TimeFetcher: chainService,
|
||||
OptimisticModeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString(singleSyncCommitteeMsg)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com", &body)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitSyncCommitteeSignatures(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
assert.Equal(t, true, strings.Contains(writer.Body.String(), "Beacon node is currently syncing"))
|
||||
})
|
||||
}
|
||||
|
||||
func TestListBLSToExecutionChanges(t *testing.T) {
|
||||
@@ -2112,6 +2187,33 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
})
|
||||
|
||||
t.Run("missing-version-header", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
// Intentionally do not set api.VersionHeader to verify missing header handling.
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
|
||||
@@ -654,6 +654,10 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func TestGetSyncCommittees_Future(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -116,6 +116,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
@@ -131,12 +132,15 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -145,6 +149,7 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateJson, err := structs.LightClientUpdateFromConsensus(update)
|
||||
|
||||
@@ -132,6 +132,7 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.SyncChecker.Synced() && !optimistic {
|
||||
return
|
||||
|
||||
@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
|
||||
}
|
||||
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return nil, false
|
||||
}
|
||||
return st, true
|
||||
|
||||
@@ -19,7 +19,6 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -78,6 +77,7 @@ go_test(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -19,7 +19,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
@@ -898,20 +897,15 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
var startSlot primitives.Slot
|
||||
// For next epoch requests, we use the current epoch's state since committee
|
||||
// assignments for next epoch can be computed from current epoch's state.
|
||||
epochForState := requestedEpoch
|
||||
if requestedEpoch == nextEpoch {
|
||||
startSlot, err = slots.EpochStart(currentEpoch)
|
||||
} else {
|
||||
startSlot, err = slots.EpochStart(requestedEpoch)
|
||||
epochForState = currentEpoch
|
||||
}
|
||||
st, err := s.Stater.StateByEpoch(ctx, epochForState)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot from epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1020,39 +1014,11 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
nextEpochLookahead = true
|
||||
}
|
||||
|
||||
epochStartSlot, err := slots.EpochStart(requestedEpoch)
|
||||
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot of epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
var st state.BeaconState
|
||||
// if the requested epoch is new, use the head state and the next slot cache
|
||||
if requestedEpoch < currentEpoch {
|
||||
st, err = s.Stater.StateBySlot(ctx, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get state for slot %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
st, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not process slots up to %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var assignments map[primitives.ValidatorIndex][]primitives.Slot
|
||||
if nextEpochLookahead {
|
||||
@@ -1103,7 +1069,8 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !sortProposerDuties(w, duties) {
|
||||
if err = sortProposerDuties(duties); err != nil {
|
||||
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1174,14 +1141,10 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
startingEpoch := min(requestedEpoch, currentEpoch)
|
||||
slot, err := slots.EpochStart(startingEpoch)
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, startingEpoch)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee state: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1327,7 +1290,7 @@ func (s *Server) GetLiveness(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
st, err = s.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get slot for requested epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
shared.WriteStateFetchError(w, err)
|
||||
return
|
||||
}
|
||||
participation, err = st.CurrentEpochParticipation()
|
||||
@@ -1447,22 +1410,20 @@ func syncCommitteeDutiesAndVals(
|
||||
return duties, vals, nil
|
||||
}
|
||||
|
||||
func sortProposerDuties(w http.ResponseWriter, duties []*structs.ProposerDuty) bool {
|
||||
ok := true
|
||||
func sortProposerDuties(duties []*structs.ProposerDuty) error {
|
||||
var err error
|
||||
sort.Slice(duties, func(i, j int) bool {
|
||||
si, err := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
return false
|
||||
}
|
||||
sj, err := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
return false
|
||||
}
|
||||
return si < sj
|
||||
})
|
||||
return ok
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
p2pmock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -2006,6 +2007,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -2184,6 +2186,7 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -2224,6 +2227,62 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties(t *testing.T) {
|
||||
@@ -2427,6 +2486,60 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
@@ -2457,7 +2570,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, State: st}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2648,7 +2761,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
return newSyncPeriodSt
|
||||
}
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot, State: newSyncPeriodSt}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2729,8 +2842,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
slot, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
st2, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
st2 := st.Copy()
|
||||
require.NoError(t, st2.SetSlot(slot))
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
@@ -2744,7 +2856,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: st2,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
Stater: &testutil.MockStater{BeaconState: st2},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -2789,6 +2901,62 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
@@ -11,6 +11,7 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -98,6 +99,7 @@ type Stater interface {
	State(ctx context.Context, id []byte) (state.BeaconState, error)
	StateRoot(ctx context.Context, id []byte) ([]byte, error)
	StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
	StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error)
}

// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
@@ -267,6 +269,46 @@ func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot
	return st, nil
}

// StateByEpoch returns the state for the start of the requested epoch.
// For current or next epoch, it uses the head state and next slot cache for efficiency.
// For past epochs, it replays blocks from the most recent canonical state.
func (p *BeaconDbStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
	ctx, span := trace.StartSpan(ctx, "statefetcher.StateByEpoch")
	defer span.End()

	targetSlot, err := slots.EpochStart(epoch)
	if err != nil {
		return nil, errors.Wrap(err, "could not get epoch start slot")
	}

	currentSlot := p.GenesisTimeFetcher.CurrentSlot()
	currentEpoch := slots.ToEpoch(currentSlot)

	// For past epochs, use the replay mechanism
	if epoch < currentEpoch {
		return p.StateBySlot(ctx, targetSlot)
	}

	// For current or next epoch, use head state + next slot cache (much faster)
	headState, err := p.ChainInfoFetcher.HeadState(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "could not get head state")
	}

	// If head state is already at or past the target slot, return it
	if headState.Slot() >= targetSlot {
		return headState, nil
	}

	// Process slots using the next slot cache
	headRoot := p.ChainInfoFetcher.CachedHeadRoot()
	st, err := transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], targetSlot)
	if err != nil {
		return nil, errors.Wrapf(err, "could not process slots up to %d", targetSlot)
	}
	return st, nil
}

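// Illustrative sketch (not part of this changeset): the call pattern the duty
// handlers above switch to, replacing the earlier EpochStart + StateBySlot
// pair. The helper name and wiring are assumptions for illustration; the
// Stater and shared.WriteStateFetchError usages mirror the handler changes in
// this diff.
func (s *Server) stateForDutyEpoch(ctx context.Context, w http.ResponseWriter, epoch primitives.Epoch) (state.BeaconState, bool) {
	st, err := s.Stater.StateByEpoch(ctx, epoch)
	if err != nil {
		// WriteStateFetchError maps a state-not-found error to 404 and anything
		// else to 500, matching the new handler tests in this changeset.
		shared.WriteStateFetchError(w, err)
		return nil, false
	}
	return st, true
}
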
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -444,3 +444,111 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
func TestStateByEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
t.Run("current epoch uses head state", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 0
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
// Should return head state since it's already past epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("current epoch processes slots to epoch start", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 1
|
||||
// Current slot is 32 (epoch 1), so epoch 1 is current epoch
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := slotsPerEpoch // slot 32, epoch 1
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
// In real usage, the transition package handles this properly
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
// that can process slots (missing committees, etc.)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("past epoch uses replay", func(t *testing.T) {
|
||||
// Head is at epoch 2, requesting epoch 0 (past)
|
||||
headSlot := slotsPerEpoch * 2 // slot 64, epoch 2
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
pastEpochSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
mockReplayer := mockstategen.NewReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(pastEpochSt, 0)
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), st.Slot())
|
||||
})
|
||||
|
||||
t.Run("next epoch uses head state path", func(t *testing.T) {
|
||||
// Head is at slot 30 (epoch 0), requesting epoch 1 (next)
|
||||
// Current slot is 30 (epoch 0), so epoch 1 is next epoch
|
||||
headSlot := primitives.Slot(30)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("head state already at target slot returns immediately", func(t *testing.T) {
|
||||
// Head is at slot 32 (epoch 1 start), requesting epoch 1
|
||||
headSlot := slotsPerEpoch // slot 32
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("head state past target slot returns head state", func(t *testing.T) {
|
||||
// Head is at slot 40, requesting epoch 1 (starts at slot 32)
|
||||
headSlot := primitives.Slot(40)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
// Returns head state since it's already >= epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -52,24 +52,27 @@ func (vs *Server) ProposeAttestation(ctx context.Context, att *ethpb.Attestation
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestation")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, att, att.GetData().CommitteeIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
attCopy := att.Copy()
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(attCopy); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
@@ -82,6 +85,10 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
ctx, span := trace.StartSpan(ctx, "AttesterServer.ProposeAttestationElectra")
|
||||
defer span.End()
|
||||
|
||||
if vs.SyncChecker.Syncing() {
|
||||
return nil, status.Errorf(codes.Unavailable, "Syncing to latest head, not ready to respond")
|
||||
}
|
||||
|
||||
resp, err := vs.proposeAtt(ctx, singleAtt, singleAtt.GetCommitteeIndex())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -98,18 +105,17 @@ func (vs *Server) ProposeAttestationElectra(ctx context.Context, singleAtt *ethp
|
||||
|
||||
singleAttCopy := singleAtt.Copy()
|
||||
att := singleAttCopy.ToAttestationElectra(committee)
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err = vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
go func() {
|
||||
go func() {
|
||||
if features.Get().EnableExperimentalAttestationPool {
|
||||
if err := vs.AttestationCache.Add(att); err != nil {
|
||||
log.WithError(err).Error("Could not save attestation")
|
||||
}
|
||||
} else {
|
||||
if err := vs.AttPool.SaveUnaggregatedAttestation(att); err != nil {
|
||||
log.WithError(err).Error("Could not save unaggregated attestation")
|
||||
return
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
@@ -38,6 +38,7 @@ func TestProposeAttestation(t *testing.T) {
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
TimeFetcher: chainService,
|
||||
AttestationStateFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
head := util.NewBeaconBlock()
|
||||
head.Block.Slot = 999
|
||||
@@ -141,6 +142,7 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
P2P: &mockp2p.MockBroadcaster{},
|
||||
AttPool: attestations.NewPool(),
|
||||
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
@@ -149,6 +151,37 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
|
||||
assert.ErrorContains(t, wanted, err)
|
||||
}
|
||||
|
||||
func TestProposeAttestation_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := util.HydrateAttestation(ðpb.Attestation{})
|
||||
_, err := attesterServer.ProposeAttestation(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestProposeAttestationElectra_Syncing(t *testing.T) {
|
||||
attesterServer := &Server{
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: true},
|
||||
}
|
||||
|
||||
req := ðpb.SingleAttestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Source: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
},
|
||||
}
|
||||
_, err := attesterServer.ProposeAttestationElectra(t.Context(), req)
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
s, ok := status.FromError(err)
|
||||
require.Equal(t, true, ok)
|
||||
assert.Equal(t, codes.Unavailable, s.Code())
|
||||
}
|
||||
|
||||
func TestGetAttestationData_OK(t *testing.T) {
|
||||
block := util.NewBeaconBlock()
|
||||
block.Block.Slot = 3*params.BeaconConfig().SlotsPerEpoch + 1
|
||||
|
||||
@@ -26,5 +26,6 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// MockStater is a fake implementation of lookup.Stater.
|
||||
@@ -14,6 +15,7 @@ type MockStater struct {
|
||||
StateProviderFunc func(ctx context.Context, stateId []byte) (state.BeaconState, error)
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByEpoch map[primitives.Epoch]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
@@ -43,3 +45,22 @@ func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
|
||||
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
|
||||
return m.StatesBySlot[s], nil
|
||||
}
|
||||
|
||||
// StateByEpoch --
|
||||
func (m *MockStater) StateByEpoch(_ context.Context, e primitives.Epoch) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StatesByEpoch != nil {
|
||||
return m.StatesByEpoch[e], nil
|
||||
}
|
||||
// Fall back to StatesBySlot if StatesByEpoch is not set
|
||||
slot, err := slots.EpochStart(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.StatesBySlot != nil {
|
||||
return m.StatesBySlot[slot], nil
|
||||
}
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
@@ -16,8 +16,6 @@ go_library(
|
||||
"error.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"gossipsub_base.go",
|
||||
"gossipsub_topic_family.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"once.go",
|
||||
@@ -51,11 +49,7 @@ go_library(
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_controller.go",
|
||||
"subscription_topic_handler.go",
|
||||
"topic_families_dynamic_subnets.go",
|
||||
"topic_families_static_subnets.go",
|
||||
"topic_families_without_subnets.go",
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
@@ -143,7 +137,6 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -151,6 +144,7 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_mplex//:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
@@ -181,8 +175,6 @@ go_test(
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
"gossipsub_base_test.go",
|
||||
"gossipsub_topic_family_test.go",
|
||||
"kzg_batch_verifier_test.go",
|
||||
"once_test.go",
|
||||
"pending_attestations_queue_bucket_test.go",
|
||||
@@ -208,11 +200,10 @@ go_test(
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_data_column_sidecar_test.go",
|
||||
"subscription_controller_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
"sync_test.go",
|
||||
"topic_families_dynamic_subnets_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
@@ -295,7 +286,6 @@ go_test(
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
|
||||
@@ -161,13 +161,17 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error)
|
||||
resChan := make(chan error, 1)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
|
||||
select {
|
||||
case s.kzgChan <- verificationSet:
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return pubsub.ValidationIgnore, ctx.Err() // parent context canceled, give up
|
||||
|
||||
@@ -3,6 +3,7 @@ package sync
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"slices"
|
||||
"sync"
|
||||
@@ -243,8 +244,10 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
|
||||
// Compute missing indices by root, excluding those already in storage.
|
||||
var lastRoot [fieldparams.RootLength]byte
|
||||
missingIndicesByRoot := make(map[[fieldparams.RootLength]byte]map[uint64]bool, len(incompleteRoots))
|
||||
for root := range incompleteRoots {
|
||||
lastRoot = root
|
||||
storedIndices := storedIndicesByRoot[root]
|
||||
|
||||
missingIndices := make(map[uint64]bool, len(requestedIndices))
|
||||
@@ -259,6 +262,7 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
initialMissingRootCount := len(missingIndicesByRoot)
|
||||
initialMissingCount := computeTotalCount(missingIndicesByRoot)
|
||||
|
||||
indicesByRootByPeer, err := computeIndicesByRootByPeer(params.P2P, slotByRoot, missingIndicesByRoot, connectedPeers)
|
||||
@@ -301,11 +305,19 @@ func requestDirectSidecarsFromPeers(
|
||||
}
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
}).Debug("Requested direct data column sidecars from peers")
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"duration": time.Since(start),
|
||||
"initialMissingRootCount": initialMissingRootCount,
|
||||
"initialMissingCount": initialMissingCount,
|
||||
"finalMissingRootCount": len(missingIndicesByRoot),
|
||||
"finalMissingCount": computeTotalCount(missingIndicesByRoot),
|
||||
})
|
||||
|
||||
if initialMissingRootCount == 1 {
|
||||
log = log.WithField("root", fmt.Sprintf("%#x", lastRoot))
|
||||
}
|
||||
|
||||
log.Debug("Requested direct data column sidecars from peers")
|
||||
|
||||
return verifiedColumnsByRoot, nil
|
||||
}
|
||||
|
||||
@@ -1,398 +0,0 @@
# Gossipsub Control Plane Design Document

## Overview

This branch introduces a declarative, fork-aware gossipsub control plane that manages topic subscriptions and peer discovery for subnet-based topics. The system replaces ad-hoc topic management with a structured approach centered on **Topic Families**.

### Key Components

| Component | Location | Responsibility |
|-----------|----------|----------------|
| **GossipsubController** | `sync/gossipsub_controller.go` | Orchestrates topic family lifecycle across forks |
| **GossipsubPeerCrawler** | `p2p/gossipsub_peer_crawler.go` | Discovers and indexes peers by topic via discv5 |
| **GossipsubPeerDialer** | `p2p/gossipsub_peer_controller.go` | Maintains peer connections for required topics |
| **Topic Family Abstractions** | `sync/gossipsub_topic_family.go` | Interfaces for topic subscription management |

---
## 1. Topic Family Abstraction

### 1.1 Design Goals

- **Declarative Fork Management**: Topic families declare when they activate/deactivate based on fork epochs
- **Unified Subscription Logic**: Common base handles validator registration, message loops, and cleanup
- **Dynamic vs Static Distinction**: Clear separation between global topics and subnet-based topics that change per slot
### 1.2 Interface Hierarchy

```
GossipsubTopicFamily (base)
├── Name()
├── NetworkScheduleEntry()
└── UnsubscribeAll()

GossipsubTopicFamilyWithoutDynamicSubnets
└── Subscribe()                  // Called once when registered

GossipsubTopicFamilyWithDynamicSubnets
├── TopicsToSubscribeForSlot(slot)
├── ExtractTopicsForNode(node)   // For peer discovery
├── SubscribeForSlot(slot)
└── UnsubscribeForSlot(slot)
```
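The hierarchy above is conceptual; the code in this branch expresses it as Go interfaces (`TopicFamily`, `ShardedTopicFamily`, and `DynamicShardedTopicFamily` in `sync/gossipsub_topic_family.go`, shown later in this diff). A minimal sketch using the diagram's names, with method signatures mirroring the dynamic-subnet interface from that file:

```go
package sync

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// GossipsubTopicFamily is the base contract shared by every topic family.
type GossipsubTopicFamily interface {
	Name() string
	NetworkScheduleEntry() params.NetworkScheduleEntry
	UnsubscribeAll()
}

// GossipsubTopicFamilyWithoutDynamicSubnets covers global and static per-subnet topics.
type GossipsubTopicFamilyWithoutDynamicSubnets interface {
	GossipsubTopicFamily
	Subscribe() // Called once when the family is registered.
}

// GossipsubTopicFamilyWithDynamicSubnets covers subnet topics that change per slot.
type GossipsubTopicFamilyWithDynamicSubnets interface {
	GossipsubTopicFamily
	TopicsToSubscribeForSlot(slot primitives.Slot) []string
	ExtractTopicsForNode(node *enode.Node) ([]string, error) // Used by the peer crawler.
	SubscribeForSlot(slot primitives.Slot)
	UnsubscribeForSlot(slot primitives.Slot)
}
```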
### 1.3 Implementation Categories

**Global Topics** (subscribed once per fork):
- Block, AggregateAndProof, VoluntaryExit, ProposerSlashing, AttesterSlashing
- SyncContributionAndProof (Altair+), BlsToExecutionChange (Capella+)
- LightClient updates (Altair+, feature-flagged)

**Static Per-Subnet**:
- BlobTopicFamily - One instance per blob subnet (Deneb/Electra)

**Dynamic Subnets** (change per slot based on validator duties):
- **AttestationTopicFamily** - Subnets based on attestation committee assignments
- **SyncCommitteeTopicFamily** - Subnets based on sync committee membership
- **DataColumnTopicFamily** - Subnets based on data column custody (Fulu+)

### 1.4 Base Implementation Features

`baseGossipsubTopicFamily` provides:
- **Idempotent subscriptions** - Safe to call multiple times for same topic
- **Automatic validator registration** - Registers message validator with pubsub
- **Message loop management** - Spawns goroutine to process incoming messages
- **Cleanup coordination** - Notifies crawler when topics are unsubscribed
### 1.5 Dynamic Subnet Selection

Dynamic families combine two subnet sources:
- **Subnets to Join**: Topics we must subscribe to
- **Subnets for Broadcast**: Topics we need peers for but may not subscribe to

| Family | Subnets to Join | Subnets for Broadcast |
|--------|-----------------|----------------------|
| Attestation | Persistent + aggregator subnets | Attester duty subnets |
| SyncCommittee | Active sync committee subnets | (none) |
| DataColumn | Custody column subnets | All column subnets |
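A minimal sketch of how a dynamic family can combine the two sources into the topic set it needs for a slot; `subnetsToJoin`, `subnetsForBroadcast`, and `topicForSubnet` are hypothetical stand-ins for the per-family logic:

```go
package sync

import "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

// topicsNeededForSlot unions the subnets we must join with the subnets we only
// need peers for (broadcast), then renders them as gossipsub topic strings.
func topicsNeededForSlot(
	slot primitives.Slot,
	subnetsToJoin, subnetsForBroadcast func(primitives.Slot) map[uint64]bool,
	topicForSubnet func(uint64) string,
) []string {
	needed := make(map[uint64]bool)
	for subnet := range subnetsToJoin(slot) {
		needed[subnet] = true
	}
	for subnet := range subnetsForBroadcast(slot) {
		needed[subnet] = true
	}
	topics := make([]string, 0, len(needed))
	for subnet := range needed {
		topics = append(topics, topicForSubnet(subnet))
	}
	return topics
}
```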
### 1.6 Fork Schedule

Topic families declare activation and deactivation epochs (both are required):

| Fork | Activations | Deactivations |
|------|-------------|---------------|
| Genesis | Block, AggregateAndProof, VoluntaryExit, ProposerSlashing, AttesterSlashing, Attestation | - |
| Altair | SyncContributionAndProof, SyncCommittee, [LightClient*] | - |
| Capella | BlsToExecutionChange | - |
| Deneb | Blob (6 subnets) | - |
| Electra | Blob (9 subnets) | Blob (Deneb config) |
| Fulu | DataColumn | Blob (all) |

---
## 2. GossipsubController

### 2.1 Responsibilities

- **Fork-Aware Topic Management**: Automatically subscribes/unsubscribes based on fork schedule
- **Smooth Fork Transitions**: Pre-subscribes 1 epoch before fork, unsubscribes 1 epoch after
- **Slot-Based Updates**: Updates dynamic subnet subscriptions every slot
- **Topic Extraction**: Provides interface for crawler to determine peer topic relevance

### 2.2 Lifecycle

1. **Startup**: Waits for initial sync to complete
2. **Control Loop**: Runs on slot ticker, calling `updateActiveTopicFamilies()`
3. **Shutdown**: Unsubscribes all families, cancels context
### 2.3 Fork Transition Handling

**Timeline for Fork at Epoch N:**
```
Epoch N-1: Subscribe to both old and new fork topics (overlap period)
Epoch N:   Fork occurs, both topic sets remain active
Epoch N+1: Unsubscribe from old fork topics, only new fork active
```

This ensures no message loss during the transition window.
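A small sketch of the overlap rule as two predicates; the epoch comparisons are assumptions drawn from the timeline above, not the controller's exact code:

```go
package sync

import "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"

// shouldPreSubscribeNextFork reports whether the next fork's topics should
// already be active: from epoch N-1 onwards for a fork at epoch N.
func shouldPreSubscribeNextFork(current, nextForkEpoch primitives.Epoch) bool {
	return current+1 >= nextForkEpoch
}

// shouldDropPreviousFork reports whether the previous fork's topics can be
// removed: one full epoch after the fork boundary.
func shouldDropPreviousFork(current, forkEpoch primitives.Epoch) bool {
	return current >= forkEpoch+1
}
```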
### 2.4 Update Logic (per slot)

1. **Get families for current epoch** from declarative schedule
2. **Check for upcoming fork** - if next epoch is fork boundary, include next fork's families
3. **Register new families** - add to active map, subscribe based on type:
   - Static families: `Subscribe()` once
   - Dynamic families: `SubscribeForSlot()` and `UnsubscribeForSlot()` every slot
4. **Remove old fork families** - if 1 epoch past fork boundary, unsubscribe and remove
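A condensed sketch of this per-slot loop. `controllerSketch` and its fields are illustrative stand-ins; the `TopicFamily`/`ShardedTopicFamily`/`DynamicShardedTopicFamily` interfaces and `params.GetNetworkScheduleEntry` mirror the code that appears later in this diff:

```go
package sync

import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/time/slots"
)

type controllerSketch struct {
	activeFamilies   map[string]TopicFamily
	familiesForEpoch func(primitives.Epoch) []TopicFamily
}

func (c *controllerSketch) updateActiveTopicFamilies(slot primitives.Slot) {
	epoch := slots.ToEpoch(slot)
	current := params.GetNetworkScheduleEntry(epoch)

	// 1-2. Families for the current epoch, plus the next fork's families one epoch early.
	wanted := c.familiesForEpoch(epoch)
	if next := params.GetNetworkScheduleEntry(epoch + 1); next.Epoch != current.Epoch {
		wanted = append(wanted, c.familiesForEpoch(epoch+1)...)
	}

	// 3. Register new families: static ones subscribe once, dynamic ones are
	// refreshed every slot.
	for _, tf := range wanted {
		if _, ok := c.activeFamilies[tf.Name()]; !ok {
			c.activeFamilies[tf.Name()] = tf
			if static, ok := tf.(ShardedTopicFamily); ok {
				static.Subscribe()
			}
		}
		if dyn, ok := tf.(DynamicShardedTopicFamily); ok {
			dyn.SubscribeForSlot(slot)
			dyn.UnsubscribeForSlot(slot)
		}
	}

	// 4. Drop families belonging to a fork that is now more than one epoch behind.
	for name, tf := range c.activeFamilies {
		if tf.NetworkScheduleEntry().Epoch < current.Epoch && epoch > current.Epoch {
			tf.UnsubscribeAll()
			delete(c.activeFamilies, name)
		}
	}
}
```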
### 2.5 Topic Extraction for Peer Discovery

The controller exposes `ExtractTopics(node)` which:
- Iterates all active **dynamic** subnet families
- Calls `ExtractTopicsForNode(node)` on each
- Returns deduplicated list of topics the node can serve

This is used by the peer crawler to index discovered peers by topic.

### 2.6 Topics Provider

The controller exposes `GetCurrentActiveTopics()` which:
- Returns all topics from dynamic families for the current slot
- Used by the peer dialer to know which topics need peer connections

---
## 3. GossipsubPeerCrawler

### 3.1 Purpose

Discovers peers via discv5, indexes them by topic, and verifies reachability via ping. Provides the peer dialer with a pool of verified, scored peers for each topic.

### 3.2 Key Design Decisions

**Triple Index Structure:**
- `byEnode` - Fast lookup by enode ID
- `byPeerId` - Fast lookup by libp2p peer ID
- `byTopic` - Fast lookup of peers serving a topic

**Ping-Once Guarantee:**
- A node is pinged exactly **once** regardless of ENR sequence number updates
- Prevents ping explosion when nodes frequently update their records
- Ping success sets `isPinged=true`, failure removes peer entirely

**Sequence Number Handling:**
- Only updates peer record if new sequence number is higher
- Stale records are ignored to prevent processing outdated data
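A sketch of the triple index as plain Go maps; the type and field names are illustrative rather than the crawler's exact ones:

```go
package p2p

import (
	"sync"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
)

// crawledPeer is one discovered node, as tracked by the crawler.
type crawledPeer struct {
	node     *enode.Node
	peerId   peer.ID
	seq      uint64          // highest ENR sequence number seen so far
	isPinged bool            // set once a ping has succeeded
	topics   map[string]bool // topics this node can serve
}

// crawledPeerIndex keeps the same record reachable by enode ID, by libp2p peer
// ID, and by topic, so lookups on any axis stay O(1).
type crawledPeerIndex struct {
	mu       sync.RWMutex
	byEnode  map[enode.ID]*crawledPeer
	byPeerId map[peer.ID]*crawledPeer
	byTopic  map[string]map[enode.ID]*crawledPeer
}
```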
### 3.3 Three Concurrent Loops

| Loop | Interval | Purpose |
|------|----------|---------|
| **crawlLoop** | `crawlInterval` | Iterates discv5 `RandomNodes()`, extracts topics, updates index |
| **pingLoop** | Continuous | Consumes ping queue, verifies reachability |
| **cleanupLoop** | 5 minutes | Prunes peers that fail filter or have no relevant topics |

### 3.4 Crawl Flow

1. Create timeout context for crawl iteration
2. Get random nodes iterator from discv5
3. For each node:
   - Apply peer filter (reject bad/incompatible peers)
   - Extract topics via `topicExtractor` (provided by controller)
   - Update index if sequence number is newer
   - Queue for ping if not already pinged and has topics
### 3.5 Ping Queue and Backpressure

- **Channel capacity**: `4 * maxConcurrentPings`
- **Backpressure**: When queue is full, crawl loop blocks on send
- **Semaphore**: Limits concurrent ping goroutines to `maxConcurrentPings`
- **Ping failure**: Removes peer from index entirely (unreachable)
- **Ping success**: Marks peer as verified (`isPinged=true`)
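A minimal sketch of the queue-plus-semaphore pattern described above: a full queue back-pressures the crawl loop, and the semaphore caps concurrent pings. `pingFn`, `removePeer`, and `markPinged` are hypothetical stand-ins for the crawler's real callbacks.

```go
package p2p

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

// pingLoop consumes the ping queue and verifies reachability, at most
// maxConcurrentPings at a time.
func pingLoop(ctx context.Context, maxConcurrentPings int, pingCh chan *enode.Node,
	pingFn func(*enode.Node) error, removePeer func(*enode.Node), markPinged func(*enode.Node)) {
	sem := make(chan struct{}, maxConcurrentPings)
	for {
		select {
		case <-ctx.Done():
			return
		case node := <-pingCh:
			sem <- struct{}{} // acquire a ping slot
			go func(n *enode.Node) {
				defer func() { <-sem }() // release the slot
				if err := pingFn(n); err != nil {
					removePeer(n) // unreachable: drop from the index
					return
				}
				markPinged(n) // verified reachable
			}(node)
		}
	}
}

// The crawl side creates the queue with capacity 4*maxConcurrentPings, so sends
// block (and the crawl loop pauses) once the queue is full:
//   pingCh := make(chan *enode.Node, 4*maxConcurrentPings)
```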
### 3.6 Peer Retrieval (`PeersForTopic`)

Returns peers for a topic with guarantees:
1. **Only pinged peers** - Verified reachable
2. **Filter applied** - Passes current peer filter
3. **Sorted by score** - Best peers first (using p2p scorer)

### 3.7 Peer Removal Triggers

| Trigger | Behavior |
|---------|----------|
| Ping failure | Remove immediately |
| Peer disconnection | `RemovePeerId()` called from disconnect handler |
| Topic unsubscription | `RemoveTopic()` called from base family cleanup |
| Filter rejection during crawl | Remove if previously indexed |
| Cleanup loop | Remove if no longer passes filter or has no topics |

### 3.8 Topic Extraction for Dynamic Subnets

For each dynamic family, extraction:
1. Gets subnets we currently need (union of join + broadcast)
2. Reads subnet bitfield from node's ENR record
3. Returns intersection - topics both we need AND the node advertises
---

## 4. GossipsubPeerDialer

### 4.1 Purpose

Maintains peer connections for topics we need. Works with the crawler to dial verified peers when topic peer counts fall below threshold.
### 4.2 Key Design Decisions

**Target Peer Count**: 20 peers per topic (`peerPerTopic` constant)

**Dial Loop Frequency**: Every 1 second

**Deduplication**: Peers appearing for multiple topics are only dialed once
### 4.3 Dial Flow

1. Get current topics from `topicsProvider` (controller's `GetCurrentActiveTopics`)
2. For each topic:
   - Check current connected peer count via `listPeersFunc`
   - If below target, calculate how many more needed
   - Get peers from crawler (already filtered, scored, pinged)
   - Limit to what's needed
3. Deduplicate peer list across all topics
4. Dial peers via `dialPeersFunc`
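A sketch of one pass of this loop: compute the per-topic deficit against the target, take that many candidates from the crawler, and deduplicate across topics. `listPeers` and `peersForTopic` are hypothetical stand-ins for the dialer's injected functions.

```go
package p2p

import "github.com/libp2p/go-libp2p/core/peer"

const peerPerTopic = 20 // target peer count per topic, per the design above

// peersToDial returns the deduplicated set of peers to dial this iteration.
func peersToDial(topics []string, listPeers func(topic string) []peer.ID,
	peersForTopic func(topic string, n int) []peer.AddrInfo) []peer.AddrInfo {
	seen := make(map[peer.ID]bool)
	var toDial []peer.AddrInfo
	for _, topic := range topics {
		deficit := peerPerTopic - len(listPeers(topic))
		if deficit <= 0 {
			continue // topic already has enough connected peers
		}
		for _, candidate := range peersForTopic(topic, deficit) {
			if seen[candidate.ID] {
				continue // already queued for another topic
			}
			seen[candidate.ID] = true
			toDial = append(toDial, candidate)
		}
	}
	return toDial
}
```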
### 4.4 Blocking Dial

`DialPeersForTopicBlocking(ctx, topic, nPeers)` provides synchronous peer acquisition:
- Loops until target peer count reached or context cancelled
- Used for critical operations that need guaranteed peer connectivity
- Polls every 100ms to check connection status
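A minimal sketch of the blocking variant under those assumptions; `connectedCount` and `dialMore` are hypothetical stand-ins for the real dependencies.

```go
package p2p

import (
	"context"
	"time"
)

// dialPeersForTopicBlocking keeps dialing until the topic has nPeers connected
// peers or the context is cancelled, polling every 100ms.
func dialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int,
	connectedCount func(string) int, dialMore func(string, int)) error {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		missing := nPeers - connectedCount(topic)
		if missing <= 0 {
			return nil
		}
		dialMore(topic, missing)
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```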
---

## 5. Component Interactions

### 5.1 Architecture Diagram
```
Sync Service
└── GossipsubController
    ├── Dynamic families: AttestationTF, SyncCommitteeTF, DataColumnTF
    ├── Global/static families: BlockTF etc., BlobTF (one per subnet)
    ├── baseTopicFamily (shared subscription logic)
    ├── GetCurrentActiveTopics() ──► GossipsubPeerDialer
    └── ExtractTopics() ──────────► GossipsubPeerCrawler

GossipsubPeerDialer                      GossipsubPeerCrawler
- polls topics every 1 second            - crawls discv5 periodically
- checks peer count per topic            - indexes peers by topic
- dials missing peers                    - verifies via ping
- calls crawler.PeersForTopic()          - filters and scores peers

P2P Service
- disconnect handler calls RemovePeerId() on the crawler
- provides filterPeer and the peer scorer to the crawler
```
### 5.2 Data Flow Summary

| Flow | Description |
|------|-------------|
| **Discovery** | discv5 → crawlLoop → topicExtractor → crawledPeers index → pingCh |
| **Ping** | pingCh → semaphore → dv5.Ping() → isPinged=true or remove |
| **Dial** | controller topics → dialer → crawler.PeersForTopic() → dialPeers |
| **Cleanup** | disconnect/unsubscribe → RemovePeerId()/RemoveTopic() |
### 5.3 Key Invariants

**Peers from `PeersForTopic()` are always:**
- Successfully pinged (reachable)
- Passing the peer filter
- Sorted by score (best first)

**Topic subscriptions are:**
- Pre-subscribed 1 epoch before fork
- Unsubscribed 1 epoch after fork
- Updated every slot for dynamic families

**Ping behavior:**
- Each node ID pinged at most once
- Ping failures remove peer entirely
- Sequence number updates don't trigger re-ping

**Backpressure:**
- Ping queue blocks crawl when full
- Semaphore limits concurrent pings
- Natural rate limiting without explicit throttling

---
## 6. Initialization Sequence

```
PHASE 1: P2P Service Start
══════════════════════════
├─► Start discv5 listener
├─► Create GossipsubPeerCrawler (with filterPeer, scorer)
└─► Create GossipsubPeerDialer (not started yet)

PHASE 2: Sync Service Start
═══════════════════════════
├─► Create GossipsubController
└─► Launch startDiscoveryAndSubscriptions goroutine

PHASE 3: Discovery and Subscriptions (after chain start)
════════════════════════════════════════════════════════
├─► Start GossipsubController (control loop)
├─► Start Crawler with topicExtractor from controller
└─► Start Dialer with topicsProvider from controller
```
### Dependency Injection

| Component | Dependencies | Provider |
|-----------|-------------|----------|
| Crawler | discv5, filterPeer, scorer | P2P Service |
| Crawler | topicExtractor | GossipsubController |
| Dialer | crawler, listPeers, dialPeers | P2P Service |
| Dialer | topicsProvider | GossipsubController |

---
## 7. Configuration Parameters

| Parameter | Default | Description |
|-----------|---------|-------------|
| `crawlInterval` | configurable | How often to crawl discv5 |
| `crawlTimeout` | configurable | Max duration per crawl iteration |
| `maxConcurrentPings` | configurable | Parallel ping limit |
| `cleanupInterval` | 5 minutes | Stale peer pruning frequency |
| `peerPerTopic` | 20 | Target peer count per topic |
| `dialLoopInterval` | 1 second | Topic peer check frequency |

---
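For reference, a sketch of these knobs gathered into a single struct; the struct and field names are illustrative, with the documented defaults filled in and the "configurable" entries left zero-valued:

```go
package p2p

import "time"

// crawlerConfig groups the tunables from the table above.
type crawlerConfig struct {
	CrawlInterval      time.Duration // how often to crawl discv5
	CrawlTimeout       time.Duration // max duration per crawl iteration
	MaxConcurrentPings int           // parallel ping limit
	CleanupInterval    time.Duration // stale peer pruning frequency
	PeerPerTopic       int           // target peer count per topic
	DialLoopInterval   time.Duration // topic peer check frequency
}

// defaultCrawlerConfig carries the values the table specifies.
var defaultCrawlerConfig = crawlerConfig{
	CleanupInterval:  5 * time.Minute,
	PeerPerTopic:     20,
	DialLoopInterval: time.Second,
}
```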
## 8. Key Files

| File | Purpose |
|------|---------|
| `sync/gossipsub_controller.go` | Controller orchestrating topic families |
| `sync/gossipsub_topic_family.go` | Interface definitions and fork schedule |
| `sync/gossipsub_base.go` | Base implementation for all topic families |
| `sync/topic_families_without_subnets.go` | Global topic family implementations |
| `sync/topic_families_static_subnets.go` | Blob topic family |
| `sync/topic_families_dynamic_subnets.go` | Dynamic subnet families |
| `p2p/gossipsub_peer_crawler.go` | Peer discovery and indexing |
| `p2p/gossipsub_peer_controller.go` | Peer dialing logic |
| `p2p/gossipsubcrawler/interface.go` | Shared interfaces |
| `p2p/handshake.go` | Disconnect handler integration |
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
multiplex "github.com/libp2p/go-mplex"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -147,7 +148,7 @@ func closeStreamAndWait(stream network.Stream, log *logrus.Entry) {
|
||||
}
|
||||
|
||||
func isUnwantedError(err error) bool {
|
||||
for _, e := range []error{network.ErrReset, io.EOF, types.ErrIODeadline} {
|
||||
for _, e := range []error{network.ErrReset, multiplex.ErrShutdown, io.EOF, types.ErrIODeadline} {
|
||||
if errors.Is(err, e) || err.Error() == e.Error() {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -9,18 +10,27 @@ import (
|
||||
)
|
||||
|
||||
// p2pHandlerControlLoop runs in a continuous loop to ensure that:
|
||||
// - We are subscribed to the correct gossipsub topics (for the current and upcoming epoch).
|
||||
// - We have registered the correct RPC stream handlers (for the current and upcoming epoch).
|
||||
func (s *Service) rpcHandlerControlLoop() {
|
||||
// - We have cleaned up gossipsub topics and RPC stream handlers that are no longer needed.
|
||||
func (s *Service) p2pHandlerControlLoop() {
|
||||
// At startup, launch registration and peer discovery loops, and register rpc stream handlers.
|
||||
startEntry := params.GetNetworkScheduleEntry(s.cfg.clock.CurrentEpoch())
|
||||
s.registerSubscribers(startEntry)
|
||||
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
// In the event of a node restart, we will still end up subscribing to the correct
|
||||
// topics during/after the fork epoch. This routine is to ensure correct
|
||||
// subscriptions for nodes running before a fork epoch.
|
||||
case <-slotTicker.C():
|
||||
current := s.cfg.clock.CurrentEpoch()
|
||||
if err := s.ensureRPCRegistrationsForEpoch(current); err != nil {
|
||||
if err := s.ensureRegistrationsForEpoch(current); err != nil {
|
||||
log.WithError(err).Error("Unable to check for fork in the next epoch")
|
||||
continue
|
||||
}
|
||||
if err := s.ensureRPCDeregistrationForEpoch(current); err != nil {
|
||||
if err := s.ensureDeregistrationForEpoch(current); err != nil {
|
||||
log.WithError(err).Error("Unable to check for fork in the previous epoch")
|
||||
continue
|
||||
}
|
||||
@@ -34,8 +44,9 @@ func (s *Service) rpcHandlerControlLoop() {
|
||||
|
||||
// ensureRegistrationsForEpoch ensures that gossip topic and RPC stream handler
|
||||
// registrations are in place for the current and subsequent epoch.
|
||||
func (s *Service) ensureRPCRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
current := params.GetNetworkScheduleEntry(epoch)
|
||||
s.registerSubscribers(current)
|
||||
|
||||
currentHandler, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
|
||||
if err != nil {
|
||||
@@ -51,6 +62,7 @@ func (s *Service) ensureRPCRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
if current.Epoch == next.Epoch {
|
||||
return nil // no fork in the next epoch
|
||||
}
|
||||
s.registerSubscribers(next)
|
||||
|
||||
if s.digestActionDone(next.ForkDigest, registerRpcOnce) {
|
||||
return nil
|
||||
@@ -72,7 +84,7 @@ func (s *Service) ensureRPCRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
}
|
||||
|
||||
// ensureDeregistrationForEpoch deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
|
||||
func (s *Service) ensureRPCDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
|
||||
func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
|
||||
current := params.GetNetworkScheduleEntry(currentEpoch)
|
||||
|
||||
// If we are still in our genesis fork version then exit early.
|
||||
@@ -103,5 +115,20 @@ func (s *Service) ensureRPCDeregistrationForEpoch(currentEpoch primitives.Epoch)
|
||||
}
|
||||
}
|
||||
|
||||
// Unsubscribe from all gossip topics with the previous fork digest.
|
||||
if s.digestActionDone(previous.ForkDigest, unregisterGossipOnce) {
|
||||
return nil
|
||||
}
|
||||
for _, t := range s.subHandler.allTopics() {
|
||||
retDigest, err := p2p.ExtractGossipDigest(t)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve digest")
|
||||
continue
|
||||
}
|
||||
if retDigest == previous.ForkDigest {
|
||||
s.unSubscribeFromTopic(t)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,8 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -42,11 +44,36 @@ func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
initialSyncComplete: closedChan,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestRegisterSubscriptions_Idempotent(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
fulu := params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().FuluForkEpoch = fulu
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
current := fulu - 1
|
||||
s := testForkWatcherService(t, current)
|
||||
next := params.GetNetworkScheduleEntry(fulu)
|
||||
wg := attachSpawner(s)
|
||||
require.Equal(t, true, s.registerSubscribers(next))
|
||||
done := make(chan struct{})
|
||||
go func() { wg.Wait(); close(done) }()
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for subscriptions to be registered")
|
||||
case <-done:
|
||||
}
|
||||
// the goal of this callback is just to assert that spawn is never called.
|
||||
s.subscriptionSpawner = func(func()) { t.Error("registration routines spawned twice for the same digest") }
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(fulu))
|
||||
}
|
||||
|
||||
func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
@@ -76,6 +103,7 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
@@ -83,6 +111,8 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
// TODO: we should check subcommittee indices here but we need to work with the committee cache to do it properly
|
||||
/*
|
||||
subIndices := mapFromCount(params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
@@ -97,10 +127,14 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
{
|
||||
name: "capella fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
|
||||
expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().DenebForkEpoch,
|
||||
@@ -109,10 +143,17 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
{
|
||||
name: "deneb fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
@@ -121,8 +162,16 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
{
|
||||
name: "electra fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {},
|
||||
name: "electra fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
@@ -145,28 +194,52 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testForkWatcherService(t, current)
|
||||
require.NoError(t, s.ensureRPCRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg := attachSpawner(s)
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
if current != tt.forkEpoch-1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure the topics were registered for the upcoming fork
|
||||
digest := params.ForkDigest(tt.forkEpoch)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
|
||||
// After this point we are checking deregistration, which doesn't apply if there isn't a higher
|
||||
// nextForkEpoch.
|
||||
if tt.forkEpoch >= tt.nextForkEpoch {
|
||||
return
|
||||
}
|
||||
|
||||
nextDigest := params.ForkDigest(tt.nextForkEpoch)
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
wg = attachSpawner(s)
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
require.NoError(t, s.ensureRPCRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
|
||||
require.NoError(t, s.ensureRPCDeregistrationForEpoch(tt.nextForkEpoch))
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch+1))
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func attachSpawner(s *Service) *sync.WaitGroup {
|
||||
wg := new(sync.WaitGroup)
|
||||
s.subscriptionSpawner = func(f func()) {
|
||||
wg.Go(func() {
|
||||
f()
|
||||
})
|
||||
}
|
||||
return wg
|
||||
}
|
||||
|
||||
// oneEpoch returns the duration of one epoch.
|
||||
func oneEpoch() time.Duration {
|
||||
return time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
|
||||
|
||||
@@ -1,230 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type baseTopicFamily struct {
|
||||
syncService *Service
|
||||
nse params.NetworkScheduleEntry
|
||||
validator wrappedVal
|
||||
handler subHandler
|
||||
|
||||
tf TopicFamily
|
||||
|
||||
mu sync.Mutex
|
||||
subscriptions map[string]*pubsub.Subscription
|
||||
}
|
||||
|
||||
func newBaseTopicFamily(syncService *Service, nse params.NetworkScheduleEntry, validator wrappedVal,
|
||||
handler subHandler, tf TopicFamily) *baseTopicFamily {
|
||||
return &baseTopicFamily{
|
||||
syncService: syncService,
|
||||
nse: nse,
|
||||
validator: validator,
|
||||
handler: handler,
|
||||
tf: tf,
|
||||
subscriptions: make(map[string]*pubsub.Subscription),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseTopicFamily) NetworkScheduleEntry() params.NetworkScheduleEntry {
|
||||
return b.nse
|
||||
}
|
||||
|
||||
// subscribeToTopics subscribes to the given list of gossipsub topics.
|
||||
//
|
||||
// This method is idempotent for a given topic - if a subscription already exists for a topic,
|
||||
// it will be skipped without error. This allows callers to safely call subscribeToTopics
|
||||
// multiple times with overlapping topic lists without creating duplicate subscriptions.
|
||||
//
|
||||
// For each new topic subscription, this method:
|
||||
// 1. Registers a topic validator with the pubsub system
|
||||
// 2. Creates the subscription via the p2p layer
|
||||
// 3. Spawns a goroutine running a message loop that processes incoming messages
|
||||
// 4. Tracks the subscription in the internal subscriptions map
|
||||
//
|
||||
// The message loop for each subscription runs until the context is cancelled or an error
|
||||
// occurs. Each received message is processed in its own goroutine with panic recovery.
|
||||
//
|
||||
// Errors during subscription (validator registration failures, subscription failures) are
|
||||
// logged but do not prevent other topics from being subscribed to.
|
||||
func (b *baseTopicFamily) subscribeToTopics(topics []string) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
for _, topic := range topics {
|
||||
log := log.WithField("topic", topic)
|
||||
s := b.syncService
|
||||
|
||||
// Do not resubscribe to topics that we already have a subscription for.
|
||||
_, ok := b.subscriptions[topic]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, b.validator)); err != nil {
|
||||
log.WithError(err).Error("Could not register validator for topic")
|
||||
continue
|
||||
}
|
||||
|
||||
sub, err := s.cfg.p2p.SubscribeToTopic(topic)
|
||||
if err != nil {
|
||||
// Any error subscribing to a PubSub topic would be the result of a misconfiguration of
|
||||
// libp2p PubSub library or a subscription request to a topic that fails to match the topic
|
||||
// subscription filter.
|
||||
log.WithError(err).Error("Could not subscribe topic")
|
||||
continue
|
||||
}
|
||||
|
||||
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
|
||||
// message.
|
||||
pipeline := func(msg *pubsub.Message) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.pubsub")
|
||||
defer span.End()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
|
||||
log.WithField("error", r).
|
||||
WithField("recoveredAt", "subscribeWithBase").
|
||||
WithField("stack", string(debug.Stack())).
|
||||
Error("Panic occurred")
|
||||
}
|
||||
}()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
if msg.ValidatorData == nil {
|
||||
log.Error("Received nil message on pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.handler(ctx, msg.ValidatorData.(proto.Message)); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Could not handle p2p pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The main message loop for receiving incoming messages from this subscription.
|
||||
messageLoop := func() {
|
||||
for {
|
||||
msg, err := sub.Next(s.ctx)
|
||||
if err != nil {
|
||||
// This should only happen when the context is cancelled or subscription is cancelled.
|
||||
if !errors.Is(err, pubsub.ErrSubscriptionCancelled) { // Only log a warning on unexpected errors.
|
||||
log.WithError(err).Warn("Subscription next failed")
|
||||
}
|
||||
// Cancel subscription in the event of an error, as we are
|
||||
// now exiting topic event loop.
|
||||
sub.Cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom == s.cfg.p2p.PeerID() {
|
||||
continue
|
||||
}
|
||||
|
||||
go pipeline(msg)
|
||||
}
|
||||
}
|
||||
|
||||
go messageLoop()
|
||||
log.WithField("topic", topic).Info("Subscribed to")
|
||||
b.subscriptions[topic] = sub
|
||||
s.subHandler.addTopic(topic, sub)
|
||||
}
|
||||
}
|
||||
|
||||
// UnsubscribeAll unsubscribes from all topics managed by this topic family.
|
||||
//
|
||||
// This method iterates through all active subscriptions and performs cleanup for each:
|
||||
// - Unregisters the topic validator from pubsub
|
||||
// - Cancels the subscription (stopping the message loop goroutine)
|
||||
// - Leaves the topic in the p2p layer
|
||||
// - Removes the topic from the crawler's tracking (if crawler is configured)
|
||||
// - Removes the subscription from internal tracking
|
||||
//
|
||||
// After this method returns, the topic family has no active subscriptions.
|
||||
// This is typically called during shutdown or when transitioning between network forks.
|
||||
func (b *baseTopicFamily) UnsubscribeAll() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
for topic, sub := range b.subscriptions {
|
||||
b.cleanupSubscription(topic, sub)
|
||||
delete(b.subscriptions, topic)
|
||||
}
|
||||
}
|
||||
|
||||
// pruneTopicsExcept unsubscribes from all topics except those in the provided list.
|
||||
//
|
||||
// This method is used to efficiently manage dynamic subnet subscriptions. When the set of
|
||||
// required topics changes (e.g., due to slot progression or validator duty changes), this
|
||||
// method removes subscriptions that are no longer needed while preserving active ones.
|
||||
//
|
||||
// Parameters:
|
||||
// - wantedTopics: List of topic strings that should remain subscribed. Any topic not in
|
||||
// this list will be unsubscribed and cleaned up.
|
||||
//
|
||||
// For each topic being pruned, the cleanup process:
|
||||
// - Unregisters the topic validator from pubsub
|
||||
// - Cancels the subscription (stopping the message loop goroutine)
|
||||
// - Leaves the topic in the p2p layer
|
||||
// - Removes the topic from the crawler's tracking (if crawler is configured)
|
||||
// - Removes the subscription from internal tracking
|
||||
//
|
||||
// This method is safe to call with an empty wantedTopics list, which will unsubscribe from
|
||||
// all topics (equivalent to UnsubscribeAll).
|
||||
func (b *baseTopicFamily) pruneTopicsExcept(wantedTopics []string) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
neededMap := make(map[string]bool, len(wantedTopics))
|
||||
for _, t := range wantedTopics {
|
||||
neededMap[t] = true
|
||||
}
|
||||
|
||||
for topic, sub := range b.subscriptions {
|
||||
if !neededMap[topic] {
|
||||
b.cleanupSubscription(topic, sub)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseTopicFamily) cleanupSubscription(topic string, sub *pubsub.Subscription) {
|
||||
s := b.syncService
|
||||
log.WithField("topic", topic).Info("Unsubscribed from")
|
||||
if err := s.cfg.p2p.PubSub().UnregisterTopicValidator(topic); err != nil {
|
||||
log.WithError(err).Error("Could not unregister topic validator")
|
||||
}
|
||||
|
||||
if sub != nil {
|
||||
sub.Cancel()
|
||||
}
|
||||
if err := s.cfg.p2p.LeaveTopic(topic); err != nil {
|
||||
log.WithError(err).Error("Unable to leave topic")
|
||||
}
|
||||
|
||||
if crawler := s.cfg.p2p.Crawler(); crawler != nil {
|
||||
crawler.RemoveTopic(topic)
|
||||
}
|
||||
delete(b.subscriptions, topic)
|
||||
s.subHandler.removeTopic(topic)
|
||||
}
|
||||
@@ -1,144 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// wrappedVal represents a gossip validator which also returns an error along with the result.
|
||||
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)
|
||||
|
||||
// subHandler represents handler for a given subscription.
|
||||
type subHandler func(context.Context, proto.Message) error
|
||||
|
||||
// noopHandler is used for subscriptions that do not require anything to be done.
|
||||
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type TopicFamily interface {
|
||||
Name() string
|
||||
NetworkScheduleEntry() params.NetworkScheduleEntry
|
||||
UnsubscribeAll()
|
||||
}
|
||||
|
||||
type ShardedTopicFamily interface {
|
||||
TopicFamily
|
||||
Subscribe()
|
||||
}
|
||||
|
||||
type DynamicShardedTopicFamily interface {
|
||||
TopicFamily
|
||||
TopicsWithMinPeerCount(slot primitives.Slot) map[string]int
|
||||
TopicsToSubscribeForSlot(slot primitives.Slot) []string
|
||||
ExtractTopicsForNode(node *enode.Node) ([]string, error)
|
||||
SubscribeForSlot(slot primitives.Slot)
|
||||
UnsubscribeForSlot(slot primitives.Slot)
|
||||
}
|
||||
|
||||
type topicFamilyEntry struct {
|
||||
activationEpoch primitives.Epoch
|
||||
deactivationEpoch primitives.Epoch
|
||||
factory func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily
|
||||
}
|
||||
|
||||
func topicFamilySchedule() []topicFamilyEntry {
|
||||
cfg := params.BeaconConfig()
|
||||
return []topicFamilyEntry{
|
||||
// Genesis topic families
|
||||
{
|
||||
activationEpoch: cfg.GenesisEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{
|
||||
NewBlockTopicFamily(s, nse),
|
||||
NewAggregateAndProofTopicFamily(s, nse),
|
||||
NewVoluntaryExitTopicFamily(s, nse),
|
||||
NewProposerSlashingTopicFamily(s, nse),
|
||||
NewAttesterSlashingTopicFamily(s, nse),
|
||||
NewAttestationTopicFamily(s, nse),
|
||||
}
|
||||
},
|
||||
},
|
||||
// Altair topic families
|
||||
{
|
||||
activationEpoch: cfg.AltairForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
families := []TopicFamily{
|
||||
NewSyncContributionAndProofTopicFamily(s, nse),
|
||||
NewSyncCommitteeTopicFamily(s, nse),
|
||||
}
|
||||
if features.Get().EnableLightClient {
|
||||
families = append(families,
|
||||
NewLightClientOptimisticUpdateTopicFamily(s, nse),
|
||||
NewLightClientFinalityUpdateTopicFamily(s, nse),
|
||||
)
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
// Capella topic families
|
||||
{
|
||||
activationEpoch: cfg.CapellaForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{NewBlsToExecutionChangeTopicFamily(s, nse)}
|
||||
},
|
||||
},
|
||||
// Blob topic families (static per-subnet) in Deneb and Electra forks (removed in Fulu)
|
||||
{
|
||||
activationEpoch: cfg.DenebForkEpoch,
|
||||
deactivationEpoch: cfg.ElectraForkEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
count := cfg.BlobsidecarSubnetCount
|
||||
families := make([]TopicFamily, 0, count)
|
||||
for i := range count {
|
||||
families = append(families, NewBlobTopicFamily(s, nse, i))
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
{
|
||||
activationEpoch: cfg.ElectraForkEpoch,
|
||||
deactivationEpoch: cfg.FuluForkEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
count := cfg.BlobsidecarSubnetCountElectra
|
||||
families := make([]TopicFamily, 0, count)
|
||||
for i := range count {
|
||||
families = append(families, NewBlobTopicFamily(s, nse, i))
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
// Fulu data column topic family
|
||||
{
|
||||
activationEpoch: cfg.FuluForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{NewDataColumnTopicFamily(s, nse)}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TopicFamiliesForEpoch(epoch primitives.Epoch, s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
var activeFamilies []TopicFamily
|
||||
for _, entry := range topicFamilySchedule() {
|
||||
if epoch < entry.activationEpoch {
|
||||
continue
|
||||
}
|
||||
if epoch >= entry.deactivationEpoch {
|
||||
continue
|
||||
}
|
||||
activeFamilies = append(activeFamilies, entry.factory(s, nse)...)
|
||||
}
|
||||
return activeFamilies
|
||||
}
|
||||
@@ -1,311 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
)
|
||||
|
||||
// createMinimalService creates a minimal Service instance for testing
|
||||
func createMinimalService(t *testing.T) *Service {
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
return &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
},
|
||||
ctx: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopicFamiliesForEpoch(t *testing.T) {
|
||||
// Define test epochs
|
||||
const (
|
||||
genesisEpoch = primitives.Epoch(0)
|
||||
altairEpoch = primitives.Epoch(100)
|
||||
bellatrixEpoch = primitives.Epoch(200)
|
||||
capellaEpoch = primitives.Epoch(300)
|
||||
denebEpoch = primitives.Epoch(400)
|
||||
electraEpoch = primitives.Epoch(500)
|
||||
fuluEpoch = primitives.Epoch(600)
|
||||
)
|
||||
|
||||
// Define topic families for each fork
|
||||
// These names must match what's returned by the Name() method of each topic family
|
||||
genesisFamilies := []string{
|
||||
"BlockTopicFamily",
|
||||
"AggregateAndProofTopicFamily",
|
||||
"VoluntaryExitTopicFamily",
|
||||
"ProposerSlashingTopicFamily",
|
||||
"AttesterSlashingTopicFamily",
|
||||
"AttestationTopicFamily",
|
||||
}
|
||||
|
||||
altairFamilies := []string{
|
||||
"SyncContributionAndProofTopicFamily",
|
||||
"SyncCommitteeTopicFamily",
|
||||
}
|
||||
|
||||
altairLightClientFamilies := []string{
|
||||
"LightClientOptimisticUpdateTopicFamily",
|
||||
"LightClientFinalityUpdateTopicFamily",
|
||||
}
|
||||
|
||||
capellaFamilies := []string{
|
||||
"BlsToExecutionChangeTopicFamily",
|
||||
}
|
||||
|
||||
denebBlobFamilies := []string{
|
||||
"BlobTopicFamily-0",
|
||||
"BlobTopicFamily-1",
|
||||
"BlobTopicFamily-2",
|
||||
"BlobTopicFamily-3",
|
||||
"BlobTopicFamily-4",
|
||||
"BlobTopicFamily-5",
|
||||
}
|
||||
|
||||
electraBlobFamilies := append(slices.Clone(denebBlobFamilies), "BlobTopicFamily-6", "BlobTopicFamily-7")
|
||||
|
||||
fuluFamilies := []string{
|
||||
"DataColumnTopicFamily",
|
||||
}
|
||||
|
||||
// Helper function to combine fork families
|
||||
combineForks := func(forkSets ...[]string) []string {
|
||||
var combined []string
|
||||
for _, forkSet := range forkSets {
|
||||
combined = append(combined, forkSet...)
|
||||
}
|
||||
return combined
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
epoch primitives.Epoch
|
||||
setupConfig func()
|
||||
enableLightClient bool
|
||||
expectedFamilies []string
|
||||
}{
|
||||
{
|
||||
name: "epoch before any fork activation should return empty",
|
||||
epoch: primitives.Epoch(0),
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
// Set all fork epochs to future epochs
|
||||
config.GenesisEpoch = primitives.Epoch(1000)
|
||||
config.AltairForkEpoch = primitives.Epoch(2000)
|
||||
config.BellatrixForkEpoch = primitives.Epoch(3000)
|
||||
config.CapellaForkEpoch = primitives.Epoch(4000)
|
||||
config.DenebForkEpoch = primitives.Epoch(5000)
|
||||
config.ElectraForkEpoch = primitives.Epoch(6000)
|
||||
config.FuluForkEpoch = primitives.Epoch(7000)
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: []string{},
|
||||
},
|
||||
{
|
||||
name: "epoch at genesis should return genesis topic families",
|
||||
epoch: genesisEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: genesisFamilies,
|
||||
},
|
||||
{
|
||||
name: "epoch at Altair without light client should have genesis + Altair families",
|
||||
epoch: altairEpoch,
|
||||
enableLightClient: false,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Altair with light client enabled should include light client families",
|
||||
epoch: altairEpoch,
|
||||
enableLightClient: true,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, altairLightClientFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Capella should have genesis + Altair + Capella families",
|
||||
epoch: capellaEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Deneb should include blob sidecars",
|
||||
epoch: denebEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6 // Deneb has 6 blob subnets
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, denebBlobFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Electra should have Electra blobs not Deneb blobs",
|
||||
epoch: electraEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8 // Electra has 8 blob subnets
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, electraBlobFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Fulu should have data columns not blobs",
|
||||
epoch: fuluEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, fuluFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch after Fulu should maintain Fulu families",
|
||||
epoch: fuluEpoch + 100,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, fuluFamilies),
|
||||
},
|
||||
{
|
||||
name: "edge case - epoch exactly at deactivation should not include deactivated family",
|
||||
epoch: electraEpoch, // This deactivates Deneb blobs
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, electraBlobFamilies),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
if tt.enableLightClient {
|
||||
resetFlags := features.InitWithReset(&features.Flags{
|
||||
EnableLightClient: true,
|
||||
})
|
||||
defer resetFlags()
|
||||
}
|
||||
tt.setupConfig()
|
||||
service := createMinimalService(t)
|
||||
families := TopicFamiliesForEpoch(tt.epoch, service, params.NetworkScheduleEntry{})
|
||||
|
||||
// Collect actual family names
|
||||
actualFamilies := make([]string, 0, len(families))
|
||||
for _, family := range families {
|
||||
actualFamilies = append(actualFamilies, family.Name())
|
||||
}
|
||||
|
||||
// Assert exact match - families should have exactly the expected families and nothing more
|
||||
assert.Equal(t, len(tt.expectedFamilies), len(actualFamilies),
|
||||
"Expected %d families but got %d", len(tt.expectedFamilies), len(actualFamilies))
|
||||
|
||||
// Create a map for efficient lookup
|
||||
expectedMap := make(map[string]bool)
|
||||
for _, expected := range tt.expectedFamilies {
|
||||
expectedMap[expected] = true
|
||||
}
|
||||
|
||||
// Check each actual family is expected
|
||||
for _, actual := range actualFamilies {
|
||||
if !expectedMap[actual] {
|
||||
t.Errorf("Unexpected topic family found: %s", actual)
|
||||
}
|
||||
delete(expectedMap, actual) // Remove from map as we find it
|
||||
}
|
||||
|
||||
// Check all expected families were found (anything left in map was missing)
|
||||
for missing := range expectedMap {
|
||||
t.Errorf("Expected topic family not found: %s", missing)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
@@ -268,6 +269,71 @@ func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_ContextCanceledBeforeSend(t *testing.T) {
|
||||
cancelledCtx, cancel := context.WithCancel(t.Context())
|
||||
cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: context.Background(),
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
result, err := service.validateWithKzgBatchVerifier(cancelledCtx, nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.Canceled)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier did not return after context cancellation")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-service.kzgChan:
|
||||
t.Fatal("verificationSet was sent to kzgChan despite canceled context")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -204,6 +204,13 @@ var (
		},
	)

	dataColumnsRecoveredFromELAttempts = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "data_columns_recovered_from_el_attempts",
			Help: "Count the number of data columns recovery attempts from the execution layer.",
		},
	)

	dataColumnsRecoveredFromELTotal = promauto.NewCounter(
		prometheus.CounterOpts{
			Name: "data_columns_recovered_from_el_total",
@@ -242,6 +249,13 @@ var (
			Buckets: []float64{100, 250, 500, 750, 1000, 1500, 2000, 4000, 8000, 12000, 16000},
		},
	)

	dataColumnSidecarsObtainedViaELCount = promauto.NewSummary(
		prometheus.SummaryOpts{
			Name: "data_column_obtained_via_el_count",
			Help: "Count the number of data column sidecars obtained via the execution layer.",
		},
	)
)

func (s *Service) updateMetrics() {
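(Editorial illustration, not part of the patch: a sketch of how a recovery path might drive these collectors. The function recordELRecovery and its call site are assumptions; the patch wires the metrics up elsewhere.)

// Hypothetical call site for the new execution-layer recovery metrics.
func recordELRecovery(recoveredSidecars int, err error) {
	dataColumnsRecoveredFromELAttempts.Inc()
	if err != nil {
		return
	}
	dataColumnsRecoveredFromELTotal.Inc()
	dataColumnSidecarsObtainedViaELCount.Observe(float64(recoveredSidecars))
}
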
@@ -3,9 +3,9 @@ package sync
import (
	"bytes"
	"context"
	"encoding/hex"
	"fmt"
	"slices"
	"time"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/core/blocks"
@@ -21,13 +21,23 @@ import (
	"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/runtime/version"
	"github.com/OffchainLabs/prysm/v7/time"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var pendingAttsLimit = 32768
const pendingAttsLimit = 32768

// aggregatorIndexFilter defines how aggregator index should be handled in equality checks.
type aggregatorIndexFilter int

const (
	// ignoreAggregatorIndex means aggregates differing only by aggregator index are considered equal.
	ignoreAggregatorIndex aggregatorIndexFilter = iota
	// includeAggregatorIndex means aggregator index must also match for aggregates to be considered equal.
	includeAggregatorIndex
)

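(Editorial illustration, not part of the patch: the two filter modes in practice, built with the same ethpb and go-bitfield helpers the tests below use. Two aggregates that differ only by their aggregator index are duplicates under ignoreAggregatorIndex but distinct under includeAggregatorIndex.)

a := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{
	AggregatorIndex: 1,
	Aggregate: &ethpb.Attestation{
		Data:            &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1},
		AggregationBits: bitfield.Bitlist{0b1111},
	},
}}
b := &ethpb.SignedAggregateAttestationAndProof{Message: &ethpb.AggregateAttestationAndProof{
	AggregatorIndex: 2,
	Aggregate: &ethpb.Attestation{
		Data:            &ethpb.AttestationData{Slot: 1, CommitteeIndex: 1},
		AggregationBits: bitfield.Bitlist{0b1111},
	},
}}
_ = pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex)  // true: only the aggregator index differs
_ = pendingAggregatesAreEqual(a, b, includeAggregatorIndex) // false: the index is part of the comparison
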
// This method processes pending attestations as a "known" block as arrived. With validations,
// the valid attestations get saved into the operation mem pool, and the invalid attestations gets deleted
@@ -50,16 +60,7 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
	attestations := s.blkRootToPendingAtts[bRoot]
	s.pendingAttsLock.RUnlock()

	if len(attestations) > 0 {
		start := time.Now()
		s.processAttestations(ctx, attestations)
		duration := time.Since(start)
		log.WithFields(logrus.Fields{
			"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
			"pendingAttsCount": len(attestations),
			"duration": duration,
		}).Debug("Verified and saved pending attestations to pool")
	}
	s.processAttestations(ctx, attestations)

	randGen := rand.NewGenerator()
	// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
@@ -79,26 +80,71 @@ func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte
	return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
}

// processAttestations processes a list of attestations.
// It assumes (for logging purposes only) that all attestations pertain to the same block.
func (s *Service) processAttestations(ctx context.Context, attestations []any) {
	if len(attestations) == 0 {
		return
	}

	firstAttestation := attestations[0]
	var blockRoot []byte
	switch v := firstAttestation.(type) {
	case ethpb.Att:
		blockRoot = v.GetData().BeaconBlockRoot
	case ethpb.SignedAggregateAttAndProof:
		blockRoot = v.AggregateAttestationAndProof().AggregateVal().GetData().BeaconBlockRoot
	default:
		log.Warnf("Unexpected attestation type %T, skipping processing", v)
		return
	}

	validAggregates := make([]ethpb.SignedAggregateAttAndProof, 0, len(attestations))
	startAggregate := time.Now()
	atts := make([]ethpb.Att, 0, len(attestations))
	aggregateAttAndProofCount := 0
	for _, att := range attestations {
		switch v := att.(type) {
		case ethpb.Att:
			atts = append(atts, v)
		case ethpb.SignedAggregateAttAndProof:
			s.processAggregate(ctx, v)
			aggregateAttAndProofCount++
			// Avoid processing multiple aggregates only differing by aggregator index.
			if slices.ContainsFunc(validAggregates, func(other ethpb.SignedAggregateAttAndProof) bool {
				return pendingAggregatesAreEqual(v, other, ignoreAggregatorIndex)
			}) {
				continue
			}

			if err := s.processAggregate(ctx, v); err != nil {
				log.WithError(err).Debug("Pending aggregate attestation could not be processed")
				continue
			}

			validAggregates = append(validAggregates, v)
		default:
			log.Warnf("Unexpected attestation type %T, skipping", v)
		}
	}
	durationAggregateAttAndProof := time.Since(startAggregate)

	startAtts := time.Now()
	for _, bucket := range bucketAttestationsByData(atts) {
		s.processAttestationBucket(ctx, bucket)
	}

	durationAtts := time.Since(startAtts)

	log.WithFields(logrus.Fields{
		"blockRoot": fmt.Sprintf("%#x", blockRoot),
		"totalCount": len(attestations),
		"aggregateAttAndProofCount": aggregateAttAndProofCount,
		"uniqueAggregateAttAndProofCount": len(validAggregates),
		"attCount": len(atts),
		"durationTotal": durationAggregateAttAndProof + durationAtts,
		"durationAggregateAttAndProof": durationAggregateAttAndProof,
		"durationAtts": durationAtts,
	}).Debug("Verified and saved pending attestations to pool")
}

// attestationBucket groups attestations with the same AttestationData for batch processing.
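(Editorial illustration, not part of the patch: the bucketing step above groups unaggregated attestations that share the same AttestationData so each group can be verified together. Below is a minimal sketch of such a grouping keyed by the data root; the real bucketAttestationsByData lives elsewhere in this file and may differ.)

func groupByDataRoot(atts []ethpb.Att) map[[32]byte][]ethpb.Att {
	buckets := make(map[[32]byte][]ethpb.Att)
	for _, att := range atts {
		root, err := att.GetData().HashTreeRoot()
		if err != nil {
			continue // skip attestations whose data cannot be hashed
		}
		buckets[root] = append(buckets[root], att)
	}
	return buckets
}
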
@@ -303,21 +349,20 @@ func (s *Service) processVerifiedAttestation(
	})
}

func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) {
func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAggregateAttAndProof) error {
	res, err := s.validateAggregatedAtt(ctx, aggregate)
	if err != nil {
		log.WithError(err).Debug("Pending aggregated attestation failed validation")
		return
		return errors.Wrap(err, "validate aggregated att")
	}

	if res != pubsub.ValidationAccept || !s.validateBlockInAttestation(ctx, aggregate) {
		log.Debug("Pending aggregated attestation failed validation")
		return
		return errors.New("Pending aggregated attestation failed validation")
	}

	att := aggregate.AggregateAttestationAndProof().AggregateVal()
	if err := s.saveAttestation(att); err != nil {
		log.WithError(err).Debug("Could not save aggregated attestation")
		return
		return errors.Wrap(err, "save attestation")
	}

	_ = s.setAggregatorIndexEpochSeen(att.GetData().Target.Epoch, aggregate.AggregateAttestationAndProof().GetAggregatorIndex())
@@ -325,6 +370,8 @@ func (s *Service) processAggregate(ctx context.Context, aggregate ethpb.SignedAg
	if err := s.cfg.p2p.Broadcast(ctx, aggregate); err != nil {
		log.WithError(err).Debug("Could not broadcast aggregated attestation")
	}

	return nil
}

// This defines how pending aggregates are saved in the map. The key is the
@@ -336,7 +383,7 @@ func (s *Service) savePendingAggregate(agg ethpb.SignedAggregateAttAndProof) {

	s.savePending(root, agg, func(other any) bool {
		a, ok := other.(ethpb.SignedAggregateAttAndProof)
		return ok && pendingAggregatesAreEqual(agg, a)
		return ok && pendingAggregatesAreEqual(agg, a, includeAggregatorIndex)
	})
}

@@ -391,13 +438,19 @@ func (s *Service) savePending(root [32]byte, pending any, isEqual func(other any
	s.blkRootToPendingAtts[root] = append(s.blkRootToPendingAtts[root], pending)
}

func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof) bool {
// pendingAggregatesAreEqual checks if two pending aggregate attestations are equal.
// The filter parameter controls whether aggregator index is considered in the equality check.
func pendingAggregatesAreEqual(a, b ethpb.SignedAggregateAttAndProof, filter aggregatorIndexFilter) bool {
	if a.Version() != b.Version() {
		return false
	}
	if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
		return false

	if filter == includeAggregatorIndex {
		if a.AggregateAttestationAndProof().GetAggregatorIndex() != b.AggregateAttestationAndProof().GetAggregatorIndex() {
			return false
		}
	}

	aAtt := a.AggregateAttestationAndProof().AggregateVal()
	bAtt := b.AggregateAttestationAndProof().AggregateVal()
	if aAtt.GetData().Slot != bAtt.GetData().Slot {

@@ -94,7 +94,7 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
	// Process block A (which exists and has no pending attestations)
	// This should skip processing attestations for A and request blocks B and C
	require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
	require.LogsContain(t, hook, "Requesting block by root")
	require.LogsContain(t, hook, "Requesting blocks by root")
}

func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
@@ -911,17 +911,17 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different version", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProofElectra{Message: ðpb.AggregateAttestationAndProofElectra{AggregatorIndex: 1}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 1}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{Message: ðpb.AggregateAttestationAndProof{AggregatorIndex: 2}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different slot", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -942,7 +942,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different committee index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -963,7 +963,7 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregation bits", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
@@ -984,7 +984,30 @@ func Test_pendingAggregatesAreEqual(t *testing.T) {
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1000},
|
||||
}}}
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b))
|
||||
assert.Equal(t, false, pendingAggregatesAreEqual(a, b, includeAggregatorIndex))
|
||||
})
|
||||
t.Run("different aggregator index should be equal while ignoring aggregator index", func(t *testing.T) {
|
||||
a := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 1,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
b := ðpb.SignedAggregateAttestationAndProof{
|
||||
Message: ðpb.AggregateAttestationAndProof{
|
||||
AggregatorIndex: 2,
|
||||
Aggregate: ðpb.Attestation{
|
||||
Data: ðpb.AttestationData{
|
||||
Slot: 1,
|
||||
CommitteeIndex: 1,
|
||||
},
|
||||
AggregationBits: bitfield.Bitlist{0b1111},
|
||||
}}}
|
||||
assert.Equal(t, true, pendingAggregatesAreEqual(a, b, ignoreAggregatorIndex))
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ package sync

import (
	"context"
	"encoding/hex"
	"fmt"
	"slices"
	"sync"
@@ -44,11 +43,13 @@ func (s *Service) processPendingBlocksQueue() {
		if !s.chainIsStarted() {
			return
		}

		locker.Lock()
		defer locker.Unlock()

		if err := s.processPendingBlocks(s.ctx); err != nil {
			log.WithError(err).Debug("Could not process pending blocks")
		}
		locker.Unlock()
	})
}

@@ -73,8 +74,10 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
randGen := rand.NewGenerator()
|
||||
var parentRoots [][32]byte
|
||||
|
||||
blkRoots := make([][32]byte, 0, len(sortedSlots)*maxBlocksPerSlot)
|
||||
|
||||
// Iterate through sorted slots.
|
||||
for _, slot := range sortedSlots {
|
||||
for i, slot := range sortedSlots {
|
||||
// Skip processing if slot is in the future.
|
||||
if slot > s.cfg.clock.CurrentSlot() {
|
||||
continue
|
||||
@@ -91,6 +94,9 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
|
||||
// Process each block in the queue.
|
||||
for _, b := range blocksInCache {
|
||||
start := time.Now()
|
||||
totalDuration := time.Duration(0)
|
||||
|
||||
if err := blocks.BeaconBlockIsNil(b); err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -147,19 +153,34 @@ func (s *Service) processPendingBlocks(ctx context.Context) error {
|
||||
}
|
||||
cancelFunction()
|
||||
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
blkRoots = append(blkRoots, blkRoot)
|
||||
|
||||
// Remove the processed block from the queue.
|
||||
if err := s.removeBlockFromQueue(b, blkRoot); err != nil {
|
||||
return err
|
||||
}
|
||||
log.WithFields(logrus.Fields{"slot": slot, "blockRoot": hex.EncodeToString(bytesutil.Trunc(blkRoot[:]))}).Debug("Processed pending block and cleared it in cache")
|
||||
|
||||
duration := time.Since(start)
|
||||
totalDuration += duration
|
||||
log.WithFields(logrus.Fields{
|
||||
"slotIndex": fmt.Sprintf("%d/%d", i+1, len(sortedSlots)),
|
||||
"slot": slot,
|
||||
"root": fmt.Sprintf("%#x", blkRoot),
|
||||
"duration": duration,
|
||||
"totalDuration": totalDuration,
|
||||
}).Debug("Processed pending block and cleared it in cache")
|
||||
}
|
||||
|
||||
span.End()
|
||||
}
|
||||
|
||||
for _, blkRoot := range blkRoots {
|
||||
// Process pending attestations for this block.
|
||||
if err := s.processPendingAttsForBlock(ctx, blkRoot); err != nil {
|
||||
log.WithError(err).Debug("Failed to process pending attestations for block")
|
||||
}
|
||||
}
|
||||
|
||||
return s.sendBatchRootRequest(ctx, parentRoots, randGen)
|
||||
}
|
||||
|
||||
@@ -379,6 +400,19 @@ func (s *Service) sendBatchRootRequest(ctx context.Context, roots [][32]byte, ra
		req = roots[:maxReqBlock]
	}

	if logrus.GetLevel() >= logrus.DebugLevel {
		rootsStr := make([]string, 0, len(roots))
		for _, req := range roots {
			rootsStr = append(rootsStr, fmt.Sprintf("%#x", req))
		}

		log.WithFields(logrus.Fields{
			"peer": pid,
			"count": len(req),
			"roots": rootsStr,
		}).Debug("Requesting blocks by root")
	}

	// Send the request to the peer.
	if err := s.sendBeaconBlocksRequest(ctx, &req, pid); err != nil {
		tracing.AnnotateError(span, err)
@@ -438,8 +472,6 @@ func (s *Service) filterOutPendingAndSynced(roots [][fieldparams.RootLength]byte
			roots = append(roots[:i], roots[i+1:]...)
			continue
		}

		log.WithField("blockRoot", fmt.Sprintf("%#x", r)).Debug("Requesting block by root")
	}
	return roots
}

@@ -329,8 +329,6 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
markInitSyncComplete(t, r)
|
||||
clock := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
|
||||
@@ -947,8 +945,6 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
markInitSyncComplete(t, r)
|
||||
clock := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
|
||||
|
||||
@@ -181,7 +181,7 @@ type Service struct {
|
||||
lcStore *lightClient.Store
|
||||
dataColumnLogCh chan dataColumnLogEntry
|
||||
digestActions perDigestSet
|
||||
subscriptionController *SubscriptionController
|
||||
subscriptionSpawner func(func()) // see Service.spawn for details
|
||||
}
|
||||
|
||||
// NewService initializes new regular sync service.
|
||||
@@ -198,7 +198,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
|
||||
reconstructionRandGen: rand.NewGenerator(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(r); err != nil {
|
||||
@@ -233,7 +232,6 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
delete(r.seenPendingBlocks, root)
|
||||
}
|
||||
})
|
||||
|
||||
r.subHandler = newSubTopicHandler()
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
r.initCaches()
|
||||
@@ -325,10 +323,9 @@ func (s *Service) Stop() error {
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(p)
|
||||
}
|
||||
|
||||
// Stop the gossipsub controller.
|
||||
s.subscriptionController.Stop()
|
||||
|
||||
for _, t := range s.cfg.p2p.PubSub().GetTopics() {
|
||||
s.unSubscribeFromTopic(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -408,36 +405,7 @@ func (s *Service) startDiscoveryAndSubscriptions() {
|
||||
}
|
||||
|
||||
// Start the fork watcher.
|
||||
go s.rpcHandlerControlLoop()
|
||||
|
||||
// Start the gossipsub controller.
|
||||
go s.subscriptionController.Start()
|
||||
|
||||
// Configure the crawler and dialer with the topic extractor / subnet topics
|
||||
// provider if available.
|
||||
crawler := s.cfg.p2p.Crawler()
|
||||
if crawler == nil {
|
||||
log.Info("No crawler available, topic extraction disabled")
|
||||
return
|
||||
}
|
||||
|
||||
// Start the crawler now that it has the extractor.
|
||||
if err := crawler.Start(s.subscriptionController.ExtractTopics); err != nil {
|
||||
log.WithError(err).Warn("Failed to start peer crawler")
|
||||
return
|
||||
}
|
||||
|
||||
// Start the gossipsub dialer if available.
|
||||
if dialer := s.cfg.p2p.GossipDialer(); dialer != nil {
|
||||
provider := func() map[string]int {
|
||||
return s.subscriptionController.GetCurrentActiveTopicsWithMinPeerCount()
|
||||
}
|
||||
if err := dialer.Start(provider); err != nil {
|
||||
log.WithError(err).Warn("Failed to start gossip peer dialer")
|
||||
}
|
||||
} else {
|
||||
log.Error("No gossip peer dialer available")
|
||||
}
|
||||
go s.p2pHandlerControlLoop()
|
||||
}
|
||||
|
||||
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
|
||||
|
||||
@@ -2,7 +2,6 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
@@ -69,9 +67,8 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
clockWaiter: gs,
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
|
||||
topicFmt := "/eth2/%x/beacon_block"
|
||||
topic := "/eth2/%x/beacon_block"
|
||||
go r.startDiscoveryAndSubscriptions()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
@@ -85,10 +82,7 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
msg := util.NewBeaconBlock()
|
||||
msg.Block.ParentRoot = util.Random32Bytes(t)
|
||||
msg.Signature = sk.Sign([]byte("data")).Marshal()
|
||||
// Build full topic using current fork digest
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
fullTopic := fmt.Sprintf(topicFmt, nse.ForkDigest) + p2p.Encoding().ProtocolSuffix()
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
// wait for chainstart to be sent
|
||||
time.Sleep(400 * time.Millisecond)
|
||||
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
|
||||
@@ -143,7 +137,6 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
|
||||
clockWaiter: gs,
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
r.initCaches()
|
||||
|
||||
var vr [32]byte
|
||||
@@ -176,16 +169,14 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
|
||||
// Save block into DB so that validateBeaconBlockPubSub() process gets short cut.
|
||||
util.SaveBlock(t, ctx, r.cfg.beaconDB, msg)
|
||||
|
||||
topicFmt := "/eth2/%x/beacon_block"
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
fullTopic := fmt.Sprintf(topicFmt, nse.ForkDigest) + p2p.Encoding().ProtocolSuffix()
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
topic := "/eth2/%x/beacon_block"
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
assert.Equal(t, 0, len(blockChan), "block was received by sync service despite not being fully synced")
|
||||
|
||||
close(r.initialSyncComplete)
|
||||
<-syncCompleteCh
|
||||
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
|
||||
select {
|
||||
case <-blockChan:
|
||||
@@ -215,7 +206,6 @@ func TestSyncService_StopCleanly(t *testing.T) {
|
||||
clockWaiter: gs,
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
markInitSyncComplete(t, &r)
|
||||
|
||||
go r.startDiscoveryAndSubscriptions()
|
||||
@@ -262,7 +252,7 @@ func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
|
||||
// Create service with connected peers
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -275,7 +265,6 @@ func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
@@ -341,7 +330,7 @@ func TestService_Stop_TimeoutHandling(t *testing.T) {
|
||||
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -354,7 +343,6 @@ func TestService_Stop_TimeoutHandling(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
@@ -403,7 +391,7 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
|
||||
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -416,7 +404,6 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
|
||||
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
@@ -18,6 +20,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/messagehandler"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -27,12 +31,124 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const pubsubMessageTimeout = 30 * time.Second
|
||||
|
||||
var errInvalidDigest = errors.New("invalid digest")
|
||||
|
||||
// wrappedVal represents a gossip validator which also returns an error along with the result.
|
||||
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)
|
||||
|
||||
// subHandler represents handler for a given subscription.
|
||||
type subHandler func(context.Context, proto.Message) error
|
||||
|
||||
// noopHandler is used for subscriptions that do not require anything to be done.
|
||||
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
|
||||
// set of gossipsub subnets.
|
||||
type subscribeParameters struct {
|
||||
topicFormat string
|
||||
validate wrappedVal
|
||||
handle subHandler
|
||||
nse params.NetworkScheduleEntry
|
||||
// getSubnetsToJoin is a function that returns all subnets the node should join.
|
||||
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
|
||||
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
|
||||
// but for which no subscriptions are needed.
|
||||
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
|
||||
}
|
||||
|
||||
// shortTopic is a less verbose version of topic strings used for logging.
|
||||
func (p subscribeParameters) shortTopic() string {
|
||||
short := p.topicFormat
|
||||
fmtLen := len(short)
|
||||
if fmtLen >= 3 && short[fmtLen-3:] == "_%d" {
|
||||
short = short[:fmtLen-3]
|
||||
}
|
||||
return fmt.Sprintf(short, p.nse.ForkDigest)
|
||||
}
|
||||
|
||||
func (p subscribeParameters) logFields() logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"topic": p.shortTopic(),
|
||||
}
|
||||
}
|
||||
|
||||
// fullTopic is the fully qualified topic string, given to gossipsub.
|
||||
func (p subscribeParameters) fullTopic(subnet uint64, suffix string) string {
|
||||
return fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, subnet) + suffix
|
||||
}
|
||||
|
||||
// subnetTracker keeps track of which subnets we are subscribed to, out of the set of
|
||||
// possible subnets described by a `subscribeParameters`.
|
||||
type subnetTracker struct {
|
||||
subscribeParameters
|
||||
mu sync.RWMutex
|
||||
subscriptions map[uint64]*pubsub.Subscription
|
||||
}
|
||||
|
||||
func newSubnetTracker(p subscribeParameters) *subnetTracker {
|
||||
return &subnetTracker{
|
||||
subscribeParameters: p,
|
||||
subscriptions: make(map[uint64]*pubsub.Subscription),
|
||||
}
|
||||
}
|
||||
|
||||
// unwanted takes a list of wanted subnets and returns a list of currently subscribed subnets that are not included.
|
||||
func (t *subnetTracker) unwanted(wanted map[uint64]bool) []uint64 {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
unwanted := make([]uint64, 0, len(t.subscriptions))
|
||||
for subnet := range t.subscriptions {
|
||||
if wanted == nil || !wanted[subnet] {
|
||||
unwanted = append(unwanted, subnet)
|
||||
}
|
||||
}
|
||||
return unwanted
|
||||
}
|
||||
|
||||
// missing takes a list of wanted subnets and returns a list of wanted subnets that are not currently tracked.
|
||||
func (t *subnetTracker) missing(wanted map[uint64]bool) []uint64 {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
missing := make([]uint64, 0, len(wanted))
|
||||
for subnet := range wanted {
|
||||
if _, ok := t.subscriptions[subnet]; !ok {
|
||||
missing = append(missing, subnet)
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
// cancelSubscription cancels and removes the subscription for a given subnet.
|
||||
func (t *subnetTracker) cancelSubscription(subnet uint64) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
defer delete(t.subscriptions, subnet)
|
||||
|
||||
sub := t.subscriptions[subnet]
|
||||
if sub == nil {
|
||||
return
|
||||
}
|
||||
sub.Cancel()
|
||||
}
|
||||
|
||||
// track asks subscriptionTracker to hold on to the subscription for a given subnet so
|
||||
// that we can remember that it is tracked and cancel its context when it's time to unsubscribe.
|
||||
func (t *subnetTracker) track(subnet uint64, sub *pubsub.Subscription) {
|
||||
if sub == nil {
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.subscriptions[subnet] = sub
|
||||
}
|
||||
|
||||
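(Editorial illustration, not part of the patch: taken together these helpers support a per-slot reconcile pass, which trySubscribeSubnets and pruneNotWanted further below implement. A condensed sketch with the actual subscribe/unsubscribe calls passed in as stubs.)

func reconcile(t *subnetTracker, wanted map[uint64]bool, subscribe func(uint64) *pubsub.Subscription, unsubscribe func(uint64)) {
	// Drop subnets we are subscribed to but no longer want.
	for _, subnet := range t.unwanted(wanted) {
		t.cancelSubscription(subnet)
		unsubscribe(subnet)
	}
	// Join wanted subnets we are not tracking yet.
	for _, subnet := range t.missing(wanted) {
		t.track(subnet, subscribe(subnet))
	}
}
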
// noopValidator is a no-op that only decodes the message, but does not check its contents.
|
||||
func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
@@ -76,6 +192,272 @@ func (s *Service) activeSyncSubnetIndices(currentSlot primitives.Slot) map[uint6
|
||||
return mapFromSlice(subscriptions)
|
||||
}
|
||||
|
||||
// spawn allows the Service to use a custom function for launching goroutines.
// This is useful in tests where we can set spawner to a sync.WaitGroup and
// wait for the spawned goroutines to finish.
func (s *Service) spawn(f func()) {
	if s.subscriptionSpawner != nil {
		s.subscriptionSpawner(f)
	} else {
		go f()
	}
}

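(Editorial illustration, not part of the patch: as the comment says, tests can install a spawner that tracks the goroutines. A minimal sketch of that wiring with a sync.WaitGroup; the test body is hypothetical.)

var wg sync.WaitGroup
svc := &Service{
	subscriptionSpawner: func(f func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	},
}
svc.spawn(func() { /* subscription work under test */ })
wg.Wait() // blocks until every spawned goroutine has returned
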
// Register PubSub subscribers
|
||||
func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
|
||||
// If we have already registered for this fork digest, exit early.
|
||||
if s.digestActionDone(nse.ForkDigest, registerGossipOnce) {
|
||||
return false
|
||||
}
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.AttestationSubnetTopicFormat,
|
||||
validate: s.validateCommitteeIndexBeaconAttestation,
|
||||
handle: s.committeeIndexBeaconAttestationSubscriber,
|
||||
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
|
||||
getSubnetsRequiringPeers: attesterSubnetIndices,
|
||||
nse: nse,
|
||||
})
|
||||
})
|
||||
|
||||
// New gossip topic in Altair
|
||||
if params.BeaconConfig().AltairForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.SyncContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncContributionAndProof,
|
||||
s.syncContributionAndProofSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
validate: s.validateSyncCommitteeMessage,
|
||||
handle: s.syncCommitteeMessageSubscriber,
|
||||
getSubnetsToJoin: s.activeSyncSubnetIndices,
|
||||
nse: nse,
|
||||
})
|
||||
})
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientOptimisticUpdateTopicFormat,
|
||||
s.validateLightClientOptimisticUpdate,
|
||||
noopHandler,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientFinalityUpdateTopicFormat,
|
||||
s.validateLightClientFinalityUpdate,
|
||||
noopHandler,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// New gossip topic in Capella
|
||||
if params.BeaconConfig().CapellaForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Deneb, removed in Electra
|
||||
if params.BeaconConfig().DenebForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Electra, removed in Fulu
|
||||
if params.BeaconConfig().ElectraForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().FuluForkEpoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Fulu.
|
||||
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.DataColumnSubnetTopicFormat,
|
||||
validate: s.validateDataColumn,
|
||||
handle: s.dataColumnSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: s.dataColumnSubnetIndices,
|
||||
getSubnetsRequiringPeers: s.allDataColumnSubnets,
|
||||
})
|
||||
})
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Service) subscriptionRequestExpired(nse params.NetworkScheduleEntry) bool {
|
||||
next := params.NextNetworkScheduleEntry(nse.Epoch)
|
||||
return next.Epoch != nse.Epoch && s.cfg.clock.CurrentEpoch() > next.Epoch
|
||||
}
|
||||
|
||||
func (s *Service) subscribeLogFields(topic string, nse params.NetworkScheduleEntry) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"topic": topic,
|
||||
"digest": nse.ForkDigest,
|
||||
"forkEpoch": nse.Epoch,
|
||||
"currentEpoch": s.cfg.clock.CurrentEpoch(),
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe to a given topic with a given validator and subscription handler.
|
||||
// The base protobuf message is used to initialize new messages for decoding.
|
||||
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, nse params.NetworkScheduleEntry) {
|
||||
if err := s.waitForInitialSync(s.ctx); err != nil {
|
||||
log.WithFields(s.subscribeLogFields(topic, nse)).WithError(err).Debug("Context cancelled while waiting for initial sync, not subscribing to topic")
|
||||
return
|
||||
}
|
||||
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
|
||||
if s.subscriptionRequestExpired(nse) {
|
||||
// If we are already past the next fork epoch, do not subscribe to this topic.
|
||||
log.WithFields(s.subscribeLogFields(topic, nse)).Debug("Not subscribing to topic as we are already past the next fork epoch")
|
||||
return
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topic, nse.Epoch)
|
||||
if base == nil {
|
||||
// Impossible condition as it would mean topic does not exist.
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
|
||||
}
|
||||
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
|
||||
}
|
||||
|
||||
func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
|
||||
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
log := log.WithField("topic", topic)
|
||||
|
||||
// Do not resubscribe already seen subscriptions.
|
||||
ok := s.subHandler.topicExists(topic)
|
||||
if ok {
|
||||
log.WithField("topic", topic).Error("Provided topic already has an active subscription running")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, validator)); err != nil {
|
||||
log.WithError(err).Error("Could not register validator for topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
sub, err := s.cfg.p2p.SubscribeToTopic(topic)
|
||||
if err != nil {
|
||||
// Any error subscribing to a PubSub topic would be the result of a misconfiguration of
|
||||
// libp2p PubSub library or a subscription request to a topic that fails to match the topic
|
||||
// subscription filter.
|
||||
log.WithError(err).Error("Could not subscribe topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
s.subHandler.addTopic(sub.Topic(), sub)
|
||||
|
||||
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
|
||||
// message.
|
||||
pipeline := func(msg *pubsub.Message) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.pubsub")
|
||||
defer span.End()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
|
||||
log.WithField("error", r).
|
||||
WithField("recoveredAt", "subscribeWithBase").
|
||||
WithField("stack", string(debug.Stack())).
|
||||
Error("Panic occurred")
|
||||
}
|
||||
}()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
if msg.ValidatorData == nil {
|
||||
log.Error("Received nil message on pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
if err := handle(ctx, msg.ValidatorData.(proto.Message)); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Could not handle p2p pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The main message loop for receiving incoming messages from this subscription.
|
||||
messageLoop := func() {
|
||||
for {
|
||||
msg, err := sub.Next(s.ctx)
|
||||
if err != nil {
|
||||
// This should only happen when the context is cancelled or subscription is cancelled.
|
||||
if !errors.Is(err, pubsub.ErrSubscriptionCancelled) { // Only log a warning on unexpected errors.
|
||||
log.WithError(err).Warn("Subscription next failed")
|
||||
}
|
||||
// Cancel subscription in the event of an error, as we are
|
||||
// now exiting topic event loop.
|
||||
sub.Cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom == s.cfg.p2p.PeerID() {
|
||||
continue
|
||||
}
|
||||
|
||||
go pipeline(msg)
|
||||
}
|
||||
}
|
||||
|
||||
go messageLoop()
|
||||
log.WithField("topic", topic).Info("Subscribed to")
|
||||
return sub
|
||||
}
|
||||
|
||||
// Wrap the pubsub validator with a metric monitoring function. This function increments the
|
||||
// appropriate counter if the particular message fails to validate.
|
||||
func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, pubsub.ValidatorEx) {
|
||||
@@ -145,6 +527,151 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
||||
}
|
||||
}
|
||||
|
||||
// pruneNotWanted unsubscribes from topics we are currently subscribed to but that are
|
||||
// not in the list of wanted subnets.
|
||||
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
|
||||
for _, subnet := range t.unwanted(wantedSubnets) {
|
||||
t.cancelSubscription(subnet)
|
||||
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
|
||||
}
|
||||
}
|
||||
|
||||
// subscribeWithParameters subscribes to a list of subnets.
|
||||
func (s *Service) subscribeWithParameters(p subscribeParameters) {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
defer cancel()
|
||||
|
||||
tracker := newSubnetTracker(p)
|
||||
go s.ensurePeers(ctx, tracker)
|
||||
go s.logMinimumPeersPerSubnet(ctx, p)
|
||||
|
||||
if err := s.waitForInitialSync(ctx); err != nil {
|
||||
log.WithFields(p.logFields()).WithError(err).Debug("Could not subscribe to subnets as initial sync failed")
|
||||
return
|
||||
}
|
||||
s.trySubscribeSubnets(tracker)
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
|
||||
if s.subscriptionRequestExpired(p.nse) {
|
||||
// If we are already past the next fork epoch, do not subscribe to this topic.
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": p.shortTopic(),
|
||||
"digest": p.nse.ForkDigest,
|
||||
"epoch": p.nse.Epoch,
|
||||
"currentEpoch": s.cfg.clock.CurrentEpoch(),
|
||||
}).Debug("Exiting topic subnet subscription loop")
|
||||
return
|
||||
}
|
||||
s.trySubscribeSubnets(tracker)
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// trySubscribeSubnets attempts to subscribe to any missing subnets that we should be subscribed to.
|
||||
// Only if initial sync is complete.
|
||||
func (s *Service) trySubscribeSubnets(t *subnetTracker) {
|
||||
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
|
||||
s.pruneNotWanted(t, subnetsToJoin)
|
||||
for _, subnet := range t.missing(subnetsToJoin) {
|
||||
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
|
||||
topic := t.fullTopic(subnet, "")
|
||||
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) ensurePeers(ctx context.Context, tracker *subnetTracker) {
|
||||
// Try once immediately so we don't have to wait until the next slot.
|
||||
s.tryEnsurePeers(ctx, tracker)
|
||||
|
||||
oncePerSlot := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer oncePerSlot.Done()
|
||||
for {
|
||||
select {
|
||||
case <-oncePerSlot.C():
|
||||
s.tryEnsurePeers(ctx, tracker)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) tryEnsurePeers(ctx context.Context, tracker *subnetTracker) {
|
||||
timeout := (time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) - 100*time.Millisecond
|
||||
minPeers := flags.Get().MinimumPeersPerSubnet
|
||||
neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.nse.ForkDigest, minPeers, neededSubnets)
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithFields(tracker.logFields()).WithError(err).Debug("Could not find peers with subnets")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) logMinimumPeersPerSubnet(ctx context.Context, p subscribeParameters) {
|
||||
logFields := p.logFields()
|
||||
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
|
||||
// Periodically log subnets that do not have enough connected peers.
|
||||
log := log.WithField("minimum", minimumPeersPerSubnet)
|
||||
logTicker := time.NewTicker(5 * time.Minute)
|
||||
defer logTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-logTicker.C:
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
|
||||
isSubnetWithMissingPeers := false
|
||||
// Check whether each wanted subnet has enough connected peers.
|
||||
for index := range subnetsToFindPeersIndex {
|
||||
topic := fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, index)
|
||||
|
||||
// Check if we have enough peers in the subnet. Skip if we do.
|
||||
if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
|
||||
isSubnetWithMissingPeers = true
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"actual": count,
|
||||
}).Debug("Not enough connected peers")
|
||||
}
|
||||
}
|
||||
if !isSubnetWithMissingPeers {
|
||||
log.WithFields(logFields).Debug("All subnets have enough connected peers")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) unSubscribeFromTopic(topic string) {
|
||||
log.WithField("topic", topic).Info("Unsubscribed from")
|
||||
if err := s.cfg.p2p.PubSub().UnregisterTopicValidator(topic); err != nil {
|
||||
log.WithError(err).Error("Could not unregister topic validator")
|
||||
}
|
||||
sub := s.subHandler.subForTopic(topic)
|
||||
if sub != nil {
|
||||
sub.Cancel()
|
||||
}
|
||||
s.subHandler.removeTopic(topic)
|
||||
if err := s.cfg.p2p.LeaveTopic(topic); err != nil {
|
||||
log.WithError(err).Error("Unable to leave topic")
|
||||
}
|
||||
}
|
||||
|
||||
// connectedPeersCount counts how many peers for a given topic are connected to the node.
func (s *Service) connectedPeersCount(subnetTopic string) int {
	topic := subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
	peersWithSubnet := s.cfg.p2p.PubSub().ListPeers(topic)
	return len(peersWithSubnet)
}
|
||||
|
||||
func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
@@ -289,6 +816,34 @@ func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
|
||||
return params.ForkDigest(current) == digest, nil
|
||||
}
|
||||
|
||||
// computeAllNeededSubnets returns the union of the subnets we want to join
// and the subnets for which we only need to find peers.
func computeAllNeededSubnets(
	currentSlot primitives.Slot,
	getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool,
	getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool,
) map[uint64]bool {
	// Retrieve the subnets we want to join.
	subnetsToJoin := getSubnetsToJoin(currentSlot)

	// Retrieve the subnets in which we want to find peers.
	subnetsRequiringPeers := make(map[uint64]bool)
	if getSubnetsRequiringPeers != nil {
		subnetsRequiringPeers = getSubnetsRequiringPeers(currentSlot)
	}

	// Combine the two maps to get all needed subnets.
	neededSubnets := make(map[uint64]bool, len(subnetsToJoin)+len(subnetsRequiringPeers))
	for subnet := range subnetsToJoin {
		neededSubnets[subnet] = true
	}
	for subnet := range subnetsRequiringPeers {
		neededSubnets[subnet] = true
	}

	return neededSubnets
}
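// Minimal usage sketch, assuming the surrounding sync package context; the
// subnet numbers and the currentSlot variable are illustrative placeholders:
//
//	join := func(primitives.Slot) map[uint64]bool { return map[uint64]bool{1: true, 2: true} }
//	peersOnly := func(primitives.Slot) map[uint64]bool { return map[uint64]bool{2: true, 3: true} }
//	needed := computeAllNeededSubnets(currentSlot, join, peersOnly)
//	// needed == map[uint64]bool{1: true, 2: true, 3: true}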
|
||||
|
||||
func agentString(pid peer.ID, hst host.Host) string {
|
||||
rawVersion, storeErr := hst.Peerstore().Get(pid, "AgentVersion")
|
||||
agString, ok := rawVersion.(string)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
@@ -48,7 +49,15 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
|
||||
return errors.Wrap(err, "new ro block with root")
|
||||
}
|
||||
|
||||
go s.processSidecarsFromExecutionFromBlock(ctx, roBlock)
|
||||
var wg sync.WaitGroup
|
||||
wg.Go(func() {
|
||||
if err := s.processSidecarsFromExecutionFromBlock(ctx, roBlock); err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": block.Slot(),
|
||||
}).Error("Failed to process sidecars from execution from block")
|
||||
}
|
||||
})
|
||||
|
||||
if err := s.cfg.chain.ReceiveBlock(ctx, signed, root, nil); err != nil {
|
||||
if blockchain.IsInvalidBlock(err) {
|
||||
@@ -69,28 +78,33 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "process pending atts for block")
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processSidecarsFromExecutionFromBlock retrieves sidecar data (if available) from the execution client,
// builds the corresponding sidecars, saves them to storage, and broadcasts them over P2P if necessary.
|
||||
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) {
|
||||
func (s *Service) processSidecarsFromExecutionFromBlock(ctx context.Context, roBlock blocks.ROBlock) error {
|
||||
if roBlock.Version() >= version.Fulu {
|
||||
if err := s.processDataColumnSidecarsFromExecution(ctx, peerdas.PopulateFromBlock(roBlock)); err != nil {
|
||||
log.WithError(err).Error("Failed to process data column sidecars from execution")
|
||||
return
|
||||
return errors.Wrap(err, "process data column sidecars from execution")
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
if roBlock.Version() >= version.Deneb {
|
||||
s.processBlobSidecarsFromExecution(ctx, roBlock)
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processBlobSidecarsFromExecution retrieves blob sidecar data (if available) from the execution client,
|
||||
@@ -168,7 +182,6 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
key := fmt.Sprintf("%#x", source.Root())
|
||||
if _, err, _ := s.columnSidecarsExecSingleFlight.Do(key, func() (any, error) {
|
||||
const delay = 250 * time.Millisecond
|
||||
secondsPerHalfSlot := time.Duration(params.BeaconConfig().SecondsPerSlot/2) * time.Second
|
||||
|
||||
commitments, err := source.Commitments()
|
||||
if err != nil {
|
||||
@@ -186,15 +199,35 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
return nil, errors.Wrap(err, "column indices to sample")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, secondsPerHalfSlot)
|
||||
defer cancel()
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", source.Root()),
|
||||
"slot": source.Slot(),
|
||||
"proposerIndex": source.ProposerIndex(),
|
||||
"type": source.Type(),
|
||||
})
|
||||
|
||||
var constructedSidecarCount uint64
|
||||
for iteration := uint64(0); ; /*no stop condition*/ iteration++ {
|
||||
log = log.WithField("iteration", iteration)
|
||||
|
||||
// Exit early if all sidecars to sample have been seen.
|
||||
if s.haveAllSidecarsBeenSeen(source.Slot(), source.ProposerIndex(), columnIndicesToSample) {
|
||||
if iteration > 0 && constructedSidecarCount == 0 {
|
||||
log.Debug("No data column sidecars constructed from the execution client")
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Return if the context is done.
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
if iteration == 0 {
|
||||
dataColumnsRecoveredFromELAttempts.Inc()
|
||||
}
|
||||
|
||||
// Try to construct data column sidecars from the execution client.
|
||||
constructedSidecars, err := s.cfg.executionReconstructor.ConstructDataColumnSidecars(ctx, source)
|
||||
if err != nil {
|
||||
@@ -202,19 +235,11 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
}
|
||||
|
||||
// No sidecars are retrieved from the EL, retry later
|
||||
sidecarCount := uint64(len(constructedSidecars))
|
||||
if sidecarCount == 0 {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
|
||||
time.Sleep(delay)
|
||||
continue
|
||||
}
|
||||
constructedCount := uint64(len(constructedSidecars))
|
||||
|
||||
// Boundary check.
|
||||
if sidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", sidecarCount, fieldparams.NumberOfColumns)
|
||||
if constructedSidecarCount > 0 && constructedSidecarCount != fieldparams.NumberOfColumns {
|
||||
return nil, errors.Errorf("reconstruct data column sidecars returned %d sidecars, expected %d - should never happen", constructedSidecarCount, fieldparams.NumberOfColumns)
|
||||
}
|
||||
|
||||
unseenIndices, err := s.broadcastAndReceiveUnseenDataColumnSidecars(ctx, source.Slot(), source.ProposerIndex(), columnIndicesToSample, constructedSidecars)
|
||||
@@ -222,7 +247,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
return nil, errors.Wrap(err, "broadcast and receive unseen data column sidecars")
|
||||
}
|
||||
|
||||
if len(unseenIndices) > 0 {
|
||||
if constructedCount > 0 {
|
||||
dataColumnsRecoveredFromELTotal.Inc()
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
@@ -234,9 +259,12 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
"count": len(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
// Wait before retrying.
|
||||
time.Sleep(delay)
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
@@ -271,6 +299,11 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
|
||||
unseenIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Exit early if there is nothing to broadcast or receive.
|
||||
if len(unseenSidecars) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
|
||||
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
|
||||
return nil, errors.Wrap(err, "broadcast data column sidecars")
|
||||
|
||||
@@ -194,7 +194,8 @@ func TestProcessSidecarsFromExecutionFromBlock(t *testing.T) {
|
||||
},
|
||||
seenBlobCache: lruwrpr.New(1),
|
||||
}
|
||||
s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
|
||||
err := s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedBlobCount, len(chainService.Blobs))
|
||||
})
|
||||
}
|
||||
@@ -293,7 +294,8 @@ func TestProcessSidecarsFromExecutionFromBlock(t *testing.T) {
|
||||
roBlock, err := blocks.NewROBlock(sb)
|
||||
require.NoError(t, err)
|
||||
|
||||
s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
|
||||
err = s.processSidecarsFromExecutionFromBlock(t.Context(), roBlock)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.expectedDataColumnCount, len(chainService.DataColumns))
|
||||
})
|
||||
}
|
||||
|
||||
@@ -25,12 +25,12 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
|
||||
}
|
||||
|
||||
if err := s.receiveDataColumnSidecar(ctx, sidecar); err != nil {
|
||||
return errors.Wrap(err, "receive data column sidecar")
|
||||
return wrapDataColumnError(sidecar, "receive data column sidecar", err)
|
||||
}
|
||||
|
||||
wg.Go(func() error {
|
||||
if err := s.processDataColumnSidecarsFromReconstruction(ctx, sidecar); err != nil {
|
||||
return errors.Wrap(err, "process data column sidecars from reconstruction")
|
||||
return wrapDataColumnError(sidecar, "process data column sidecars from reconstruction", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -38,7 +38,7 @@ func (s *Service) dataColumnSubscriber(ctx context.Context, msg proto.Message) e
|
||||
|
||||
wg.Go(func() error {
|
||||
if err := s.processDataColumnSidecarsFromExecution(ctx, peerdas.PopulateFromSidecar(sidecar)); err != nil {
|
||||
return errors.Wrap(err, "process data column sidecars from execution")
|
||||
return wrapDataColumnError(sidecar, "process data column sidecars from execution", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -110,3 +110,7 @@ func (s *Service) allDataColumnSubnets(_ primitives.Slot) map[uint64]bool {
|
||||
|
||||
return allSubnets
|
||||
}
|
||||
|
||||
func wrapDataColumnError(sidecar blocks.VerifiedRODataColumn, message string, err error) error {
|
||||
return fmt.Errorf("%s - slot %d, root %s: %w", message, sidecar.SignedBlockHeader.Header.Slot, fmt.Sprintf("%#x", sidecar.BlockRoot()), err)
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -32,6 +33,7 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -58,10 +60,11 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
topic := "/eth2/%x/voluntary_exit"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
handler := func(_ context.Context, msg proto.Message) error {
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
m, ok := msg.(*pb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok, "Object is not of type *pb.SignedVoluntaryExit")
|
||||
if m.Exit == nil || m.Exit.Epoch != 55 {
|
||||
@@ -69,15 +72,10 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
}
|
||||
wg.Done()
|
||||
return nil
|
||||
}
|
||||
|
||||
tf := NewVoluntaryExitTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, handler, tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
}, nse)
|
||||
r.markForChainStart()
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
|
||||
p2pService.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -112,22 +110,19 @@ func TestSubscribe_UnsubscribeTopic(t *testing.T) {
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
topic := "/eth2/%x/voluntary_exit"
|
||||
|
||||
tf := staticTopicFamily{
|
||||
name: "VoluntaryExitTopicFamily",
|
||||
topics: []string{topic},
|
||||
}
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, &tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}, nse)
|
||||
r.markForChainStart()
|
||||
assert.Equal(t, true, r.subHandler.topicExists(topic))
|
||||
|
||||
fullTopic := fmt.Sprintf(topic, p2pService.Digest) + p2pService.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, r.subHandler.topicExists(fullTopic))
|
||||
topics := p2pService.PubSub().GetTopics()
|
||||
assert.Equal(t, topic, topics[0])
|
||||
assert.Equal(t, fullTopic, topics[0])
|
||||
|
||||
tf.UnsubscribeAll()
|
||||
r.unSubscribeFromTopic(fullTopic)
|
||||
|
||||
assert.Equal(t, false, r.subHandler.topicExists(topic))
|
||||
assert.Equal(t, false, r.subHandler.topicExists(fullTopic))
|
||||
assert.Equal(t, 0, len(p2pService.PubSub().GetTopics()))
|
||||
|
||||
}
|
||||
@@ -162,20 +157,16 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/attester_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
|
||||
tf := NewAttesterSlashingTopicFamily(&r, nse)
|
||||
tf.baseTopicFamily.validator = r.noopValidator
|
||||
tf.baseTopicFamily.handler = func(ctx context.Context, msg proto.Message) error {
|
||||
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.attesterSlashingSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}
|
||||
tf.Subscribe()
|
||||
|
||||
}, nse)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
chainService.State = beaconState
|
||||
r.markForChainStart()
|
||||
@@ -187,7 +178,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
|
||||
require.NoError(t, err, "Error generating attester slashing")
|
||||
err = r.cfg.beaconDB.SaveState(ctx, beaconState, bytesutil.ToBytes32(attesterSlashing.FirstAttestation().GetData().BeaconBlockRoot))
|
||||
require.NoError(t, err)
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), attesterSlashing)
|
||||
p2pService.ReceivePubSub(topic, attesterSlashing)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -219,22 +210,18 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/proposer_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
|
||||
tf := NewProposerSlashingTopicFamily(&r, nse)
|
||||
tf.baseTopicFamily.validator = r.noopValidator
|
||||
tf.baseTopicFamily.handler = func(ctx context.Context, msg proto.Message) error {
|
||||
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.proposerSlashingSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}
|
||||
tf.Subscribe()
|
||||
|
||||
}, nse)
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
chainService.State = beaconState
|
||||
r.markForChainStart()
|
||||
@@ -245,7 +232,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err, "Error generating proposer slashing")
|
||||
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), proposerSlashing)
|
||||
p2pService.ReceivePubSub(topic, proposerSlashing)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -275,27 +262,70 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p.Digest = nse.ForkDigest
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeFor[*pb.SignedVoluntaryExit]()]
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
tf := NewVoluntaryExitTopicFamily(&r, nse)
|
||||
handler := func(_ context.Context, msg proto.Message) error {
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
defer wg.Done()
|
||||
panic("bad")
|
||||
}
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, handler, tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
|
||||
}, nse)
|
||||
r.markForChainStart()
|
||||
p.ReceivePubSub(tf.getFullTopicString(), &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
p.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
hook := logTest.NewGlobal()
|
||||
chain := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
r := Service{
|
||||
ctx: t.Context(),
|
||||
cfg: &config{
|
||||
chain: chain,
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
p2p: p,
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
|
||||
params := subscribeParameters{
|
||||
topicFormat: "/eth2/testing/%#x/committee%d",
|
||||
nse: nse,
|
||||
}
|
||||
tracker := newSubnetTracker(params)
|
||||
|
||||
// committee index 1
|
||||
c1 := uint64(1)
|
||||
fullTopic := params.fullTopic(c1, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal := r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
require.NoError(t, r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal))
|
||||
sub1, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c1, sub1)
|
||||
|
||||
// committee index 2
|
||||
c2 := uint64(2)
|
||||
fullTopic = params.fullTopic(c2, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal = r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
err = r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal)
|
||||
require.NoError(t, err)
|
||||
sub2, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c2, sub2)
|
||||
|
||||
r.pruneNotWanted(tracker, map[uint64]bool{c2: true})
|
||||
require.LogsDoNotContain(t, hook, "Could not unregister topic validator")
|
||||
}
|
||||
|
||||
func Test_wrapAndReportValidation(t *testing.T) {
|
||||
mChain := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
@@ -416,15 +446,11 @@ func TestFilterSubnetPeers(t *testing.T) {
|
||||
cfg.SlotDurationMilliseconds = 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Save the current flags to restore them after the test
|
||||
resetFlags := flags.Get()
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 4
|
||||
flags.Init(gFlags)
|
||||
// Reset flags.
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
@@ -454,7 +480,6 @@ func TestFilterSubnetPeers(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, &r)
|
||||
markInitSyncComplete(t, &r)
|
||||
// Empty cache at the end of the test.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
@@ -528,12 +553,11 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
|
||||
currEpoch := slots.ToEpoch(slot)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
|
||||
tfDyn := NewSyncCommitteeTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, tfDyn)
|
||||
tfDyn.baseTopicFamily = base
|
||||
tfDyn.SubscribeForSlot(slot)
|
||||
|
||||
go r.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
})
|
||||
time.Sleep(2 * time.Second)
|
||||
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
topicMap := map[string]bool{}
|
||||
@@ -578,11 +602,12 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), nse.ForkVersion)
|
||||
require.Equal(t, params.BeaconConfig().DenebForkEpoch, nse.Epoch)
|
||||
|
||||
tfDyn2 := NewSyncCommitteeTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, tfDyn2)
|
||||
tfDyn2.baseTopicFamily = base
|
||||
tfDyn2.SubscribeForSlot(r.cfg.clock.CurrentSlot())
|
||||
|
||||
sp := newSubnetTracker(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
})
|
||||
r.trySubscribeSubnets(sp)
|
||||
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
topicMap := map[string]bool{}
|
||||
for _, t := range r.cfg.p2p.PubSub().GetTopics() {
|
||||
@@ -601,14 +626,11 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().ElectraForkVersion), nse.ForkVersion)
|
||||
require.Equal(t, params.BeaconConfig().ElectraForkEpoch, nse.Epoch)
|
||||
|
||||
tfDyn2.nse = nse
|
||||
sp.nse = nse
|
||||
// Clear the cache and re-subscribe to subnets.
// This should result in the subscriptions being removed.
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
|
||||
slot := r.cfg.clock.CurrentSlot()
|
||||
tfDyn2.UnsubscribeForSlot(slot)
|
||||
tfDyn2.SubscribeForSlot(r.cfg.clock.CurrentSlot())
|
||||
r.trySubscribeSubnets(sp)
|
||||
assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
}
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type topicFamilyKey struct {
|
||||
topicName string
|
||||
forkDigest [4]byte
|
||||
}
|
||||
|
||||
func topicFamilyKeyFrom(tf TopicFamily) topicFamilyKey {
|
||||
return topicFamilyKey{topicName: tf.Name(), forkDigest: tf.NetworkScheduleEntry().ForkDigest}
|
||||
}
|
||||
|
||||
type SubscriptionController struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
syncService *Service
|
||||
wg sync.WaitGroup
|
||||
|
||||
mu sync.RWMutex
|
||||
activeTopicFamilies map[topicFamilyKey]TopicFamily
|
||||
}
|
||||
|
||||
func NewSubscriptionController(ctx context.Context, s *Service) *SubscriptionController {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return &SubscriptionController{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
syncService: s,
|
||||
activeTopicFamilies: make(map[topicFamilyKey]TopicFamily),
|
||||
}
|
||||
}
|
||||
|
||||
func (g *SubscriptionController) Start() {
|
||||
currentEpoch := g.syncService.cfg.clock.CurrentEpoch()
|
||||
if err := g.syncService.waitForInitialSync(g.ctx); err != nil {
|
||||
log.WithError(err).Debug("Context cancelled while waiting for initial sync, not starting SubscriptionController")
|
||||
return
|
||||
}
|
||||
|
||||
g.updateActiveTopicFamilies(currentEpoch)
|
||||
g.wg.Go(func() { g.controlLoop() })
|
||||
|
||||
log.Info("SubscriptionController started")
|
||||
}
|
||||
|
||||
func (g *SubscriptionController) controlLoop() {
|
||||
slotTicker := slots.NewSlotTicker(g.syncService.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
currentEpoch := g.syncService.cfg.clock.CurrentEpoch()
|
||||
g.updateActiveTopicFamilies(currentEpoch)
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *SubscriptionController) updateActiveTopicFamilies(currentEpoch primitives.Epoch) {
|
||||
slot := g.syncService.cfg.clock.CurrentSlot()
|
||||
currentNSE := params.GetNetworkScheduleEntry(currentEpoch)
|
||||
|
||||
families := TopicFamiliesForEpoch(currentEpoch, g.syncService, currentNSE)
|
||||
|
||||
// also subscribe to topics for the next epoch if there is a fork in the next epoch
|
||||
nextNSE := params.GetNetworkScheduleEntry(currentEpoch + 1)
|
||||
if currentNSE.Epoch != nextNSE.Epoch {
|
||||
families = append(families, TopicFamiliesForEpoch(nextNSE.Epoch, g.syncService, nextNSE)...)
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
// register topic families for the current NSE -> this is idempotent
|
||||
for _, family := range families {
|
||||
key := topicFamilyKeyFrom(family)
|
||||
existing, seen := g.activeTopicFamilies[key]
|
||||
if !seen {
|
||||
g.activeTopicFamilies[key] = family
|
||||
existing = family
|
||||
}
|
||||
|
||||
switch tf := existing.(type) {
|
||||
case DynamicShardedTopicFamily:
|
||||
tf.UnsubscribeForSlot(slot)
|
||||
tf.SubscribeForSlot(slot)
|
||||
case ShardedTopicFamily:
|
||||
if !seen {
|
||||
tf.Subscribe()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we are still in our genesis fork version then exit early.
|
||||
if currentNSE.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
return
|
||||
}
|
||||
if currentEpoch < currentNSE.Epoch+1 {
|
||||
return
|
||||
}
|
||||
previous := params.GetNetworkScheduleEntry(currentNSE.Epoch - 1)
|
||||
|
||||
// remove topic families for the previous NSE -> this is idempotent
|
||||
for key, family := range g.activeTopicFamilies {
|
||||
if key.forkDigest == previous.ForkDigest {
|
||||
|
||||
family.UnsubscribeAll()
|
||||
delete(g.activeTopicFamilies, key)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"topicName": key.topicName,
|
||||
"forkDigest": fmt.Sprintf("%#x", key.forkDigest),
|
||||
}).Info("Removed topic family")
|
||||
}
|
||||
}
|
||||
}
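// Illustrative timeline for a fork scheduled at epoch F, following the logic
// above (F is a placeholder, not a real epoch):
//
//	epoch F-1: families for both the current digest and the next fork's digest are
//	           registered, since the next epoch has a different schedule entry.
//	epoch F  : currentEpoch < currentNSE.Epoch+1, so previous-digest families are kept.
//	epoch F+1: families keyed by the previous digest are unsubscribed and removed.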
|
||||
|
||||
func (g *SubscriptionController) Stop() {
|
||||
g.cancel()
|
||||
g.wg.Wait()
|
||||
|
||||
g.mu.Lock()
|
||||
defer g.mu.Unlock()
|
||||
|
||||
for _, family := range g.activeTopicFamilies {
|
||||
family.UnsubscribeAll()
|
||||
}
|
||||
}
|
||||
|
||||
func (g *SubscriptionController) GetCurrentActiveTopicsWithMinPeerCount() map[string]int {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
slot := g.syncService.cfg.clock.CurrentSlot()
|
||||
topics := make(map[string]int)
|
||||
for _, f := range g.activeTopicFamilies {
|
||||
tfm, ok := f.(DynamicShardedTopicFamily)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for topic, count := range tfm.TopicsWithMinPeerCount(slot) {
|
||||
topics[topic] += count
|
||||
}
|
||||
}
|
||||
return topics
|
||||
}
|
||||
|
||||
func (g *SubscriptionController) ExtractTopics(_ context.Context, node *enode.Node) ([]string, error) {
|
||||
if node == nil {
|
||||
return nil, errors.New("enode is nil")
|
||||
}
|
||||
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
families := make([]DynamicShardedTopicFamily, 0, len(g.activeTopicFamilies))
|
||||
for _, f := range g.activeTopicFamilies {
|
||||
if tfm, ok := f.(DynamicShardedTopicFamily); ok {
|
||||
families = append(families, tfm)
|
||||
}
|
||||
}
|
||||
|
||||
// Collect topics from dynamic families only, de-duplicated.
|
||||
topicSet := make(map[string]struct{})
|
||||
for _, df := range families {
|
||||
topics, err := df.ExtractTopicsForNode(node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"topicFamily": fmt.Sprintf("%T", df),
|
||||
}).Debug("Failed to get topics for node from family")
|
||||
continue
|
||||
}
|
||||
for _, t := range topics {
|
||||
topicSet[t] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
out := make([]string, 0, len(topicSet))
|
||||
for t := range topicSet {
|
||||
out = append(out, t)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
@@ -1,545 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async/abool"
|
||||
mockChain "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/genesis"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var _ DynamicShardedTopicFamily = (*testDynFamly)(nil)
|
||||
|
||||
// testDynFamly is a test implementation of a dynamic-subnet topic family.
|
||||
type testDynFamly struct {
|
||||
baseTopicFamily
|
||||
topics []string
|
||||
name string
|
||||
topicsWithMinPeers map[string]int
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Validator() wrappedVal {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Handler() subHandler {
|
||||
return noopHandler
|
||||
}
|
||||
|
||||
func (f *testDynFamly) GetFullTopicString(subnet uint64) string {
|
||||
return fmt.Sprintf("topic-%d", subnet)
|
||||
}
|
||||
|
||||
func (f *testDynFamly) TopicsToSubscribeForSlot(_ primitives.Slot) []string {
|
||||
return f.topics
|
||||
}
|
||||
|
||||
func (f *testDynFamly) ExtractTopicsForNode(_ *enode.Node) ([]string, error) {
|
||||
return f.topics, nil
|
||||
}
|
||||
|
||||
func (f *testDynFamly) SubscribeForSlot(_ primitives.Slot) {
|
||||
f.baseTopicFamily.subscribeToTopics(f.topics)
|
||||
}
|
||||
|
||||
func (f *testDynFamly) UnsubscribeForSlot(_ primitives.Slot) {}
|
||||
|
||||
func (f *testDynFamly) TopicsWithMinPeerCount(_ primitives.Slot) map[string]int {
|
||||
return f.topicsWithMinPeers
|
||||
}
|
||||
|
||||
type staticTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
name string
|
||||
topics []string
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Validator() wrappedVal {
|
||||
return f.validator
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Handler() subHandler {
|
||||
return f.handler
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Subscribe() {
|
||||
f.baseTopicFamily.subscribeToTopics(f.topics)
|
||||
}
|
||||
|
||||
func testSubscriptionControllerService(t *testing.T, current primitives.Epoch) *Service {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: genesis.Time(),
|
||||
ValidatorsRoot: genesis.ValidatorsRoot(),
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: defaultClockWithTimeAtEpoch(current),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
initialSyncComplete: closedChan,
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(context.Background(), r)
|
||||
return r
|
||||
}
|
||||
|
||||
func TestSubscriptionController_CheckForNextEpochForkSubscriptions(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
altairForkEpoch := cfg.AltairForkEpoch
|
||||
bellatrixForkEpoch := cfg.BellatrixForkEpoch
|
||||
capellaForkEpoch := cfg.CapellaForkEpoch
|
||||
denebForkEpoch := cfg.DenebForkEpoch
|
||||
electraForkEpoch := cfg.ElectraForkEpoch
|
||||
fuluForkEpoch := cfg.FuluForkEpoch
|
||||
blobsidecarSubnetCount := cfg.BlobsidecarSubnetCount
|
||||
blobsidecarSubnetCountElectra := cfg.BlobsidecarSubnetCountElectra
|
||||
|
||||
// Pre-compute digests using current config state
|
||||
altairDigest := params.ForkDigest(altairForkEpoch)
|
||||
bellatrixDigest := params.ForkDigest(bellatrixForkEpoch)
|
||||
capellaDigest := params.ForkDigest(capellaForkEpoch)
|
||||
denebDigest := params.ForkDigest(denebForkEpoch)
|
||||
electraDigest := params.ForkDigest(electraForkEpoch)
|
||||
fuluDigest := params.ForkDigest(fuluForkEpoch)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcCreator func(t *testing.T) *Service
|
||||
checkRegistration func(t *testing.T, s *Service)
|
||||
epochAtRegistration func(primitives.Epoch) primitives.Epoch
|
||||
forkEpoch primitives.Epoch
|
||||
nextForkEpoch primitives.Epoch
|
||||
forkDigest [4]byte
|
||||
nextForkDigest [4]byte
|
||||
}{
|
||||
{
|
||||
name: "no fork in the next epoch",
|
||||
forkEpoch: altairForkEpoch,
|
||||
forkDigest: altairDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 2 },
|
||||
nextForkEpoch: bellatrixForkEpoch,
|
||||
nextForkDigest: bellatrixDigest,
|
||||
checkRegistration: func(t *testing.T, s *Service) {},
|
||||
},
|
||||
{
|
||||
name: "altair fork in the next epoch",
|
||||
forkEpoch: altairForkEpoch,
|
||||
forkDigest: altairDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
nextForkEpoch: bellatrixForkEpoch,
|
||||
nextForkDigest: bellatrixDigest,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), altairDigest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "capella fork in the next epoch",
|
||||
forkEpoch: capellaForkEpoch,
|
||||
forkDigest: capellaDigest,
|
||||
nextForkEpoch: denebForkEpoch,
|
||||
nextForkDigest: denebDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), capellaDigest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb fork in the next epoch",
|
||||
forkEpoch: denebForkEpoch,
|
||||
forkDigest: denebDigest,
|
||||
nextForkEpoch: electraForkEpoch,
|
||||
nextForkDigest: electraDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
subIndices := mapFromCount(blobsidecarSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, denebDigest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "electra fork in the next epoch",
|
||||
forkEpoch: electraForkEpoch,
|
||||
forkDigest: electraDigest,
|
||||
nextForkEpoch: fuluForkEpoch,
|
||||
nextForkDigest: fuluDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
subIndices := mapFromCount(blobsidecarSubnetCountElectra)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, electraDigest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu fork in the next epoch; should not have blob topics",
|
||||
forkEpoch: fuluForkEpoch,
|
||||
forkDigest: fuluDigest,
|
||||
nextForkEpoch: fuluForkEpoch,
|
||||
nextForkDigest: fuluDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
// Advance to two epochs after Fulu activation and assert no blob topics remain.
|
||||
target := fuluForkEpoch + 2
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(target)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
|
||||
for _, topic := range s.subHandler.allTopics() {
|
||||
if strings.Contains(topic, "/"+p2p.GossipBlobSidecarMessage) {
|
||||
t.Fatalf("blob topic still exists after Fulu+2: %s", topic)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testSubscriptionControllerService(t, current)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
if current != tt.forkEpoch-1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure the topics were registered for the upcoming fork
|
||||
// Use pre-computed digest from test struct to avoid race with parallel tests
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.forkDigest))
|
||||
|
||||
// After this point we are checking deregistration, which doesn't apply if there isn't a higher
|
||||
// nextForkEpoch.
|
||||
if tt.forkEpoch >= tt.nextForkEpoch {
|
||||
return
|
||||
}
|
||||
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
|
||||
s.subscriptionController.updateActiveTopicFamilies(tt.nextForkEpoch)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.forkDigest))
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
s.subscriptionController.updateActiveTopicFamilies(tt.nextForkEpoch + 1)
|
||||
assert.Equal(t, false, s.subHandler.digestExists(tt.forkDigest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.nextForkDigest))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscriptionController_ExtractTopics(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
|
||||
type tc struct {
|
||||
name string
|
||||
setup func(*SubscriptionController)
|
||||
ctx func() context.Context
|
||||
node *enode.Node
|
||||
want []string
|
||||
wantErr bool
|
||||
}
|
||||
|
||||
dummyNode := new(enode.Node)
|
||||
|
||||
tests := []tc{
|
||||
{
|
||||
name: "nil node returns error",
|
||||
setup: func(g *SubscriptionController) {},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: nil,
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no families yields empty",
|
||||
setup: func(g *SubscriptionController) {},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "static family ignored",
|
||||
setup: func(g *SubscriptionController) {
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "single dynamic family topics returned",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{topics: []string{"t1", "t2"}, name: "Dyn1"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"t1", "t2"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families de-dup",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{topics: []string{"t1", "t2"}, name: "Dyn1"}
|
||||
f2 := &testDynFamly{topics: []string{"t2", "t3"}, name: "Dyn2"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"t1", "t2", "t3"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "mixed static and dynamic",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{topics: []string{"a", "b"}, name: "Dyn"}
|
||||
s1 := &staticTopicFamily{name: "Static"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn", forkDigest: [4]byte{9}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{9}}] = s1
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"a", "b"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
s := &Service{}
|
||||
g := NewSubscriptionController(context.Background(), s)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset families for each subtest
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies = make(map[topicFamilyKey]TopicFamily)
|
||||
g.mu.Unlock()
|
||||
|
||||
tt.setup(g)
|
||||
topics, err := g.ExtractTopics(tt.ctx(), tt.node)
|
||||
if tt.wantErr {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
got := map[string]bool{}
|
||||
for _, tpc := range topics {
|
||||
got[tpc] = true
|
||||
}
|
||||
want := map[string]bool{}
|
||||
for _, tpc := range tt.want {
|
||||
want[tpc] = true
|
||||
}
|
||||
require.Equal(t, len(want), len(got))
|
||||
for k := range want {
|
||||
require.Equal(t, true, got[k], "missing topic %s", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscriptionController_GetCurrentActiveTopicsWithMinPeerCount(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(*SubscriptionController)
|
||||
want map[string]int
|
||||
}{
|
||||
{
|
||||
name: "no families yields empty map",
|
||||
setup: func(_ *SubscriptionController) {},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "static family ignored",
|
||||
setup: func(g *SubscriptionController) {
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "single dynamic family returns topics with min peer counts",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
},
|
||||
{
|
||||
name: "dynamic family with nil topicsWithMinPeers returns empty",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynNil",
|
||||
topicsWithMinPeers: nil,
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynnil", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "dynamic family with empty topicsWithMinPeers returns empty",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynEmpty",
|
||||
topicsWithMinPeers: map[string]int{},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynempty", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families with disjoint topics",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
}
|
||||
f2 := &testDynFamly{
|
||||
name: "Dyn2",
|
||||
topicsWithMinPeers: map[string]int{"topic/c": 4, "topic/d": 2},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/a": 8, "topic/b": 6, "topic/c": 4, "topic/d": 2},
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families with overlapping topics - counts are summed",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/shared": 5, "topic/a": 3},
|
||||
}
|
||||
f2 := &testDynFamly{
|
||||
name: "Dyn2",
|
||||
topicsWithMinPeers: map[string]int{"topic/shared": 7, "topic/b": 2},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
// topic/shared: 5 + 7 = 12
|
||||
want: map[string]int{"topic/shared": 12, "topic/a": 3, "topic/b": 2},
|
||||
},
|
||||
{
|
||||
name: "mixed static and dynamic families - only dynamic counted",
|
||||
setup: func(g *SubscriptionController) {
|
||||
dynFam := &testDynFamly{
|
||||
name: "Dyn",
|
||||
topicsWithMinPeers: map[string]int{"topic/dyn": 8},
|
||||
}
|
||||
staticFam := &staticTopicFamily{name: "Static"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn", forkDigest: [4]byte{9}}] = dynFam
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{9}}] = staticFam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/dyn": 8},
|
||||
},
|
||||
{
|
||||
name: "single topic with zero peer count",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynZero",
|
||||
topicsWithMinPeers: map[string]int{"topic/zero": 0},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynzero", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/zero": 0},
|
||||
},
|
||||
}
|
||||
|
||||
current := params.BeaconConfig().AltairForkEpoch
|
||||
s := testSubscriptionControllerService(t, current)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset families for each subtest
|
||||
s.subscriptionController.mu.Lock()
|
||||
s.subscriptionController.activeTopicFamilies = make(map[topicFamilyKey]TopicFamily)
|
||||
s.subscriptionController.mu.Unlock()
|
||||
|
||||
tt.setup(s.subscriptionController)
|
||||
got := s.subscriptionController.GetCurrentActiveTopicsWithMinPeerCount()
|
||||
|
||||
require.Equal(t, len(tt.want), len(got), "result length mismatch")
|
||||
for topic, expectedCount := range tt.want {
|
||||
actualCount, exists := got[topic]
|
||||
require.Equal(t, true, exists, "expected topic %s not found in result", topic)
|
||||
require.Equal(t, expectedCount, actualCount, "peer count mismatch for topic %s", topic)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -35,7 +35,6 @@ func (s *subTopicHandler) addTopic(topic string, sub *pubsub.Subscription) {
|
||||
s.digestMap[digest] += 1
|
||||
}
|
||||
|
||||
// topicExists checks if a topic is currently tracked.
|
||||
func (s *subTopicHandler) topicExists(topic string) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -65,7 +64,6 @@ func (s *subTopicHandler) removeTopic(topic string) {
|
||||
}
|
||||
}
|
||||
|
||||
// digestExists checks if a fork digest is currently tracked.
|
||||
func (s *subTopicHandler) digestExists(digest [4]byte) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -74,7 +72,6 @@ func (s *subTopicHandler) digestExists(digest [4]byte) bool {
|
||||
return ok && count > 0
|
||||
}
|
||||
|
||||
// allTopics returns all currently tracked topics.
|
||||
func (s *subTopicHandler) allTopics() []string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -86,7 +83,6 @@ func (s *subTopicHandler) allTopics() []string {
|
||||
return topics
|
||||
}
|
||||
|
||||
// subForTopic returns the subscription for a given topic.
|
||||
func (s *subTopicHandler) subForTopic(topic string) *pubsub.Subscription {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
@@ -1,277 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// AttestationTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*AttestationTopicFamily)(nil)
|
||||
|
||||
var (
|
||||
attestationMinMeshPeers = 8
|
||||
attestationMinFanoutPeers = 6
|
||||
syncCommitteeMinMeshPeers = 8
|
||||
syncCommitteeMinFanoutPeers = 6
|
||||
dataColumnMinMeshPeers = 6
|
||||
dataColumnMinFanoutPeers = 2
|
||||
)
|
||||
|
||||
type AttestationTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewAttestationTopicFamily creates a new AttestationTopicFamily.
|
||||
func NewAttestationTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AttestationTopicFamily {
|
||||
a := &AttestationTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateCommitteeIndexBeaconAttestation, s.committeeIndexBeaconAttestationSubscriber, a)
|
||||
a.baseTopicFamily = base
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *AttestationTopicFamily) Name() string {
|
||||
return "AttestationTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeForSlot subscribes to the topics for the given slot.
|
||||
func (a *AttestationTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
a.subscribeToTopics(a.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeForSlot unsubscribes from topics we no longer need for the slot.
|
||||
func (a *AttestationTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
a.pruneTopicsExcept(a.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeForSlot returns the topics to subscribe to for a given slot.
|
||||
func (a *AttestationTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(a.getSubnetsToJoin(slot), a)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for an attestation subnet.
|
||||
func (a *AttestationTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.AttestationSubnetTopic(a.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns persistent and aggregator subnets.
|
||||
func (a *AttestationTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return a.syncService.persistentAndAggregatorSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns subnets needed for attestation duties.
|
||||
func (a *AttestationTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return attesterSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (a *AttestationTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(a.syncService, a, node, p2p.AttestationSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (a *AttestationTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(a, slot, attestationMinMeshPeers, attestationMinFanoutPeers)
|
||||
}
|
||||
|
||||
// SyncCommitteeTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*SyncCommitteeTopicFamily)(nil)
|
||||
|
||||
type SyncCommitteeTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewSyncCommitteeTopicFamily creates a new SyncCommitteeTopicFamily.
|
||||
func NewSyncCommitteeTopicFamily(s *Service, nse params.NetworkScheduleEntry) *SyncCommitteeTopicFamily {
|
||||
sc := &SyncCommitteeTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateSyncCommitteeMessage, s.syncCommitteeMessageSubscriber, sc)
|
||||
sc.baseTopicFamily = base
|
||||
return sc
|
||||
}
|
||||
|
||||
func (s *SyncCommitteeTopicFamily) Name() string {
|
||||
return "SyncCommitteeTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeFor subscribes to the topics for the given slot.
|
||||
func (s *SyncCommitteeTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
s.subscribeToTopics(s.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeFor unsubscribes from topics we no longer need for the slot.
|
||||
func (s *SyncCommitteeTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
s.pruneTopicsExcept(s.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeFor returns the topics to subscribe to for a given slot.
|
||||
func (s *SyncCommitteeTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(s.getSubnetsToJoin(slot), s)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for a sync committee subnet.
|
||||
func (s *SyncCommitteeTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.SyncCommitteeSubnetTopic(s.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns active sync committee subnets.
|
||||
func (s *SyncCommitteeTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return s.syncService.activeSyncSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns nil as there are no separate peer requirements.
|
||||
func (s *SyncCommitteeTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (s *SyncCommitteeTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(s.syncService, s, node, p2p.SyncSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (s *SyncCommitteeTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(s, slot, syncCommitteeMinMeshPeers, syncCommitteeMinFanoutPeers)
|
||||
}
|
||||
|
||||
// DataColumnTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*DataColumnTopicFamily)(nil)
|
||||
|
||||
type DataColumnTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewDataColumnTopicFamily creates a new DataColumnTopicFamily.
|
||||
func NewDataColumnTopicFamily(s *Service, nse params.NetworkScheduleEntry) *DataColumnTopicFamily {
|
||||
d := &DataColumnTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateDataColumn, s.dataColumnSubscriber, d)
|
||||
d.baseTopicFamily = base
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *DataColumnTopicFamily) Name() string {
|
||||
return "DataColumnTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeFor subscribes to the topics for the given slot.
|
||||
func (d *DataColumnTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
d.subscribeToTopics(d.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeForSlot unsubscribes from topics we no longer need for the slot.
|
||||
func (d *DataColumnTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
d.pruneTopicsExcept(d.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeFor returns the topics to subscribe to for a given slot.
|
||||
func (d *DataColumnTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(d.getSubnetsToJoin(slot), d)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for a data column subnet.
|
||||
func (d *DataColumnTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.DataColumnSubnetTopic(d.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns data column subnets.
|
||||
func (d *DataColumnTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return d.syncService.dataColumnSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns all data column subnets.
|
||||
func (d *DataColumnTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return d.syncService.allDataColumnSubnets(slot)
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (d *DataColumnTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(d.syncService, d, node, p2p.DataColumnSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (d *DataColumnTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(d, slot, dataColumnMinMeshPeers, dataColumnMinFanoutPeers)
|
||||
}
|
||||
|
||||
type nodeSubnetExtractor func(id enode.ID, n *enode.Node, r *enr.Record) (map[uint64]bool, error)
|
||||
|
||||
type dynamicSubnetFamily interface {
|
||||
getSubnetsToJoin(primitives.Slot) map[uint64]bool
|
||||
getSubnetsForBroadcast(primitives.Slot) map[uint64]bool
|
||||
getFullTopicString(subnet uint64) string
|
||||
}
|
||||
|
||||
func getTopicsForNode(
|
||||
s *Service,
|
||||
tf dynamicSubnetFamily,
|
||||
node *enode.Node,
|
||||
extractor nodeSubnetExtractor,
|
||||
) ([]string, error) {
|
||||
if node == nil {
|
||||
return nil, errors.New("enode is nil")
|
||||
}
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
neededSubnets := computeNeededSubnets(tf, currentSlot)
|
||||
|
||||
nodeSubnets, err := extractor(node.ID(), node, node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var topics []string
|
||||
for subnet := range neededSubnets {
|
||||
if nodeSubnets[subnet] {
|
||||
topics = append(topics, tf.getFullTopicString(subnet))
|
||||
}
|
||||
}
|
||||
return topics, nil
|
||||
}
|
||||
|
||||
func computeNeededSubnets(tf dynamicSubnetFamily, slot primitives.Slot) map[uint64]bool {
|
||||
subnetsToJoin := tf.getSubnetsToJoin(slot)
|
||||
subnetsRequiringPeers := tf.getSubnetsForBroadcast(slot)
|
||||
|
||||
neededSubnets := make(map[uint64]bool, len(subnetsToJoin)+len(subnetsRequiringPeers))
|
||||
for subnet := range subnetsToJoin {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
for subnet := range subnetsRequiringPeers {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
return neededSubnets
|
||||
}
|
||||
|
||||
func topicsFromSubnets(subnets map[uint64]bool, tf dynamicSubnetFamily) []string {
|
||||
topics := make([]string, 0, len(subnets))
|
||||
for s := range subnets {
|
||||
topics = append(topics, tf.getFullTopicString(s))
|
||||
}
|
||||
return topics
|
||||
}
|
||||
|
||||
// topicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
// If a subnet appears in both mesh and fanout, the mesh peer count is used.
|
||||
func topicsWithMinPeerCount(tf dynamicSubnetFamily, slot primitives.Slot, minMeshPeers int, minFanoutPeers int) map[string]int {
|
||||
meshSubnets := tf.getSubnetsToJoin(slot)
|
||||
fanoutSubnets := tf.getSubnetsForBroadcast(slot)
|
||||
|
||||
result := make(map[string]int, len(meshSubnets)+len(fanoutSubnets))
|
||||
|
||||
// Add mesh topics with mesh min peer count
|
||||
for subnet := range meshSubnets {
|
||||
topic := tf.getFullTopicString(subnet)
|
||||
result[topic] = minMeshPeers
|
||||
}
|
||||
|
||||
// Add fanout topics with fanout min peer count (only if not already in mesh)
|
||||
for subnet := range fanoutSubnets {
|
||||
topic := tf.getFullTopicString(subnet)
|
||||
if _, exists := result[topic]; !exists {
|
||||
result[topic] = minFanoutPeers
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
@@ -1,137 +0,0 @@
package sync

import (
    "fmt"
    "testing"

    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/stretchr/testify/require"
)

// mockDynamicSubnetFamily is a test implementation of dynamicSubnetFamily.
type mockDynamicSubnetFamily struct {
    meshSubnets map[uint64]bool
    fanoutSubnets map[uint64]bool
    topicPrefix string
}

func (m *mockDynamicSubnetFamily) getSubnetsToJoin(_ primitives.Slot) map[uint64]bool {
    return m.meshSubnets
}

func (m *mockDynamicSubnetFamily) getSubnetsForBroadcast(_ primitives.Slot) map[uint64]bool {
    return m.fanoutSubnets
}

func (m *mockDynamicSubnetFamily) getFullTopicString(subnet uint64) string {
    return fmt.Sprintf("%s/subnet/%d", m.topicPrefix, subnet)
}

func TestTopicsWithMinPeerCount(t *testing.T) {
    tests := []struct {
        name string
        meshSubnets map[uint64]bool
        fanoutSubnets map[uint64]bool
        minMeshPeers int
        minFanoutPeers int
        expected map[string]int
    }{
        {
            name: "empty subnets returns empty map",
            meshSubnets: nil,
            fanoutSubnets: nil,
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{},
        },
        {
            name: "empty maps returns empty map",
            meshSubnets: map[uint64]bool{},
            fanoutSubnets: map[uint64]bool{},
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{},
        },
        {
            name: "only mesh subnets",
            meshSubnets: map[uint64]bool{1: true, 2: true, 3: true},
            fanoutSubnets: nil,
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{
                "test/subnet/1": 8,
                "test/subnet/2": 8,
                "test/subnet/3": 8,
            },
        },
        {
            name: "only fanout subnets",
            meshSubnets: nil,
            fanoutSubnets: map[uint64]bool{4: true, 5: true},
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{
                "test/subnet/4": 6,
                "test/subnet/5": 6,
            },
        },
        {
            name: "mesh and fanout with no overlap",
            meshSubnets: map[uint64]bool{1: true, 2: true},
            fanoutSubnets: map[uint64]bool{3: true, 4: true},
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{
                "test/subnet/1": 8,
                "test/subnet/2": 8,
                "test/subnet/3": 6,
                "test/subnet/4": 6,
            },
        },
        {
            name: "fanout subset of mesh - all get mesh peer count",
            meshSubnets: map[uint64]bool{1: true, 2: true, 3: true, 4: true},
            fanoutSubnets: map[uint64]bool{2: true, 3: true},
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{
                "test/subnet/1": 8,
                "test/subnet/2": 8, // in both, mesh takes precedence
                "test/subnet/3": 8, // in both, mesh takes precedence
                "test/subnet/4": 8,
            },
        },
        {
            name: "mesh subset of fanout - mesh subnets get mesh count, remaining get fanout",
            meshSubnets: map[uint64]bool{2: true, 3: true},
            fanoutSubnets: map[uint64]bool{1: true, 2: true, 3: true, 4: true},
            minMeshPeers: 8,
            minFanoutPeers: 6,
            expected: map[string]int{
                "test/subnet/1": 6, // fanout only
                "test/subnet/2": 8, // in both, mesh takes precedence
                "test/subnet/3": 8, // in both, mesh takes precedence
                "test/subnet/4": 6, // fanout only
            },
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            mock := &mockDynamicSubnetFamily{
                meshSubnets: tt.meshSubnets,
                fanoutSubnets: tt.fanoutSubnets,
                topicPrefix: "test",
            }

            result := topicsWithMinPeerCount(mock, 0, tt.minMeshPeers, tt.minFanoutPeers)

            require.Equal(t, len(tt.expected), len(result), "result length mismatch")
            for topic, expectedCount := range tt.expected {
                actualCount, exists := result[topic]
                require.True(t, exists, "expected topic %s not found in result", topic)
                require.Equal(t, expectedCount, actualCount, "peer count mismatch for topic %s", topic)
            }
        })
    }
}
@@ -1,38 +0,0 @@
package sync

import (
    "fmt"

    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v7/config/params"
)

var _ ShardedTopicFamily = (*BlobTopicFamily)(nil)

// BlobTopicFamily represents a static-subnet family instance for a specific blob subnet index.
type BlobTopicFamily struct {
    *baseTopicFamily
    subnetIndex uint64
}

func NewBlobTopicFamily(s *Service, nse params.NetworkScheduleEntry, subnetIndex uint64) *BlobTopicFamily {
    b := &BlobTopicFamily{
        subnetIndex: subnetIndex,
    }
    base := newBaseTopicFamily(s, nse, s.validateBlob, s.blobSubscriber, b)
    b.baseTopicFamily = base
    return b
}

func (b *BlobTopicFamily) Name() string {
    return fmt.Sprintf("BlobTopicFamily-%d", b.subnetIndex)
}

// Subscribe subscribes to the static subnet topic. Slot is ignored for this topic family.
func (b *BlobTopicFamily) Subscribe() {
    b.subscribeToTopics([]string{b.getFullTopicString()})
}

func (b *BlobTopicFamily) getFullTopicString() string {
    return p2p.BlobSubnetTopic(b.nse.ForkDigest, b.subnetIndex)
}
@@ -1,247 +0,0 @@
package sync

import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
    "github.com/OffchainLabs/prysm/v7/config/params"
)

// Blocks
var _ ShardedTopicFamily = (*BlockTopicFamily)(nil)

type BlockTopicFamily struct {
    *baseTopicFamily
}

func NewBlockTopicFamily(s *Service, nse params.NetworkScheduleEntry) *BlockTopicFamily {
    b := &BlockTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, b)
    b.baseTopicFamily = base
    return b
}

func (b *BlockTopicFamily) Name() string {
    return "BlockTopicFamily"
}

// Subscribe subscribes to the topic.
func (b *BlockTopicFamily) Subscribe() {
    b.subscribeToTopics([]string{b.getFullTopicString()})
}

func (b *BlockTopicFamily) getFullTopicString() string {
    return p2p.BlockSubnetTopic(b.nse.ForkDigest)
}

// Aggregate and Proof
var _ ShardedTopicFamily = (*AggregateAndProofTopicFamily)(nil)

type AggregateAndProofTopicFamily struct {
    *baseTopicFamily
}

func NewAggregateAndProofTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AggregateAndProofTopicFamily {
    a := &AggregateAndProofTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, a)
    a.baseTopicFamily = base
    return a
}

func (a *AggregateAndProofTopicFamily) Name() string {
    return "AggregateAndProofTopicFamily"
}

// Subscribe subscribes to the topic.
func (a *AggregateAndProofTopicFamily) Subscribe() {
    a.subscribeToTopics([]string{a.getFullTopicString()})
}

func (a *AggregateAndProofTopicFamily) getFullTopicString() string {
    return p2p.AggregateAndProofSubnetTopic(a.nse.ForkDigest)
}

// Voluntary Exit
var _ ShardedTopicFamily = (*VoluntaryExitTopicFamily)(nil)

type VoluntaryExitTopicFamily struct {
    *baseTopicFamily
}

func NewVoluntaryExitTopicFamily(s *Service, nse params.NetworkScheduleEntry) *VoluntaryExitTopicFamily {
    v := &VoluntaryExitTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateVoluntaryExit, s.voluntaryExitSubscriber, v)
    v.baseTopicFamily = base
    return v
}

func (v *VoluntaryExitTopicFamily) Name() string {
    return "VoluntaryExitTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (v *VoluntaryExitTopicFamily) Subscribe() {
    v.subscribeToTopics([]string{v.getFullTopicString()})
}

func (v *VoluntaryExitTopicFamily) getFullTopicString() string {
    return p2p.VoluntaryExitSubnetTopic(v.nse.ForkDigest)
}

// Proposer Slashing
var _ ShardedTopicFamily = (*ProposerSlashingTopicFamily)(nil)

type ProposerSlashingTopicFamily struct {
    *baseTopicFamily
}

func NewProposerSlashingTopicFamily(s *Service, nse params.NetworkScheduleEntry) *ProposerSlashingTopicFamily {
    p := &ProposerSlashingTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateProposerSlashing, s.proposerSlashingSubscriber, p)
    p.baseTopicFamily = base
    return p
}

func (p *ProposerSlashingTopicFamily) Name() string {
    return "ProposerSlashingTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (p *ProposerSlashingTopicFamily) Subscribe() {
    p.subscribeToTopics([]string{p.getFullTopicString()})
}

func (p *ProposerSlashingTopicFamily) getFullTopicString() string {
    return p2p.ProposerSlashingSubnetTopic(p.nse.ForkDigest)
}

// Attester Slashing
var _ ShardedTopicFamily = (*AttesterSlashingTopicFamily)(nil)

type AttesterSlashingTopicFamily struct {
    *baseTopicFamily
}

func NewAttesterSlashingTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AttesterSlashingTopicFamily {
    a := &AttesterSlashingTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateAttesterSlashing, s.attesterSlashingSubscriber, a)
    a.baseTopicFamily = base
    return a
}

func (a *AttesterSlashingTopicFamily) Name() string {
    return "AttesterSlashingTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (a *AttesterSlashingTopicFamily) Subscribe() {
    a.subscribeToTopics([]string{a.getFullTopicString()})
}

func (a *AttesterSlashingTopicFamily) getFullTopicString() string {
    return p2p.AttesterSlashingSubnetTopic(a.nse.ForkDigest)
}

// Sync Contribution and Proof (Altair+)
var _ ShardedTopicFamily = (*SyncContributionAndProofTopicFamily)(nil)

type SyncContributionAndProofTopicFamily struct{ *baseTopicFamily }

func NewSyncContributionAndProofTopicFamily(s *Service, nse params.NetworkScheduleEntry) *SyncContributionAndProofTopicFamily {
    sc := &SyncContributionAndProofTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateSyncContributionAndProof, s.syncContributionAndProofSubscriber, sc)
    sc.baseTopicFamily = base
    return sc
}

func (sc *SyncContributionAndProofTopicFamily) Name() string {
    return "SyncContributionAndProofTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (sc *SyncContributionAndProofTopicFamily) Subscribe() {
    sc.subscribeToTopics([]string{sc.getFullTopicString()})
}

func (sc *SyncContributionAndProofTopicFamily) getFullTopicString() string {
    return p2p.SyncContributionAndProofSubnetTopic(sc.nse.ForkDigest)
}

// Light Client Optimistic Update (Altair+)
var _ ShardedTopicFamily = (*LightClientOptimisticUpdateTopicFamily)(nil)

type LightClientOptimisticUpdateTopicFamily struct {
    *baseTopicFamily
}

func NewLightClientOptimisticUpdateTopicFamily(s *Service, nse params.NetworkScheduleEntry) *LightClientOptimisticUpdateTopicFamily {
    l := &LightClientOptimisticUpdateTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateLightClientOptimisticUpdate, noopHandler, l)
    l.baseTopicFamily = base
    return l
}

func (l *LightClientOptimisticUpdateTopicFamily) Name() string {
    return "LightClientOptimisticUpdateTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (l *LightClientOptimisticUpdateTopicFamily) Subscribe() {
    l.subscribeToTopics([]string{l.getFullTopicString()})
}

func (l *LightClientOptimisticUpdateTopicFamily) getFullTopicString() string {
    return p2p.LcOptimisticToTopic(l.nse.ForkDigest)
}

// Light Client Finality Update (Altair+)
var _ ShardedTopicFamily = (*LightClientFinalityUpdateTopicFamily)(nil)

type LightClientFinalityUpdateTopicFamily struct {
    *baseTopicFamily
}

func NewLightClientFinalityUpdateTopicFamily(s *Service, nse params.NetworkScheduleEntry) *LightClientFinalityUpdateTopicFamily {
    l := &LightClientFinalityUpdateTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateLightClientFinalityUpdate, noopHandler, l)
    l.baseTopicFamily = base
    return l
}

func (l *LightClientFinalityUpdateTopicFamily) Name() string {
    return "LightClientFinalityUpdateTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (l *LightClientFinalityUpdateTopicFamily) Subscribe() {
    l.subscribeToTopics([]string{l.getFullTopicString()})
}

func (l *LightClientFinalityUpdateTopicFamily) getFullTopicString() string {
    return p2p.LcFinalityToTopic(l.nse.ForkDigest)
}

// BLS to Execution Change (Capella+)
var _ ShardedTopicFamily = (*BlsToExecutionChangeTopicFamily)(nil)

type BlsToExecutionChangeTopicFamily struct {
    *baseTopicFamily
}

func NewBlsToExecutionChangeTopicFamily(s *Service, nse params.NetworkScheduleEntry) *BlsToExecutionChangeTopicFamily {
    b := &BlsToExecutionChangeTopicFamily{}
    base := newBaseTopicFamily(s, nse, s.validateBlsToExecutionChange, s.blsToExecutionChangeSubscriber, b)
    b.baseTopicFamily = base
    return b
}

func (b *BlsToExecutionChangeTopicFamily) Name() string {
    return "BlsToExecutionChangeTopicFamily"
}

// Subscribe subscribes to the topic. Slot is ignored for this topic family.
func (b *BlsToExecutionChangeTopicFamily) Subscribe() {
    b.subscribeToTopics([]string{b.getFullTopicString()})
}

func (b *BlsToExecutionChangeTopicFamily) getFullTopicString() string {
    return p2p.BlsToExecutionChangeSubnetTopic(b.nse.ForkDigest)
}
@@ -51,14 +51,12 @@ func (s *Service) validateDataColumn(ctx context.Context, pid peer.ID, msg *pubs
    // Decode the message, reject if it fails.
    m, err := s.decodePubsubMessage(msg)
    if err != nil {
        log.WithError(err).Error("Failed to decode message")
        return pubsub.ValidationReject, err
    }

    // Reject messages that are not of the expected type.
    dcsc, ok := m.(*eth.DataColumnSidecar)
    if !ok {
        log.WithField("message", m).Error("Message is not of type *eth.DataColumnSidecar")
        return pubsub.ValidationReject, errWrongMessage
    }

@@ -54,11 +54,13 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -101,7 +103,10 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)
@@ -180,11 +185,13 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    cfg.CapellaForkEpoch = 3
    cfg.DenebForkEpoch = 4
    cfg.ElectraForkEpoch = 5
    cfg.FuluForkEpoch = 6
    cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
    cfg.ForkVersionSchedule[[4]byte{2, 0, 0, 0}] = 2
    cfg.ForkVersionSchedule[[4]byte{3, 0, 0, 0}] = 3
    cfg.ForkVersionSchedule[[4]byte{4, 0, 0, 0}] = 4
    cfg.ForkVersionSchedule[[4]byte{5, 0, 0, 0}] = 5
    cfg.ForkVersionSchedule[[4]byte{6, 0, 0, 0}] = 6
    params.OverrideBeaconConfig(cfg)

    secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
@@ -227,7 +234,10 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
    }

    for _, test := range tests {
        for v := 1; v < 6; v++ {
        for v := range version.All() {
            if v == version.Phase0 {
                continue
            }
            t.Run(test.name+"_"+version.String(v), func(t *testing.T) {
                ctx := t.Context()
                p := p2ptest.NewTestP2P(t)

@@ -361,7 +361,7 @@ func (dv *RODataColumnsVerifier) SidecarParentSeen(parentSeen func([fieldparams.
        }

        if !dv.fc.HasNode(parentRoot) {
            return columnErrBuilder(errSidecarParentNotSeen)
            return columnErrBuilder(errors.Wrapf(errSidecarParentNotSeen, "parent root: %#x", parentRoot))
        }
    }
3
changelog/SashaMalysehko_fix-return-after-check.md
Normal file
@@ -0,0 +1,3 @@
## Fixed

- Fix missing return after version header check in SubmitAttesterSlashingsV2.
2
changelog/aarsh-revert-autonatv2.md
Normal file
@@ -0,0 +1,2 @@
### Ignored
- Reverts AutoNatV2 change introduced in https://github.com/OffchainLabs/prysm/pull/16100 as the libp2p upgrade fails inter-op testing.
@@ -1,3 +0,0 @@
### Added

- Add support for detecting and logging per address reachability via libp2p AutoNAT v2.
@@ -1,3 +0,0 @@
### Added

- A Gossipsub control plane with topic abstractions, a peer crawler and a peer controller.
3
changelog/avoid_kzg_send_after_context_cancel.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Prevent blocked sends to the KZG batch verifier when the caller context is already canceled, avoiding useless queueing and potential hangs.
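The entry above describes a standard Go pattern: check the caller's context before handing work to a bounded queue, and keep respecting cancellation while waiting for queue space. A minimal illustrative sketch follows; the names (kzgqueue, verifyRequest, enqueue) are hypothetical and are not taken from the Prysm code in this diff.

package kzgqueue

import "context"

// verifyRequest stands in for a batch of KZG proofs awaiting verification.
type verifyRequest struct{ /* batch payload elided */ }

// enqueue hands req to the verifier goroutine unless the caller's context is
// already canceled, or gets canceled while the queue is full.
func enqueue(ctx context.Context, queue chan<- verifyRequest, req verifyRequest) error {
    if err := ctx.Err(); err != nil {
        // The caller already gave up; skip useless queueing.
        return err
    }
    select {
    case queue <- req: // queue has room; the batch will be verified
        return nil
    case <-ctx.Done(): // caller canceled while waiting for queue space
        return ctx.Err()
    }
}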
3
changelog/bastin_fix-lcp2p-bug.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix the missing fork version object mapping for Fulu in light client p2p.
3
changelog/fix_kzg_batch_verifier_timeout_deadlock.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fix a deadlock in data column gossip KZG batch verification when a caller times out, preventing result delivery.
3
changelog/james-prysm_align-atter-pool-apis.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- The /eth/v2/beacon/pool/attestations and /eth/v1/beacon/pool/sync_committees endpoints now return a 503 error if the node is still syncing; the REST API also broadcasts immediately now, matching the gRPC behavior.
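The pool-API entry above can be illustrated with a small net/http sketch: reject submissions with 503 while the node is syncing, otherwise broadcast right away. The syncChecker interface and poolHandler type below are hypothetical stand-ins, not the actual Prysm handlers.

package api

import "net/http"

// syncChecker reports whether the node is still syncing.
type syncChecker interface {
    Syncing() bool
}

type poolHandler struct {
    syncer syncChecker
}

// ServeHTTP returns 503 Service Unavailable while syncing, mirroring the
// behavior described in the changelog entry above.
func (h *poolHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if h.syncer.Syncing() {
        http.Error(w, "beacon node is currently syncing", http.StatusServiceUnavailable)
        return
    }
    // Validate the payload, broadcast it immediately, then persist it to the pool.
    w.WriteHeader(http.StatusOK)
}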
3
changelog/james-prysm_fix-rest-replay-state.md
Normal file
@@ -0,0 +1,3 @@
### Fixed

- Fixed a replay-state issue in the REST API caused by the attester and sync committee duties endpoints.
3
changelog/james-prysm_skip-e2e-slot1-check.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- The e2e sync committee evaluator now skips the first slot after startup. The fork epoch is already skipped for these checks; this extra skip applies only at startup, because Altair is always active from epoch 0 and validators need time to warm up.
3
changelog/manu-agg.md
Normal file
@@ -0,0 +1,3 @@
### Changed

- Pending aggregates: When multiple aggregated attestations differing only by the aggregator index are in the pending queue, only process one of them.
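One way to realize the pending-aggregates entry above is to deduplicate queued aggregates by a key that ignores the aggregator index, so aggregates that differ only by who produced them are processed once. A hedged sketch with hypothetical types (aggregateAndProof, filterPending), not the actual Prysm pending queue:

package pending

import "crypto/sha256"

type aggregateAndProof struct {
    AggregatorIndex uint64
    AggregationBits []byte
    AttestationData []byte // serialized attestation data
}

// dedupKey hashes everything except the aggregator index.
func dedupKey(a *aggregateAndProof) [32]byte {
    h := sha256.New()
    h.Write(a.AttestationData)
    h.Write(a.AggregationBits)
    var key [32]byte
    copy(key[:], h.Sum(nil))
    return key
}

// filterPending keeps only the first aggregate seen for each key.
func filterPending(pending []*aggregateAndProof) []*aggregateAndProof {
    seen := make(map[[32]byte]bool, len(pending))
    out := make([]*aggregateAndProof, 0, len(pending))
    for _, agg := range pending {
        k := dedupKey(agg)
        if seen[k] {
            continue
        }
        seen[k] = true
        out = append(out, agg)
    }
    return out
}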
2
changelog/manu-remove-error-logs.md
Normal file
@@ -0,0 +1,2 @@
### Changed
- `validateDataColumn`: Remove error logs.
2
changelog/manu_disable_get_blobs_v2.md
Normal file
@@ -0,0 +1,2 @@
### Added
- `--disable-get-blobs-v2` flag.
Some files were not shown because too many files have changed in this diff.