Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-11 06:18:05 -05:00

Compare commits: remove_pro ... manu (95 commits)
| SHA1 |
|---|
| b57c35a468 |
| 08f117f04f |
| 0b6365781f |
| 0c994445ea |
| 77c32203af |
| 743e6bab07 |
| 451d2a8bc5 |
| 6508bdfa9a |
| d2bf512f36 |
| e62fe66b0a |
| 08fff3dec4 |
| 75a3d45470 |
| bbd2d4da0f |
| 378468c1ec |
| 25d375dbe0 |
| 2d5f3112c8 |
| 1d49a2a88d |
| b119290584 |
| 9b0cffcdea |
| 979466a3d7 |
| 08ba8dd487 |
| a235a581e1 |
| 45b88de7f2 |
| f69e017f6a |
| 19e5684875 |
| 24fc76b7fb |
| 0a4aad543b |
| ab8584d138 |
| 9b6cd96012 |
| bba1358637 |
| e6af417c62 |
| 3509622323 |
| 80d7bd6084 |
| 8d3c3fd40b |
| 1caf7074a7 |
| 92bd155e3f |
| 8360b0f882 |
| bc0d60138c |
| 32f377a665 |
| 2ab792b0fe |
| 2ae8de05dd |
| cff96129b5 |
| 8abd1db7c1 |
| a4d726184f |
| 85acf242f6 |
| ba6d1d0c6b |
| 69d3453f97 |
| a69cf6f5d4 |
| 0a46c2d16d |
| 3c1b8859bc |
| 9f645ae0a4 |
| 5eeeb9ed15 |
| 2061fc8a2f |
| eb93350583 |
| affdab7776 |
| eb556adab5 |
| 09d886c676 |
| 11c5c6fb8b |
| 10804bbb56 |
| e6f3b636ac |
| e95676dd91 |
| 86b65e0912 |
| e9dac06037 |
| 893cf60921 |
| 41c9f160a2 |
| 707abe6112 |
| 76975a134d |
| 672de432a2 |
| 4a6d88d9fb |
| 1ff836e549 |
| 08f038fe80 |
| 63279bcadf |
| 61628efd44 |
| 9b07f13cd3 |
| 1397a79b4c |
| 929115639d |
| 14dca40786 |
| fa7596bceb |
| 5161f087fc |
| 71050ab076 |
| 614367ddcf |
| 3f7371445b |
| a15a1ade17 |
| 798376b1d7 |
| 93271050bf |
| 8dfbabc691 |
| af2522e5f0 |
| 452d42bd10 |
| 3e985377ce |
| ab2e836d3f |
| 14158bea9c |
| e14590636f |
| ce3660d2e7 |
| 7853cb9db0 |
| 8cfeda1473 |
.gitignore (vendored, 7 lines changed)

@@ -38,6 +38,13 @@ metaData
# execution API authentication
jwt.hex
execution/

# local execution client data
execution/

# local documentation
CLAUDE.md

# manual testing
tmp
@@ -193,7 +193,6 @@ nogo(
    "//tools/analyzers/featureconfig:go_default_library",
    "//tools/analyzers/gocognit:go_default_library",
    "//tools/analyzers/ineffassign:go_default_library",
    "//tools/analyzers/httperror:go_default_library",
    "//tools/analyzers/interfacechecker:go_default_library",
    "//tools/analyzers/logcapitalization:go_default_library",
    "//tools/analyzers/logruswitherror:go_default_library",
@@ -335,6 +335,9 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
    if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
        return errors.Wrap(err, "could not update committee cache")
    }
    if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
        return errors.Wrap(err, "could not update proposer index cache")
    }
    go func(ep primitives.Epoch) {
        // Use a custom deadline here, since this method runs asynchronously.
        // We ignore the parent method's context and instead create a new one

@@ -345,6 +348,26 @@ func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.Beacon
            log.WithError(err).Warn("Could not update committee cache")
        }
    }(e)
    // The latest block header is from the previous epoch
    r, err := st.LatestBlockHeader().HashTreeRoot()
    if err != nil {
        log.WithError(err).Error("Could not update proposer index state-root map")
        return nil
    }
    // The proposer indices cache takes the target root for the previous
    // epoch as key
    if e > 0 {
        e = e - 1
    }
    target, err := s.cfg.ForkChoiceStore.TargetRootForEpoch(r, e)
    if err != nil {
        log.WithError(err).Error("Could not update proposer index state-root map")
        return nil
    }
    err = helpers.UpdateCachedCheckpointToStateRoot(st, &forkchoicetypes.Checkpoint{Epoch: e, Root: target})
    if err != nil {
        log.WithError(err).Error("Could not update proposer index state-root map")
    }
    return nil
}
@@ -15,6 +15,7 @@ import (
    statefeed "github.com/OffchainLabs/prysm/v7/beacon-chain/core/feed/state"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
    coreTime "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/db"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/db/filesystem"

@@ -396,6 +397,10 @@ func (s *Service) initializeBeaconChain(
    if err := helpers.UpdateCommitteeCache(ctx, genesisState, 0); err != nil {
        return nil, err
    }
    if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState, coreTime.CurrentEpoch(genesisState)); err != nil {
        return nil, err
    }

    s.cfg.AttService.SetGenesisTime(genesisState.GenesisTime())

    return genesisState, nil
beacon-chain/cache/BUILD.bazel (vendored, 5 lines changed)

@@ -17,6 +17,9 @@ go_library(
        "error.go",
        "interfaces.go",
        "payload_id.go",
        "proposer_indices.go",
        "proposer_indices_disabled.go", # keep
        "proposer_indices_type.go",
        "registration.go",
        "skip_slot_cache.go",
        "subnet_ids.go",

@@ -37,6 +40,7 @@ go_library(
        "//beacon-chain/operations/attestations/attmap:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//cache/lru:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",
        "//consensus-types/primitives:go_default_library",
        "//container/slice:go_default_library",

@@ -73,6 +77,7 @@ go_test(
        "committee_test.go",
        "payload_id_test.go",
        "private_access_test.go",
        "proposer_indices_test.go",
        "registration_test.go",
        "skip_slot_cache_test.go",
        "subnet_ids_test.go",
beacon-chain/cache/proposer_indices.go (vendored, new file, 122 lines)

@@ -0,0 +1,122 @@
//go:build !fuzz

package cache

import (
    "sync"

    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    // ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
    ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proposer_indices_cache_miss",
        Help: "The number of proposer indices requests that aren't present in the cache.",
    })
    // ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
    ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proposer_indices_cache_hit",
        Help: "The number of proposer indices requests that are present in the cache.",
    })
)

// ProposerIndicesCache keeps track of the proposer indices in the next two
// epochs. It is keyed by the state root of the last epoch before. That is, for
// blocks during epoch 2, for example slot 65, it will be keyed by the state
// root of slot 63 (last slot in epoch 1).
// The cache keeps two sets of indices computed, the "safe" set is computed
// right before the epoch transition into the current epoch. For example for
// epoch 2 we will compute this list after importing block 63. The "unsafe"
// version is computed an epoch in advance, for example for epoch 3, it will be
// computed after importing block 63.
//
// The cache also keeps a map from checkpoints to state roots so that one is
// able to access the proposer indices list from a checkpoint instead. The
// checkpoint is the checkpoint for the epoch previous to the requested
// proposer indices. That is, for a slot in epoch 2 (eg. 65), the checkpoint
// root would be for slot 32 if present.
type ProposerIndicesCache struct {
    sync.Mutex
    indices map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
    rootMap map[forkchoicetypes.Checkpoint][32]byte // A map from checkpoint root to state root
}

// NewProposerIndicesCache returns a newly created cache
func NewProposerIndicesCache() *ProposerIndicesCache {
    return &ProposerIndicesCache{
        indices: make(map[primitives.Epoch]map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex),
        rootMap: make(map[forkchoicetypes.Checkpoint][32]byte),
    }
}

// ProposerIndices returns the proposer indices (safe) for the given root
func (p *ProposerIndicesCache) ProposerIndices(epoch primitives.Epoch, root [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
    p.Lock()
    defer p.Unlock()
    inner, ok := p.indices[epoch]
    if !ok {
        ProposerIndicesCacheMiss.Inc()
        return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
    }
    indices, exists := inner[root]
    if exists {
        ProposerIndicesCacheHit.Inc()
    } else {
        ProposerIndicesCacheMiss.Inc()
    }
    return indices, exists
}

// Prune resets the ProposerIndicesCache to its initial state
func (p *ProposerIndicesCache) Prune(epoch primitives.Epoch) {
    p.Lock()
    defer p.Unlock()
    for key := range p.indices {
        if key < epoch {
            delete(p.indices, key)
        }
    }
    for key := range p.rootMap {
        if key.Epoch+1 < epoch {
            delete(p.rootMap, key)
        }
    }
}

// Set sets the proposer indices for the given root as key
func (p *ProposerIndicesCache) Set(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
    p.Lock()
    defer p.Unlock()

    inner, ok := p.indices[epoch]
    if !ok {
        inner = make(map[[32]byte][fieldparams.SlotsPerEpoch]primitives.ValidatorIndex)
        p.indices[epoch] = inner
    }
    inner[root] = indices
}

// SetCheckpoint updates the map from checkpoints to state roots
func (p *ProposerIndicesCache) SetCheckpoint(c forkchoicetypes.Checkpoint, root [32]byte) {
    p.Lock()
    defer p.Unlock()
    p.rootMap[c] = root
}

// IndicesFromCheckpoint returns the proposer indices from a checkpoint rather than the state root
func (p *ProposerIndicesCache) IndicesFromCheckpoint(c forkchoicetypes.Checkpoint) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
    p.Lock()
    emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
    root, ok := p.rootMap[c]
    p.Unlock()
    if !ok {
        ProposerIndicesCacheMiss.Inc()
        return emptyIndices, ok
    }
    return p.ProposerIndices(c.Epoch+1, root)
}
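The new cache is keyed by epoch plus the state root of the last slot of the previous epoch, with a checkpoint-to-state-root map as an indirection. A minimal usage sketch of the API introduced above; it assumes the prysm/v7 module is importable as a dependency, and the epoch, root, and index values are purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
	forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
	fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

func main() {
	c := cache.NewProposerIndicesCache()

	// Proposer indices for epoch 2, keyed by the state root of slot 63
	// (the last slot of epoch 1), as described in the cache's doc comment.
	stateRoot := [32]byte{0xaa} // illustrative value
	var indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
	indices[5] = 19
	c.Set(primitives.Epoch(2), stateRoot, indices)

	// Map the epoch-1 checkpoint to that state root so callers holding only
	// a checkpoint can resolve the same entry.
	cp := forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{0xbb}}
	c.SetCheckpoint(cp, stateRoot)

	// IndicesFromCheckpoint resolves epoch cp.Epoch+1 (here: 2) via the root map.
	got, ok := c.IndicesFromCheckpoint(cp)
	fmt.Println(ok, got[5]) // true 19
}
```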
beacon-chain/cache/proposer_indices_disabled.go (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
//go:build fuzz

// This file is used in fuzzer builds to bypass proposer indices caches.
package cache

import (
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

var (
    // ProposerIndicesCacheMiss tracks the number of proposerIndices requests that aren't present in the cache.
    ProposerIndicesCacheMiss = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proposer_indices_cache_miss",
        Help: "The number of proposer indices requests that aren't present in the cache.",
    })
    // ProposerIndicesCacheHit tracks the number of proposerIndices requests that are in the cache.
    ProposerIndicesCacheHit = promauto.NewCounter(prometheus.CounterOpts{
        Name: "proposer_indices_cache_hit",
        Help: "The number of proposer indices requests that are present in the cache.",
    })
)

// FakeProposerIndicesCache is a struct with 1 queue for looking up proposer indices by root.
type FakeProposerIndicesCache struct {
}

// NewProposerIndicesCache creates a new proposer indices cache for storing/accessing proposer index assignments of an epoch.
func NewProposerIndicesCache() *FakeProposerIndicesCache {
    return &FakeProposerIndicesCache{}
}

// ProposerIndices is a stub.
func (c *FakeProposerIndicesCache) ProposerIndices(_ primitives.Epoch, _ [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
    return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
}

// UnsafeProposerIndices is a stub.
func (c *FakeProposerIndicesCache) UnsafeProposerIndices(_ primitives.Epoch, _ [32]byte) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
    return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
}

// Prune is a stub.
func (p *FakeProposerIndicesCache) Prune(epoch primitives.Epoch) {}

// Set is a stub.
func (p *FakeProposerIndicesCache) Set(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
}

// SetUnsafe is a stub.
func (p *FakeProposerIndicesCache) SetUnsafe(epoch primitives.Epoch, root [32]byte, indices [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex) {
}

// SetCheckpoint is a stub.
func (p *FakeProposerIndicesCache) SetCheckpoint(c forkchoicetypes.Checkpoint, root [32]byte) {}

// IndicesFromCheckpoint is a stub.
func (p *FakeProposerIndicesCache) IndicesFromCheckpoint(_ forkchoicetypes.Checkpoint) ([fieldparams.SlotsPerEpoch]primitives.ValidatorIndex, bool) {
    return [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}, false
}
beacon-chain/cache/proposer_indices_test.go (vendored, new file, 105 lines)

@@ -0,0 +1,105 @@
//go:build !fuzz

package cache

import (
    "testing"

    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProposerCache_Set(t *testing.T) {
    cache := NewProposerIndicesCache()
    bRoot := [32]byte{'A'}
    indices, ok := cache.ProposerIndices(0, bRoot)
    require.Equal(t, false, ok)
    emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
    require.Equal(t, indices, emptyIndices, "Expected committee count not to exist in empty cache")
    emptyIndices[0] = 1
    cache.Set(0, bRoot, emptyIndices)

    received, ok := cache.ProposerIndices(0, bRoot)
    require.Equal(t, true, ok)
    require.Equal(t, received, emptyIndices)

    newRoot := [32]byte{'B'}
    copy(emptyIndices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
    cache.Set(0, newRoot, emptyIndices)

    received, ok = cache.ProposerIndices(0, newRoot)
    require.Equal(t, true, ok)
    require.Equal(t, emptyIndices, received)
}

func TestProposerCache_CheckpointAndPrune(t *testing.T) {
    cache := NewProposerIndicesCache()
    indices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
    copy(indices[3:], []primitives.ValidatorIndex{1, 2, 3, 4, 5, 6})
    for i := 1; i < 10; i++ {
        root := [32]byte{byte(i)}
        cache.Set(primitives.Epoch(i), root, indices)
        cpRoot := [32]byte{byte(i - 1)}
        cache.SetCheckpoint(forkchoicetypes.Checkpoint{Epoch: primitives.Epoch(i - 1), Root: cpRoot}, root)
    }
    received, ok := cache.ProposerIndices(1, [32]byte{1})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.ProposerIndices(4, [32]byte{4})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.ProposerIndices(9, [32]byte{9})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    cache.Prune(5)

    emptyIndices := [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex{}
    received, ok = cache.ProposerIndices(1, [32]byte{1})
    require.Equal(t, false, ok)
    require.Equal(t, emptyIndices, received)

    received, ok = cache.ProposerIndices(4, [32]byte{4})
    require.Equal(t, false, ok)
    require.Equal(t, emptyIndices, received)

    received, ok = cache.ProposerIndices(9, [32]byte{9})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{0}})
    require.Equal(t, false, ok)
    require.Equal(t, emptyIndices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{3}})
    require.Equal(t, false, ok)
    require.Equal(t, emptyIndices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 4, Root: [32]byte{4}})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)

    received, ok = cache.IndicesFromCheckpoint(forkchoicetypes.Checkpoint{Epoch: 8, Root: [32]byte{8}})
    require.Equal(t, true, ok)
    require.Equal(t, indices, received)
}
beacon-chain/cache/proposer_indices_type.go (vendored, new file, 11 lines)

@@ -0,0 +1,11 @@
package cache

import (
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
)

// ProposerIndices defines the cached struct for proposer indices.
type ProposerIndices struct {
    BlockRoot       [32]byte
    ProposerIndices []primitives.ValidatorIndex
}
@@ -23,6 +23,7 @@ go_library(
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//config/fieldparams:go_default_library",
        "//config/params:go_default_library",

@@ -71,6 +72,7 @@ go_test(
    deps = [
        "//beacon-chain/cache:go_default_library",
        "//beacon-chain/core/time:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/state-native:go_default_library",
        "//config/fieldparams:go_default_library",
@@ -10,7 +10,9 @@ import (
    "github.com/OffchainLabs/go-bitfield"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"
    "github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
    "github.com/OffchainLabs/prysm/v7/container/slice"

@@ -25,7 +27,8 @@ import (
)

var (
    committeeCache = cache.NewCommitteesCache()
    committeeCache       = cache.NewCommitteesCache()
    proposerIndicesCache = cache.NewProposerIndicesCache()
)

type beaconCommitteeFunc = func(

@@ -525,6 +528,75 @@ func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState,
    return nil
}

// UpdateProposerIndicesInCache updates proposer indices entry of the committee cache.
// Input state is used to retrieve active validator indices.
// Input root is to use as key in the cache.
// Input epoch is the epoch to retrieve proposer indices for.
func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
    // The cache uses the state root at the end of (current epoch - 1) as key.
    // (e.g. for epoch 2, the key is root at slot 63)
    if epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
        return nil
    }
    slot, err := slots.EpochEnd(epoch - 1)
    if err != nil {
        return err
    }
    root, err := StateRootAtSlot(state, slot)
    if err != nil {
        return err
    }
    var proposerIndices []primitives.ValidatorIndex
    // use the state if post fulu (EIP-7917)
    if state.Version() >= version.Fulu {
        lookAhead, err := state.ProposerLookahead()
        if err != nil {
            return errors.Wrap(err, "could not get proposer lookahead")
        }
        proposerIndices = lookAhead[:params.BeaconConfig().SlotsPerEpoch]
    } else {
        // Skip cache update if the key already exists
        _, ok := proposerIndicesCache.ProposerIndices(epoch, [32]byte(root))
        if ok {
            return nil
        }
        indices, err := ActiveValidatorIndices(ctx, state, epoch)
        if err != nil {
            return err
        }
        proposerIndices, err = PrecomputeProposerIndices(state, indices, epoch)
        if err != nil {
            return err
        }
        if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
            return errors.New("invalid proposer length returned from state")
        }
    }
    // This is here to deal with tests only
    var indicesArray [fieldparams.SlotsPerEpoch]primitives.ValidatorIndex
    copy(indicesArray[:], proposerIndices)
    proposerIndicesCache.Prune(epoch - 2)
    proposerIndicesCache.Set(epoch, [32]byte(root), indicesArray)
    return nil
}

// UpdateCachedCheckpointToStateRoot updates the map from checkpoints to state root in the proposer indices cache
func UpdateCachedCheckpointToStateRoot(state state.ReadOnlyBeaconState, cp *forkchoicetypes.Checkpoint) error {
    if cp.Epoch <= params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
        return nil
    }
    slot, err := slots.EpochEnd(cp.Epoch)
    if err != nil {
        return err
    }
    root, err := state.StateRootAtIndex(uint64(slot % params.BeaconConfig().SlotsPerHistoricalRoot))
    if err != nil {
        return err
    }
    proposerIndicesCache.SetCheckpoint(*cp, [32]byte(root))
    return nil
}

// ExpandCommitteeCache resizes the cache to a higher limit.
func ExpandCommitteeCache() {
    committeeCache.ExpandCommitteeCache()

@@ -538,6 +610,7 @@ func CompressCommitteeCache() {
// ClearCache clears the beacon committee cache and sync committee cache.
func ClearCache() {
    committeeCache.Clear()
    proposerIndicesCache.Prune(0)
    syncCommitteeCache.Clear()
    balanceCache.Clear()
}
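For reference, the cache key used by UpdateProposerIndicesInCache is the state root at the last slot of the previous epoch (the real code obtains it via slots.EpochEnd(epoch-1) and StateRootAtSlot). A small self-contained sketch of that slot arithmetic, assuming mainnet's 32 slots per epoch; it is only meant to illustrate the "epoch 2 -> slot 63" example from the comments above:

```go
package main

import "fmt"

// slotsPerEpoch is the mainnet value; the real code reads
// params.BeaconConfig().SlotsPerEpoch instead of a constant.
const slotsPerEpoch = 32

// keySlot returns the slot whose post-state root keys the proposer indices
// cache entry for the given epoch: the last slot of epoch-1. Valid for
// epoch >= 1; the helper above returns early for the genesis epoch.
func keySlot(epoch uint64) uint64 {
	return epoch*slotsPerEpoch - 1
}

func main() {
	fmt.Println(keySlot(2)) // 63, matching "for epoch 2, the key is root at slot 63"
	fmt.Println(keySlot(3)) // 95
}
```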
@@ -11,3 +11,7 @@ func CommitteeCache() *cache.FakeCommitteeCache {
func SyncCommitteeCache() *cache.FakeSyncCommitteeCache {
    return syncCommitteeCache
}

func ProposerIndicesCache() *cache.FakeProposerIndicesCache {
    return proposerIndicesCache
}

@@ -11,3 +11,7 @@ func CommitteeCache() *cache.CommitteeCache {
func SyncCommitteeCache() *cache.SyncCommitteeCache {
    return syncCommitteeCache
}

func ProposerIndicesCache() *cache.ProposerIndicesCache {
    return proposerIndicesCache
}
@@ -7,6 +7,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/state"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"

@@ -151,7 +152,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
    }

    if err := UpdateCommitteeCache(ctx, s, epoch); err != nil {
        log.WithError(err).Error("Could not update committee cache")
        return nil, errors.Wrap(err, "could not update committee cache")
    }

    return indices, nil

@@ -272,6 +273,32 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
    return BeaconProposerIndexAtSlot(ctx, state, state.Slot())
}

// cachedProposerIndexAtSlot returns the proposer index at the given slot from
// the cache at the given root key.
func cachedProposerIndexAtSlot(slot primitives.Slot, root [32]byte) (primitives.ValidatorIndex, error) {
    proposerIndices, has := proposerIndicesCache.ProposerIndices(slots.ToEpoch(slot), root)
    if !has {
        return 0, errProposerIndexMiss
    }
    if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
        return 0, errProposerIndexMiss
    }
    return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
}

// ProposerIndexAtSlotFromCheckpoint returns the proposer index at the given
// slot from the cache at the given checkpoint
func ProposerIndexAtSlotFromCheckpoint(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, error) {
    proposerIndices, has := proposerIndicesCache.IndicesFromCheckpoint(*c)
    if !has {
        return 0, errProposerIndexMiss
    }
    if len(proposerIndices) != int(params.BeaconConfig().SlotsPerEpoch) {
        return 0, errProposerIndexMiss
    }
    return proposerIndices[slot%params.BeaconConfig().SlotsPerEpoch], nil
}

func beaconProposerIndexAtSlotFulu(state state.ReadOnlyBeaconState, slot primitives.Slot) (primitives.ValidatorIndex, error) {
    e := slots.ToEpoch(slot)
    stateEpoch := slots.ToEpoch(state.Slot())

@@ -302,6 +329,32 @@ func BeaconProposerIndexAtSlot(ctx context.Context, state state.ReadOnlyBeaconSt
            return beaconProposerIndexAtSlotFulu(state, slot)
        }
    }
    // The cache uses the state root of the previous epoch - minimum_seed_lookahead last slot as key. (e.g. Starting epoch 1, slot 32, the key would be block root at slot 31)
    // For simplicity, the node will skip caching of genesis epoch. If the passed state has not yet reached this slot then we do not check the cache.
    if e <= stateEpoch && e > params.BeaconConfig().GenesisEpoch+params.BeaconConfig().MinSeedLookahead {
        s, err := slots.EpochEnd(e - 1)
        if err != nil {
            return 0, err
        }
        r, err := StateRootAtSlot(state, s)
        if err != nil {
            return 0, err
        }
        if r != nil && !bytes.Equal(r, params.BeaconConfig().ZeroHash[:]) {
            pid, err := cachedProposerIndexAtSlot(slot, [32]byte(r))
            if err == nil {
                return pid, nil
            }
            if err := UpdateProposerIndicesInCache(ctx, state, e); err != nil {
                return 0, errors.Wrap(err, "could not update proposer index cache")
            }
            pid, err = cachedProposerIndexAtSlot(slot, [32]byte(r))
            if err == nil {
                return pid, nil
            }
        }
    }

    seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconProposer)
    if err != nil {
        return 0, errors.Wrap(err, "could not generate seed")
@@ -7,6 +7,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
    "github.com/OffchainLabs/prysm/v7/beacon-chain/core/time"
    forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
    state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
    fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
    "github.com/OffchainLabs/prysm/v7/config/params"

@@ -877,6 +878,23 @@ func TestLastActivatedValidatorIndex_OK(t *testing.T) {
    require.Equal(t, index, primitives.ValidatorIndex(3))
}

func TestProposerIndexFromCheckpoint(t *testing.T) {
    helpers.ClearCache()

    e := primitives.Epoch(2)
    r := [32]byte{'a'}
    root := [32]byte{'b'}
    ids := [32]primitives.ValidatorIndex{}
    slot := primitives.Slot(69) // slot 5 in the Epoch
    ids[5] = primitives.ValidatorIndex(19)
    helpers.ProposerIndicesCache().Set(e, r, ids)
    c := &forkchoicetypes.Checkpoint{Root: root, Epoch: e - 1}
    helpers.ProposerIndicesCache().SetCheckpoint(*c, r)
    id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
    require.NoError(t, err)
    require.Equal(t, ids[5], id)
}

func TestHasETH1WithdrawalCredentials(t *testing.T) {
    creds := []byte{0xFA, 0xCC}
    v := &ethpb.Validator{WithdrawalCredentials: creds}
@@ -664,6 +664,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
        DenyListCIDR:          slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
        IPColocationWhitelist: colocationWhitelist,
        EnableUPnP:            cliCtx.Bool(cmd.EnableUPnPFlag.Name),
        EnableAutoNAT:         cliCtx.Bool(cmd.EnableAutoNATFlag.Name),
        StateNotifier:         b,
        DB:                    b.db,
        StateGen:              b.stateGen,
@@ -13,6 +13,8 @@ go_library(
        "doc.go",
        "fork.go",
        "fork_watcher.go",
        "gossip_peer_controller.go",
        "gossip_peer_crawler.go",
        "gossip_scoring_params.go",
        "gossip_topic_mappings.go",
        "handshake.go",

@@ -51,6 +53,7 @@ go_library(
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
        "//beacon-chain/p2p/gossipcrawler:go_default_library",
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -92,6 +95,7 @@ go_library(
        "@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
        "@com_github_libp2p_go_libp2p//core/control:go_default_library",
        "@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
        "@com_github_libp2p_go_libp2p//core/event:go_default_library",
        "@com_github_libp2p_go_libp2p//core/host:go_default_library",
        "@com_github_libp2p_go_libp2p//core/network:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",

@@ -101,10 +105,8 @@ go_library(
        "@com_github_libp2p_go_libp2p//p2p/security/noise:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/quic:go_default_library",
        "@com_github_libp2p_go_libp2p//p2p/transport/tcp:go_default_library",
        "@com_github_libp2p_go_libp2p_mplex//:go_default_library",
        "@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
        "@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
        "@com_github_libp2p_go_mplex//:go_default_library",
        "@com_github_multiformats_go_multiaddr//:go_default_library",
        "@com_github_multiformats_go_multiaddr//net:go_default_library",
        "@com_github_patrickmn_go_cache//:go_default_library",

@@ -115,6 +117,7 @@ go_library(
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
        "@org_golang_x_sync//semaphore:go_default_library",
    ],
)

@@ -128,6 +131,8 @@ go_test(
        "dial_relay_node_test.go",
        "discovery_test.go",
        "fork_test.go",
        "gossip_peer_controller_test.go",
        "gossip_peer_crawler_test.go",
        "gossip_scoring_params_test.go",
        "gossip_topic_mappings_test.go",
        "message_id_test.go",

@@ -154,9 +159,11 @@ go_test(
        "//beacon-chain/core/helpers:go_default_library",
        "//beacon-chain/core/peerdas:go_default_library",
        "//beacon-chain/core/signing:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/iface:go_default_library",
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p/encoder:go_default_library",
        "//beacon-chain/p2p/gossipcrawler:go_default_library",
        "//beacon-chain/p2p/peers:go_default_library",
        "//beacon-chain/p2p/peers/peerdata:go_default_library",
        "//beacon-chain/p2p/peers/scorers:go_default_library",

@@ -194,6 +201,7 @@ go_test(
        "@com_github_libp2p_go_libp2p//:go_default_library",
        "@com_github_libp2p_go_libp2p//core/connmgr:go_default_library",
        "@com_github_libp2p_go_libp2p//core/crypto:go_default_library",
        "@com_github_libp2p_go_libp2p//core/event:go_default_library",
        "@com_github_libp2p_go_libp2p//core/host:go_default_library",
        "@com_github_libp2p_go_libp2p//core/network:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",

@@ -203,8 +211,10 @@ go_test(
        "@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
        "@com_github_multiformats_go_multiaddr//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
        "@com_github_prometheus_client_golang//prometheus/testutil:go_default_library",
        "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
        "@com_github_sirupsen_logrus//hooks/test:go_default_library",
        "@com_github_stretchr_testify//require:go_default_library",
        "@org_golang_google_protobuf//proto:go_default_library",
    ],
)
@@ -60,7 +60,10 @@ func (s *Service) Broadcast(ctx context.Context, msg proto.Message) error {
    if !ok {
        return errors.Errorf("message of %T does not support marshaller interface", msg)
    }
    return s.broadcastObject(ctx, castMsg, fmt.Sprintf(topic, forkDigest))

    fullTopic := fmt.Sprintf(topic, forkDigest) + s.Encoding().ProtocolSuffix()

    return s.broadcastObject(ctx, castMsg, fullTopic)
}

// BroadcastAttestation broadcasts an attestation to the p2p network, the message is assumed to be

@@ -106,6 +109,7 @@ func (s *Service) BroadcastSyncCommitteeMessage(ctx context.Context, subnet uint
}

func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint64, att ethpb.Att, forkDigest [fieldparams.VersionLength]byte) {
    topic := AttestationSubnetTopic(forkDigest, subnet)
    _, span := trace.StartSpan(ctx, "p2p.internalBroadcastAttestation")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -116,7 +120,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
    // Ensure we have peers with this subnet.
    s.subnetLocker(subnet).RLock()
    hasPeer := s.hasPeerWithSubnet(attestationToTopic(subnet, forkDigest))
    hasPeer := s.hasPeerWithTopic(topic)
    s.subnetLocker(subnet).RUnlock()

    span.SetAttributes(

@@ -131,7 +135,7 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
    s.subnetLocker(subnet).Lock()
    defer s.subnetLocker(subnet).Unlock()

    if err := s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
    if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
        return errors.Wrap(err, "find peers with subnets")
    }

@@ -153,13 +157,14 @@ func (s *Service) internalBroadcastAttestation(ctx context.Context, subnet uint6
        return
    }

    if err := s.broadcastObject(ctx, att, attestationToTopic(subnet, forkDigest)); err != nil {
    if err := s.broadcastObject(ctx, att, topic); err != nil {
        log.WithError(err).Error("Failed to broadcast attestation")
        tracing.AnnotateError(span, err)
    }
}

func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMsg *ethpb.SyncCommitteeMessage, forkDigest [fieldparams.VersionLength]byte) {
    topic := SyncCommitteeSubnetTopic(forkDigest, subnet)
    _, span := trace.StartSpan(ctx, "p2p.broadcastSyncCommittee")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -173,7 +178,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
    // to ensure that we can reuse the same subnet locker.
    wrappedSubIdx := subnet + syncLockerVal
    s.subnetLocker(wrappedSubIdx).RLock()
    hasPeer := s.hasPeerWithSubnet(syncCommitteeToTopic(subnet, forkDigest))
    hasPeer := s.hasPeerWithTopic(topic)
    s.subnetLocker(wrappedSubIdx).RUnlock()

    span.SetAttributes(

@@ -187,7 +192,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
    if err := func() error {
        s.subnetLocker(wrappedSubIdx).Lock()
        defer s.subnetLocker(wrappedSubIdx).Unlock()
        if err := s.FindAndDialPeersWithSubnets(ctx, SyncCommitteeSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
        if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
            return errors.Wrap(err, "find peers with subnets")
        }

@@ -205,7 +210,7 @@ func (s *Service) broadcastSyncCommittee(ctx context.Context, subnet uint64, sMs
        return
    }

    if err := s.broadcastObject(ctx, sMsg, syncCommitteeToTopic(subnet, forkDigest)); err != nil {
    if err := s.broadcastObject(ctx, sMsg, topic); err != nil {
        log.WithError(err).Error("Failed to broadcast sync committee message")
        tracing.AnnotateError(span, err)
    }

@@ -233,6 +238,7 @@ func (s *Service) BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.
}

func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blobSidecar *ethpb.BlobSidecar, forkDigest [fieldparams.VersionLength]byte) {
    topic := BlobSubnetTopic(forkDigest, subnet)
    _, span := trace.StartSpan(ctx, "p2p.internalBroadcastBlob")
    defer span.End()
    ctx = trace.NewContext(context.Background(), span) // clear parent context / deadline.

@@ -243,7 +249,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
    wrappedSubIdx := subnet + blobSubnetLockerVal
    s.subnetLocker(wrappedSubIdx).RLock()
    hasPeer := s.hasPeerWithSubnet(blobSubnetToTopic(subnet, forkDigest))
    hasPeer := s.hasPeerWithTopic(topic)
    s.subnetLocker(wrappedSubIdx).RUnlock()

    if !hasPeer {

@@ -252,7 +258,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
        s.subnetLocker(wrappedSubIdx).Lock()
        defer s.subnetLocker(wrappedSubIdx).Unlock()

        if err := s.FindAndDialPeersWithSubnets(ctx, BlobSubnetTopicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
        if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
            return errors.Wrap(err, "find peers with subnets")
        }

@@ -264,7 +270,7 @@ func (s *Service) internalBroadcastBlob(ctx context.Context, subnet uint64, blob
        }
    }

    if err := s.broadcastObject(ctx, blobSidecar, blobSubnetToTopic(subnet, forkDigest)); err != nil {
    if err := s.broadcastObject(ctx, blobSidecar, topic); err != nil {
        log.WithError(err).Error("Failed to broadcast blob sidecar")
        tracing.AnnotateError(span, err)
    }

@@ -293,7 +299,7 @@ func (s *Service) BroadcastLightClientOptimisticUpdate(ctx context.Context, upda
    }

    digest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    if err := s.broadcastObject(ctx, update, lcOptimisticToTopic(digest)); err != nil {
    if err := s.broadcastObject(ctx, update, LcOptimisticToTopic(digest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client optimistic update")
        err := errors.Wrap(err, "could not publish message")
        tracing.AnnotateError(span, err)

@@ -327,7 +333,7 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
    }

    forkDigest := params.ForkDigest(slots.ToEpoch(update.AttestedHeader().Beacon().Slot))
    if err := s.broadcastObject(ctx, update, lcFinalityToTopic(forkDigest)); err != nil {
    if err := s.broadcastObject(ctx, update, LcFinalityToTopic(forkDigest)); err != nil {
        log.WithError(err).Debug("Failed to broadcast light client finality update")
        err := errors.Wrap(err, "could not publish message")
        tracing.AnnotateError(span, err)

@@ -386,13 +392,14 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
        subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)

        // Build the topic corresponding to subnet column subnet and this fork digest.
        topic := dataColumnSubnetToTopic(subnet, forkDigest)
        topic := DataColumnSubnetTopic(forkDigest, subnet)

        // Compute the wrapped subnet index.
        wrappedSubIdx := subnet + dataColumnSubnetVal

        // Find peers if needed.
        if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
        if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, topic); err != nil {
            tracing.AnnotateError(span, err)
            log.WithError(err).Error("Cannot find peers if needed")
            return

@@ -487,20 +494,16 @@ func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [f
func (s *Service) findPeersIfNeeded(
    ctx context.Context,
    wrappedSubIdx uint64,
    topicFormat string,
    forkDigest [fieldparams.VersionLength]byte,
    subnet uint64,
    topic string,
) error {
    // Sending a data column sidecar to only one peer is not ideal,
    // but it ensures at least one peer receives it.
    s.subnetLocker(wrappedSubIdx).Lock()
    defer s.subnetLocker(wrappedSubIdx).Unlock()

    // No peers found, attempt to find peers with this subnet.
    if err := s.FindAndDialPeersWithSubnets(ctx, topicFormat, forkDigest, minimumPeersPerSubnetForBroadcast, map[uint64]bool{subnet: true}); err != nil {
    if err := s.gossipDialer.DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnetForBroadcast); err != nil {
        return errors.Wrap(err, "find peers with subnet")
    }

    return nil
}

@@ -525,34 +528,10 @@ func (s *Service) broadcastObject(ctx context.Context, obj ssz.Marshaler, topic
        iid := int64(id)
        span = trace.AddMessageSendEvent(span, iid, messageLen /*uncompressed*/, messageLen /*compressed*/)
    }
    if err := s.PublishToTopic(ctx, topic+s.Encoding().ProtocolSuffix(), buf.Bytes()); err != nil {
    if err := s.PublishToTopic(ctx, topic, buf.Bytes()); err != nil {
        err := errors.Wrap(err, "could not publish message")
        tracing.AnnotateError(span, err)
        return err
    }
    return nil
}

func attestationToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
    return fmt.Sprintf(AttestationSubnetTopicFormat, forkDigest, subnet)
}

func syncCommitteeToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
    return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, forkDigest, subnet)
}

func blobSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
    return fmt.Sprintf(BlobSubnetTopicFormat, forkDigest, subnet)
}

func lcOptimisticToTopic(forkDigest [4]byte) string {
    return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest)
}

func lcFinalityToTopic(forkDigest [4]byte) string {
    return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest)
}

func dataColumnSubnetToTopic(subnet uint64, forkDigest [fieldparams.VersionLength]byte) string {
    return fmt.Sprintf(DataColumnSubnetTopicFormat, forkDigest, subnet)
}
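This refactor replaces the unexported per-message topic builders with exported helpers (AttestationSubnetTopic, SyncCommitteeSubnetTopic, BlobSubnetTopic, DataColumnSubnetTopic, LcOptimisticToTopic, LcFinalityToTopic), and broadcastObject now expects the full topic string, so callers append the encoding protocol suffix themselves, as the updated tests do. A small self-contained sketch of the topic shape; the format string and the "/ssz_snappy" suffix are assumptions inferred from the test expectations above ("/eth2/00000000/beacon_attestation_2" plus ProtocolSuffix()), not the library's actual constants:

```go
package main

import "fmt"

// attestationTopic mimics the shape produced by p2p.AttestationSubnetTopic in
// the diff above; the real code formats AttestationSubnetTopicFormat instead
// of this hardcoded string, which is an illustrative assumption.
func attestationTopic(forkDigest [4]byte, subnet uint64) string {
	return fmt.Sprintf("/eth2/%x/beacon_attestation_%d", forkDigest, subnet)
}

func main() {
	// Append the encoding suffix to get the full pubsub topic, mirroring
	// "topic += p.Encoding().ProtocolSuffix()" in the tests. The suffix
	// value used here is an assumption.
	full := attestationTopic([4]byte{}, 2) + "/ssz_snappy"
	fmt.Println(full) // /eth2/00000000/beacon_attestation_2/ssz_snappy
}
```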
@@ -30,6 +30,7 @@ import (
    "github.com/OffchainLabs/prysm/v7/testing/require"
    "github.com/OffchainLabs/prysm/v7/testing/util"
    "github.com/OffchainLabs/prysm/v7/time/slots"
    "github.com/ethereum/go-ethereum/p2p/enode"
    pubsub "github.com/libp2p/go-libp2p-pubsub"
    "github.com/libp2p/go-libp2p/core/host"
    "google.golang.org/protobuf/proto"

@@ -109,6 +110,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
    if gtm := GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()]; gtm != AttestationSubnetTopicFormat {
        t.Errorf("Constant is out of date. Wanted %s, got %s", AttestationSubnetTopicFormat, gtm)
    }
    s := Service{}

    tests := []struct {
        att *ethpb.Attestation

@@ -121,7 +123,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
                    Slot:           2,
                },
            },
            topic: "/eth2/00000000/beacon_attestation_2",
            topic: "/eth2/00000000/beacon_attestation_2" + s.Encoding().ProtocolSuffix(),
        },
        {
            att: &ethpb.Attestation{

@@ -130,7 +132,7 @@ func TestService_Attestation_Subnet(t *testing.T) {
                    Slot:           10,
                },
            },
            topic: "/eth2/00000000/beacon_attestation_21",
            topic: "/eth2/00000000/beacon_attestation_21" + s.Encoding().ProtocolSuffix(),
        },
        {
            att: &ethpb.Attestation{

@@ -139,12 +141,12 @@ func TestService_Attestation_Subnet(t *testing.T) {
                    Slot:           529,
                },
            },
            topic: "/eth2/00000000/beacon_attestation_8",
            topic: "/eth2/00000000/beacon_attestation_8" + s.Encoding().ProtocolSuffix(),
        },
    }
    for _, tt := range tests {
        subnet := helpers.ComputeSubnetFromCommitteeAndSlot(100, tt.att.Data.CommitteeIndex, tt.att.Data.Slot)
        assert.Equal(t, tt.topic, attestationToTopic(subnet, [4]byte{} /* fork digest */), "Wrong topic")
        assert.Equal(t, tt.topic, AttestationSubnetTopic([4]byte{}, subnet), "Wrong topic")
    }
}

@@ -173,14 +175,12 @@ func TestService_BroadcastAttestation(t *testing.T) {
    msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
    subnet := uint64(5)

    topic := AttestationSubnetTopicFormat
    GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
    GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest, subnet)
    topic := AttestationSubnetTopic(digest, subnet)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    sub, err := p2.SubscribeToTopic(topic)
    require.NoError(t, err)

@@ -226,6 +226,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
    // Setup bootnode.
    cfg := &Config{PingInterval: testPingInterval, DB: db}
    cfg.UDPPort = uint(port)
    cfg.TCPPort = uint(port)
    _, pkey := createAddrAndPrivKey(t)
    ipAddr := net.ParseIP("127.0.0.1")
    genesisTime := time.Now()

@@ -251,8 +252,9 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {

    var listeners []*listenerWrapper
    var hosts []host.Host
    var configs []*Config
    // setup other nodes.
    cfg = &Config{
    baseCfg := &Config{
        Discv5BootStrapAddrs: []string{bootNode.String()},
        MaxPeers:             2,
        PingInterval:         testPingInterval,

@@ -261,11 +263,21 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
    // Setup 2 different hosts
    for i := uint(1); i <= 2; i++ {
        h, pkey, ipAddr := createHost(t, port+i)
        cfg.UDPPort = uint(port + i)
        cfg.TCPPort = uint(port + i)

        // Create a new config for each service to avoid shared mutations
        cfg := &Config{
            Discv5BootStrapAddrs: baseCfg.Discv5BootStrapAddrs,
            MaxPeers:             baseCfg.MaxPeers,
            PingInterval:         baseCfg.PingInterval,
            DB:                   baseCfg.DB,
            UDPPort:              uint(port + i),
            TCPPort:              uint(port + i),
        }

        if len(listeners) > 0 {
            cfg.Discv5BootStrapAddrs = append(cfg.Discv5BootStrapAddrs, listeners[len(listeners)-1].Self().String())
        }

        s := &Service{
            cfg:         cfg,
            genesisTime: genesisTime,

@@ -278,18 +290,22 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        close(s.custodyInfoSet)

        listener, err := s.startDiscoveryV5(ipAddr, pkey)
        // Set for 2nd peer
        assert.NoError(t, err, "Could not start discovery for node")

        // Set listener for the service
        s.dv5Listener = listener
        s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))

        // Set subnet for 2nd peer
        if i == 2 {
            s.dv5Listener = listener
            s.metaData = wrapper.WrappedMetadataV0(new(ethpb.MetaDataV0))
            bitV := bitfield.NewBitvector64()
            bitV.SetBitAt(subnet, true)
            err := s.updateSubnetRecordWithMetadata(bitV)
            require.NoError(t, err)
        }
        assert.NoError(t, err, "Could not start discovery for node")
        listeners = append(listeners, listener)
        hosts = append(hosts, h)
        configs = append(configs, cfg)
    }
    defer func() {
        // Close down all peers.

@@ -324,7 +340,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        pubsub:                ps1,
        dv5Listener:           listeners[0],
        joinedTopics:          map[string]*pubsub.Topic{},
        cfg:                   cfg,
        cfg:                   configs[0],
        genesisTime:           time.Now(),
        genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
        subnetsLock:           make(map[uint64]*sync.RWMutex),

@@ -340,7 +356,7 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
        pubsub:                ps2,
        dv5Listener:           listeners[1],
        joinedTopics:          map[string]*pubsub.Topic{},
        cfg:                   cfg,
        cfg:                   configs[1],
        genesisTime:           time.Now(),
        genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
        subnetsLock:           make(map[uint64]*sync.RWMutex),

@@ -353,14 +369,12 @@ func TestService_BroadcastAttestationWithDiscoveryAttempts(t *testing.T) {
    go p2.listenForNewNodes()

    msg := util.HydrateAttestation(&ethpb.Attestation{AggregationBits: bitfield.NewBitlist(7)})
    topic := AttestationSubnetTopicFormat
    GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = topic
    GossipTypeMapping[reflect.TypeFor[*ethpb.Attestation]()] = AttestationSubnetTopicFormat
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest, subnet)
    topic := AttestationSubnetTopic(digest, subnet)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    // We don't use our internal subscribe method
    // due to using floodsub over here.
    tpHandle, err := p2.JoinTopic(topic)

@@ -431,14 +445,12 @@ func TestService_BroadcastSyncCommittee(t *testing.T) {
    msg := util.HydrateSyncCommittee(&ethpb.SyncCommitteeMessage{})
    subnet := uint64(5)

    topic := SyncCommitteeSubnetTopicFormat
    GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = topic
    GossipTypeMapping[reflect.TypeFor[*ethpb.SyncCommitteeMessage]()] = SyncCommitteeSubnetTopicFormat
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest, subnet)
    topic := SyncCommitteeSubnetTopic(digest, subnet)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    sub, err := p2.SubscribeToTopic(topic)
    require.NoError(t, err)

@@ -508,14 +520,12 @@ func TestService_BroadcastBlob(t *testing.T) {
    }
    subnet := uint64(0)

    topic := BlobSubnetTopicFormat
    GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = topic
    GossipTypeMapping[reflect.TypeFor[*ethpb.BlobSidecar]()] = BlobSubnetTopicFormat
    digest, err := p.currentForkDigest()
    require.NoError(t, err)
    topic = fmt.Sprintf(topic, digest, subnet)
    topic := BlobSubnetTopic(digest, subnet)

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    sub, err := p2.SubscribeToTopic(topic)
    require.NoError(t, err)

@@ -575,10 +585,9 @@ func TestService_BroadcastLightClientOptimisticUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientOptimisticUpdateTopicFormat
    topic := fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
    topic := LcOptimisticToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    sub, err := p2.SubscribeToTopic(topic)
    require.NoError(t, err)

@@ -651,10 +660,9 @@ func TestService_BroadcastLightClientFinalityUpdate(t *testing.T) {
    require.NoError(t, err)

    GossipTypeMapping[reflect.TypeOf(msg)] = LightClientFinalityUpdateTopicFormat
    topic := fmt.Sprintf(LightClientFinalityUpdateTopicFormat, params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))
    topic := LcFinalityToTopic(params.ForkDigest(slots.ToEpoch(msg.AttestedHeader().Beacon().Slot)))

    // External peer subscribes to the topic.
    topic += p.Encoding().ProtocolSuffix()
    sub, err := p2.SubscribeToTopic(topic)
    require.NoError(t, err)

@@ -702,7 +710,6 @@ func TestService_BroadcastDataColumn(t *testing.T) {
    const (
        port        = 2000
        columnIndex = 12
        topicFormat = DataColumnSubnetTopicFormat
    )

    ctx := t.Context()

@@ -760,7 +767,17 @@ func TestService_BroadcastDataColumn(t *testing.T) {
    require.NoError(t, err)

    subnet := peerdas.ComputeSubnetForDataColumnSidecar(columnIndex)
    topic := fmt.Sprintf(topicFormat, digest, subnet) + service.Encoding().ProtocolSuffix()
    topic := DataColumnSubnetTopic(digest, subnet)

    crawler, err := NewGossipPeerCrawler(t.Context(), service, listener, 1*time.Second, 1*time.Second, 10,
        func(n *enode.Node) bool { return true },
        service.Peers().Scorers().Score)
    require.NoError(t, err)
    err = crawler.Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
        return []string{topic}, nil
    })
    require.NoError(t, err)
    service.gossipDialer = NewGossipPeerDialer(t.Context(), crawler, service.PubSub().ListPeers, service.DialPeers)

    _, verifiedRoSidecars := util.CreateTestVerifiedRoDataColumnSidecars(t, []util.DataColumnParam{{Index: columnIndex}})
    verifiedRoSidecar := verifiedRoSidecars[0]
@@ -28,6 +28,7 @@ const (
|
||||
type Config struct {
|
||||
NoDiscovery bool
|
||||
EnableUPnP bool
|
||||
EnableAutoNAT bool
|
||||
StaticPeerID bool
|
||||
DisableLivenessCheck bool
|
||||
StaticPeers []string
|
||||
|
||||
@@ -369,11 +369,11 @@ func (s *Service) listenForNewNodes() {
|
||||
}
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets ensures that our node is connected to enough peers.
|
||||
// If, the threshold is met, then this function immediately returns.
|
||||
// findAndDialPeers ensures that our node is connected to enough peers.
|
||||
// If the threshold is met, then this function immediately returns.
|
||||
// Otherwise, it searches for new peers and dials them.
|
||||
// If `ctx“ is canceled while searching for peers, search is stopped, but new found peers are still dialed.
|
||||
// In this case, the function returns an error.
|
||||
// If `ctx` is canceled while searching for peers, search is stopped, but newly
|
||||
// found peers are still dialed. In this case, the function returns an error.
|
||||
func (s *Service) findAndDialPeers(ctx context.Context) error {
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
@@ -404,8 +404,7 @@ func (s *Service) findAndDialPeers(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dialedPeerCount := s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
|
||||
dialedPeerCount := s.DialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
if dialedPeerCount > missingPeerCount {
|
||||
missingPeerCount = 0
|
||||
continue
|
||||
@@ -554,6 +553,7 @@ func (s *Service) createListener(
|
||||
Bootnodes: bootNodes,
|
||||
PingInterval: s.cfg.PingInterval,
|
||||
NoFindnodeLivenessCheck: s.cfg.DisableLivenessCheck,
|
||||
V5RespTimeout: 300 * time.Millisecond,
|
||||
}
|
||||
|
||||
listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
|
||||
|
||||
beacon-chain/p2p/gossip_peer_controller.go (new file, 252 lines)
@@ -0,0 +1,252 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const dialInterval = 1 * time.Second
|
||||
|
||||
// GossipPeerDialer maintains minimum peer counts for gossip topics by periodically
|
||||
// dialing new peers discovered by a crawler. It runs a background loop that checks each
|
||||
// topic's peer count and dials new peers when below the target threshold.
|
||||
type GossipPeerDialer struct {
|
||||
ctx context.Context
|
||||
|
||||
listPeers func(topic string) []peer.ID
|
||||
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
|
||||
|
||||
crawler gossipcrawler.Crawler
|
||||
topicsProvider gossipcrawler.SubnetTopicsProvider
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// NewGossipPeerDialer creates a new GossipPeerDialer instance.
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: Parent context that controls the lifecycle of the dialer. When cancelled,
|
||||
// the background dial loop will terminate.
|
||||
// - crawler: Source of peer candidates for each topic. The crawler maintains a registry
|
||||
// of peers discovered through DHT crawling, indexed by the topics they subscribe to.
|
||||
// - listPeers: Function that returns the current peers connected for a given topic.
|
||||
// Used to determine how many additional peers need to be dialed.
|
||||
// - dialPeers: Function that dials the given enode.Node peers with a concurrency limit.
|
||||
// Returns the number of successful dials.
|
||||
//
|
||||
// The dialer must be started with Start() before it begins maintaining peer counts.
|
||||
func NewGossipPeerDialer(
|
||||
ctx context.Context,
|
||||
crawler gossipcrawler.Crawler,
|
||||
listPeers func(topic string) []peer.ID,
|
||||
dialPeers func(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint,
|
||||
) *GossipPeerDialer {
|
||||
return &GossipPeerDialer{
|
||||
ctx: ctx,
|
||||
listPeers: listPeers,
|
||||
dialPeers: dialPeers,
|
||||
crawler: crawler,
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins the background dial loop that maintains peer counts for all topics.
|
||||
//
|
||||
// The provider function is called on each tick to get the current list of topics that
|
||||
// need peer maintenance. This allows the set of topics to change dynamically as the node
|
||||
// subscribes/unsubscribes from subnets.
|
||||
//
|
||||
// Start is idempotent - calling it multiple times has no effect after the first call.
|
||||
// Only the provider from the first call will be used; subsequent calls are ignored.
|
||||
//
|
||||
// The dial loop runs every dialInterval (1 second) and for each topic:
|
||||
// 1. Checks current peer count via listPeers()
|
||||
// 2. If below the per-topic min peer count, requests candidates from the crawler
|
||||
// 3. Deduplicates peers across all topics to avoid redundant dials
|
||||
// 4. Dials missing peers with rate limiting if enabled
|
||||
//
|
||||
// Returns nil always (error return preserved for interface compatibility).
|
||||
func (g *GossipPeerDialer) Start(provider gossipcrawler.SubnetTopicsProvider) error {
|
||||
g.once.Do(func() {
|
||||
g.topicsProvider = provider
|
||||
go g.dialLoop()
|
||||
})
|
||||
|
||||
return nil
|
||||
}
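// Illustrative wiring sketch (an assumption for documentation, not part of the
// change set): constructing the dialer from an existing p2p Service and crawler,
// then starting it with a provider that asks for two peers on a single
// attestation subnet topic. `service`, `crawler`, `digest` and `subnet` are
// assumed to already exist, as in the broadcast tests earlier in this diff.
//
//	dialer := NewGossipPeerDialer(ctx, crawler, service.PubSub().ListPeers, service.DialPeers)
//	_ = dialer.Start(func() map[string]int {
//		topic := AttestationSubnetTopic(digest, subnet) + service.Encoding().ProtocolSuffix()
//		return map[string]int{topic: 2}
//	})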
|
||||
|
||||
func (g *GossipPeerDialer) dialLoop() {
|
||||
ticker := time.NewTicker(dialInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
peersToDial := g.selectPeersForTopics()
|
||||
if len(peersToDial) == 0 {
|
||||
continue
|
||||
}
|
||||
g.dialPeersWithRatelimiting(peersToDial)
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// selectPeersForTopics builds a bidirectional mapping of topics to peers and selects
|
||||
// peers to dial using a greedy algorithm that prioritizes peers serving multiple topics.
|
||||
// When a peer is selected, the needed count is decremented for ALL topics that peer serves,
|
||||
// avoiding redundant dials when one peer can satisfy multiple topic requirements.
|
||||
func (g *GossipPeerDialer) selectPeersForTopics() []*enode.Node {
|
||||
topicsWithMinPeers := g.topicsProvider()
|
||||
|
||||
// Calculate how many peers each topic still needs.
|
||||
neededByTopic := make(map[string]int)
|
||||
for topic, minPeers := range topicsWithMinPeers {
|
||||
currentCount := len(g.listPeers(topic))
|
||||
if needed := minPeers - currentCount; needed > 0 {
|
||||
neededByTopic[topic] = needed
|
||||
}
|
||||
}
|
||||
|
||||
if len(neededByTopic) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
peerToTopics := make(map[enode.ID][]string)
|
||||
nodeByID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for topic := range neededByTopic {
|
||||
candidates := g.crawler.PeersForTopic(topic)
|
||||
for _, node := range candidates {
|
||||
id := node.ID()
|
||||
if _, exists := nodeByID[id]; !exists {
|
||||
nodeByID[id] = node
|
||||
}
|
||||
peerToTopics[id] = append(peerToTopics[id], topic)
|
||||
}
|
||||
}
|
||||
|
||||
// Build candidate list sorted by topic count (descending).
|
||||
// Peers serving more topics are prioritized.
|
||||
type candidate struct {
|
||||
node *enode.Node
|
||||
topics []string
|
||||
}
|
||||
candidates := make([]candidate, 0, len(peerToTopics))
|
||||
for id, topics := range peerToTopics {
|
||||
candidates = append(candidates, candidate{node: nodeByID[id], topics: topics})
|
||||
}
|
||||
|
||||
// sort candidates by topic count (descending)
|
||||
slices.SortFunc(candidates, func(a, b candidate) int {
|
||||
return len(b.topics) - len(a.topics)
|
||||
})
|
||||
|
||||
// Greedy selection with cross-topic accounting.
|
||||
var selected []*enode.Node
|
||||
for _, c := range candidates {
|
||||
// Check if this peer serves any topic we still need.
|
||||
servesNeededTopic := false
|
||||
for _, topic := range c.topics {
|
||||
if neededByTopic[topic] > 0 {
|
||||
servesNeededTopic = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !servesNeededTopic {
|
||||
continue
|
||||
}
|
||||
|
||||
// Select this peer and decrement needed count for ALL topics it serves.
|
||||
selected = append(selected, c.node)
|
||||
for _, topic := range c.topics {
|
||||
if neededByTopic[topic] > 0 {
|
||||
neededByTopic[topic]--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return selected
|
||||
}
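// Worked example of the greedy pass above, mirroring the "cross-topic decrement"
// test case later in this diff: with a provider returning {"topic/a": 2, "topic/b": 1},
// peer X advertising both topics and peer Y advertising only topic/a, X is selected
// first (it serves the most topics), which satisfies topic/b and one slot of topic/a;
// Y is then selected for the remaining topic/a slot. Two dials cover three topic slots.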
|
||||
|
||||
// DialPeersForTopicBlocking blocks until the specified topic has at least nPeers connected,
|
||||
// or until the context is cancelled.
|
||||
//
|
||||
// This method is useful when you need to ensure a minimum number of peers are connected
|
||||
// for a specific topic before proceeding (e.g., before publishing a message).
|
||||
//
|
||||
// The method polls in a loop:
|
||||
// 1. Check if current peer count >= nPeers, return nil if satisfied
|
||||
// 2. Get peer candidates from crawler for this topic
|
||||
// 3. Dial candidates with rate limiting
|
||||
// 4. Wait 100ms for connections to establish in pubsub layer
|
||||
// 5. Repeat until target reached or context cancelled
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: Context to cancel the blocking operation. Takes precedence for cancellation.
|
||||
// - topic: The gossipsub topic to ensure peers for.
|
||||
// - nPeers: Minimum number of peers required before returning.
|
||||
//
|
||||
// Returns:
|
||||
// - nil: Successfully reached the target peer count.
|
||||
// - ctx.Err(): The provided context was cancelled.
|
||||
// - g.ctx.Err(): The dialer's parent context was cancelled.
|
||||
//
|
||||
// Note: This may block indefinitely if the crawler cannot provide enough peers
|
||||
// and the context has no deadline.
|
||||
func (g *GossipPeerDialer) DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error {
|
||||
for {
|
||||
peers := g.listPeers(topic)
|
||||
if len(peers) >= nPeers {
|
||||
return nil
|
||||
}
|
||||
|
||||
newPeers := g.peersForTopic(topic, nPeers)
|
||||
if len(newPeers) > 0 {
|
||||
g.dialPeersWithRatelimiting(newPeers)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
// A short wait after dialing helps, since new connections take a moment to show up in pubsub.
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
case <-g.ctx.Done():
|
||||
return g.ctx.Err()
|
||||
}
|
||||
}
|
||||
}
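// Illustrative sketch (assumed usage, not taken from the change set): bounding
// the blocking call with a timeout before publishing on a topic, so a sparse
// crawler registry cannot stall the caller indefinitely.
//
//	waitCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
//	defer cancel()
//	if err := dialer.DialPeersForTopicBlocking(waitCtx, topic, 1); err != nil {
//		log.WithError(err).Debug("Could not reach the target peer count before publishing")
//	}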
|
||||
|
||||
func (g *GossipPeerDialer) peersForTopic(topic string, targetCount int) []*enode.Node {
|
||||
peers := g.listPeers(topic)
|
||||
peerCount := len(peers)
|
||||
if peerCount >= targetCount {
|
||||
return nil
|
||||
}
|
||||
missing := targetCount - peerCount
|
||||
newPeers := g.crawler.PeersForTopic(topic)
|
||||
if len(newPeers) > missing {
|
||||
newPeers = newPeers[:missing]
|
||||
}
|
||||
|
||||
return newPeers
|
||||
}
|
||||
|
||||
func (g *GossipPeerDialer) dialPeersWithRatelimiting(peers []*enode.Node) {
|
||||
// Dial new peers in batches.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
g.dialPeers(g.ctx, maxConcurrentDials, peers)
|
||||
}
|
||||
beacon-chain/p2p/gossip_peer_controller_test.go (new file, 523 lines)
@@ -0,0 +1,523 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"net"
|
||||
"slices"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGossipPeerDialer_Start(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
provider gossipcrawler.SubnetTopicsProvider
|
||||
expectedConnects int
|
||||
expectStartErr bool
|
||||
}{
|
||||
{
|
||||
name: "dials unique peers across topics",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30101)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30102)
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
"topic/b": {nodeA},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 2, "topic/b": 2}
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
{
|
||||
name: "uses per-topic min peer counts",
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodes := make([]*enode.Node, 5)
|
||||
for i := range nodes {
|
||||
nodes[i] = newTestNode(t, "127.0.0.1", uint16(30110+i))
|
||||
}
|
||||
return &mockCrawler{
|
||||
consume: true,
|
||||
peers: map[string][]*enode.Node{
|
||||
// topic/mesh has 3 available peers, minPeers=2 -> should dial 2
|
||||
"topic/mesh": {nodes[0], nodes[1], nodes[2]},
|
||||
// topic/fanout has 2 available peers, minPeers=1 -> should dial 1
|
||||
"topic/fanout": {nodes[3], nodes[4]},
|
||||
},
|
||||
}
|
||||
},
|
||||
provider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/mesh": 2,
|
||||
"topic/fanout": 1,
|
||||
}
|
||||
},
|
||||
// Total: 2 from mesh + 1 from fanout = 3 peers dialed
|
||||
expectedConnects: 3,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
listPeers := func(topic string) []peer.ID { return nil }
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), tt.newCrawler(t), listPeers, md.DialPeers)
|
||||
|
||||
err := dialer.Start(tt.provider)
|
||||
if tt.expectStartErr {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return md.dialCount() >= tt.expectedConnects
|
||||
}, 2*time.Second, 20*time.Millisecond)
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_DialPeersForTopicBlocking(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers int
|
||||
newCrawler func(t *testing.T) *mockCrawler
|
||||
targetPeers int
|
||||
ctx func() (context.Context, context.CancelFunc)
|
||||
expectedConnects int
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "returns immediately when enough peers",
|
||||
connectedPeers: 1,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
return &mockCrawler{}
|
||||
},
|
||||
targetPeers: 1,
|
||||
ctx: func() (context.Context, context.CancelFunc) { return context.WithCancel(context.Background()) },
|
||||
expectedConnects: 0,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
name: "dials when peers are missing",
|
||||
connectedPeers: 0,
|
||||
newCrawler: func(t *testing.T) *mockCrawler {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30201)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30202)
|
||||
return &mockCrawler{
|
||||
peers: map[string][]*enode.Node{
|
||||
"topic/a": {nodeA, nodeB},
|
||||
},
|
||||
}
|
||||
},
|
||||
targetPeers: 2,
|
||||
ctx: func() (context.Context, context.CancelFunc) {
|
||||
return context.WithTimeout(context.Background(), 1*time.Second)
|
||||
},
|
||||
expectedConnects: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
md := &mockDialer{}
|
||||
var mu sync.Mutex
|
||||
connected := make([]peer.ID, 0)
|
||||
for i := 0; i < tt.connectedPeers; i++ {
|
||||
connected = append(connected, peer.ID(string(rune(i))))
|
||||
}
|
||||
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return connected
|
||||
}
|
||||
|
||||
dialPeers := func(ctx context.Context, max int, nodes []*enode.Node) uint {
|
||||
cnt := md.DialPeers(ctx, max, nodes)
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
for range nodes {
|
||||
// Just add a dummy peer ID to simulate connection success
|
||||
connected = append(connected, peer.ID("dummy"))
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
crawler := tt.newCrawler(t)
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, dialPeers)
|
||||
topic := "topic/a"
|
||||
|
||||
ctx, cancel := tt.ctx()
|
||||
defer cancel()
|
||||
|
||||
err := dialer.DialPeersForTopicBlocking(ctx, topic, tt.targetPeers)
|
||||
if tt.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.Equal(t, tt.expectedConnects, md.dialCount())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_peersForTopic(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connected int
|
||||
targetCount int
|
||||
buildPeers func(t *testing.T) ([]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "returns nil when enough peers already connected",
|
||||
connected: 1,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
return []*enode.Node{newTestNode(t, "127.0.0.1", 30301)}, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns crawler peers when none connected",
|
||||
connected: 0,
|
||||
targetCount: 2,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30311)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30312)
|
||||
return []*enode.Node{nodeA, nodeB}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "truncates peers when more than needed",
|
||||
connected: 0,
|
||||
targetCount: 1,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30321)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30322)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30323)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only returns missing peers",
|
||||
connected: 1,
|
||||
targetCount: 3,
|
||||
buildPeers: func(t *testing.T) ([]*enode.Node, []*enode.Node) {
|
||||
nodeA := newTestNode(t, "127.0.0.1", 30331)
|
||||
nodeB := newTestNode(t, "127.0.0.1", 30332)
|
||||
nodeC := newTestNode(t, "127.0.0.1", 30333)
|
||||
return []*enode.Node{nodeA, nodeB, nodeC}, []*enode.Node{nodeA, nodeB}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
peers := make([]peer.ID, tt.connected)
|
||||
for i := 0; i < tt.connected; i++ {
|
||||
peers[i] = peer.ID(string(rune(i))) // Fake peer ID
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: map[string][]*enode.Node{"topic/test": crawlerPeers},
|
||||
consume: false,
|
||||
}
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
|
||||
got := dialer.peersForTopic("topic/test", tt.targetCount)
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got))
|
||||
|
||||
for i := range expected {
|
||||
require.Equal(t, expected[i], got[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGossipPeerDialer_selectPeersForTopics(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
connectedPeers map[string]int // topic -> connected peer count
|
||||
topicsProvider func() map[string]int
|
||||
buildPeers func(t *testing.T) (map[string][]*enode.Node, []*enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "prioritizes multi-topic peer over single-topic peers",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves all 3 topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30401)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30402)
|
||||
// Peer Z serves only topic/b
|
||||
nodeZ := newTestNode(t, "127.0.0.1", 30403)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX, nodeZ},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only nodeX should be dialed (satisfies all 3 topics)
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "cross-topic decrement works correctly",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // Need 2 peers
|
||||
"topic/b": 1, // Need 1 peer
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
// Peer X serves both topics
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30411)
|
||||
// Peer Y serves only topic/a
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30412)
|
||||
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX, nodeY},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// nodeX covers topic/b fully, and 1 of 2 for topic/a
|
||||
// nodeY covers remaining 1 for topic/a
|
||||
return crawlerPeers, []*enode.Node{nodeX, nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no redundant dials when one peer satisfies all",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 1,
|
||||
"topic/b": 1,
|
||||
"topic/c": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30421)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
"topic/c": {nodeX},
|
||||
}
|
||||
// Only 1 dial needed for all 3 topics
|
||||
return crawlerPeers, []*enode.Node{nodeX}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "skips topics with enough peers already",
|
||||
connectedPeers: map[string]int{
|
||||
"topic/a": 2, // Already has 2
|
||||
},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2, // min 2, already have 2
|
||||
"topic/b": 1, // min 1, have 0
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30431)
|
||||
nodeY := newTestNode(t, "127.0.0.1", 30432)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeY},
|
||||
}
|
||||
// Only nodeY should be dialed (topic/a already satisfied)
|
||||
return crawlerPeers, []*enode.Node{nodeY}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "returns nil when all topics satisfied",
|
||||
connectedPeers: map[string]int{"topic/a": 2, "topic/b": 1},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{
|
||||
"topic/a": 2,
|
||||
"topic/b": 1,
|
||||
}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
nodeX := newTestNode(t, "127.0.0.1", 30441)
|
||||
crawlerPeers := map[string][]*enode.Node{
|
||||
"topic/a": {nodeX},
|
||||
"topic/b": {nodeX},
|
||||
}
|
||||
// No dials needed
|
||||
return crawlerPeers, nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "handles empty crawler response",
|
||||
connectedPeers: map[string]int{},
|
||||
topicsProvider: func() map[string]int {
|
||||
return map[string]int{"topic/a": 1}
|
||||
},
|
||||
buildPeers: func(t *testing.T) (map[string][]*enode.Node, []*enode.Node) {
|
||||
return map[string][]*enode.Node{}, nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
listPeers := func(topic string) []peer.ID {
|
||||
count := tt.connectedPeers[topic]
|
||||
peers := make([]peer.ID, count)
|
||||
for i := range count {
|
||||
peers[i] = peer.ID(topic + string(rune(i)))
|
||||
}
|
||||
return peers
|
||||
}
|
||||
|
||||
crawlerPeers, expected := tt.buildPeers(t)
|
||||
crawler := &mockCrawler{
|
||||
peers: crawlerPeers,
|
||||
consume: false,
|
||||
}
|
||||
|
||||
dialer := NewGossipPeerDialer(t.Context(), crawler, listPeers, func(ctx context.Context,
|
||||
maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
})
|
||||
dialer.topicsProvider = tt.topicsProvider
|
||||
|
||||
got := dialer.selectPeersForTopics()
|
||||
|
||||
if expected == nil {
|
||||
require.Nil(t, got)
|
||||
return
|
||||
}
|
||||
|
||||
require.Equal(t, len(expected), len(got), "expected %d peers, got %d", len(expected), len(got))
|
||||
|
||||
// Verify all expected nodes are present (order may vary for equal topic counts)
|
||||
expectedIDs := make(map[enode.ID]struct{})
|
||||
for _, n := range expected {
|
||||
expectedIDs[n.ID()] = struct{}{}
|
||||
}
|
||||
for _, n := range got {
|
||||
_, ok := expectedIDs[n.ID()]
|
||||
require.True(t, ok, "unexpected peer %s in result", n.ID())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type mockCrawler struct {
|
||||
mu sync.Mutex
|
||||
peers map[string][]*enode.Node
|
||||
consume bool
|
||||
}
|
||||
|
||||
func (m *mockCrawler) Start(gossipcrawler.TopicExtractor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockCrawler) Stop() {}
|
||||
func (m *mockCrawler) RemovePeerByPeerId(peer.ID) {}
|
||||
func (m *mockCrawler) RemoveTopic(string) {}
|
||||
func (m *mockCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
nodes := m.peers[topic]
|
||||
if len(nodes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
copied := slices.Clone(nodes)
|
||||
if m.consume {
|
||||
m.peers[topic] = nil
|
||||
}
|
||||
return copied
|
||||
}
|
||||
|
||||
type mockDialer struct {
|
||||
mu sync.Mutex
|
||||
dials []*enode.Node
|
||||
}
|
||||
|
||||
func (m *mockDialer) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.dials = append(m.dials, nodes...)
|
||||
return uint(len(nodes))
|
||||
}
|
||||
|
||||
func (m *mockDialer) dialCount() int {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return len(m.dials)
|
||||
}
|
||||
|
||||
func (m *mockDialer) dialedNodes() []*enode.Node {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return slices.Clone(m.dials)
|
||||
}
|
||||
|
||||
func newTestNode(t *testing.T, ip string, tcpPort uint16) *enode.Node {
|
||||
priv, _, err := crypto.GenerateSecp256k1Key(rand.Reader)
|
||||
require.NoError(t, err)
|
||||
|
||||
return newTestNodeWithPriv(t, priv, ip, tcpPort)
|
||||
}
|
||||
|
||||
func newTestNodeWithPriv(t *testing.T, priv crypto.PrivKey, ip string, tcpPort uint16) *enode.Node {
|
||||
t.Helper()
|
||||
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
db.Close()
|
||||
})
|
||||
|
||||
convertedKey, err := ecdsa.ConvertFromInterfacePrivKey(priv)
|
||||
require.NoError(t, err)
|
||||
|
||||
localNode := enode.NewLocalNode(db, convertedKey)
|
||||
localNode.SetStaticIP(net.ParseIP(ip))
|
||||
localNode.Set(enr.TCP(tcpPort))
|
||||
localNode.Set(enr.UDP(tcpPort))
|
||||
|
||||
return localNode.Node()
|
||||
}
|
||||
beacon-chain/p2p/gossip_peer_crawler.go (new file, 546 lines)
@@ -0,0 +1,546 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/semaphore"
|
||||
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
type peerNode struct {
|
||||
isPinged bool
|
||||
node *enode.Node
|
||||
peerID peer.ID
|
||||
topics map[string]struct{}
|
||||
}
|
||||
|
||||
type crawledPeers struct {
|
||||
mu sync.RWMutex
|
||||
peerNodeByEnode map[enode.ID]*peerNode
|
||||
peerNodeByPid map[peer.ID]*peerNode
|
||||
peersByTopic map[string]map[*peerNode]struct{}
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) updateStatusToPinged(enodeID enode.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
existingPNode, ok := cp.peerNodeByEnode[enodeID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// We only want to ping a node with a given NodeID once, not on every sequence
// number change, since the ping merely checks that the node is reachable and not fake.
|
||||
existingPNode.isPinged = true
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) updatePeer(node *enode.Node, topics []string) (bool, error) {
|
||||
if node == nil {
|
||||
return false, errors.New("node is nil")
|
||||
}
|
||||
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
enodeID := node.ID()
|
||||
existingPNode, ok := cp.peerNodeByEnode[enodeID]
|
||||
|
||||
if ok && existingPNode.node == nil {
|
||||
return false, errors.New("enode is nil for enodeId")
|
||||
}
|
||||
|
||||
// We don't want to update enodes with a lower sequence number, as those are stale records.
|
||||
if ok && existingPNode.node.Seq() >= node.Seq() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !ok {
|
||||
// this is a new peer
|
||||
peerID, err := enodeToPeerID(node)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("converting enode to peer ID: %w", err)
|
||||
}
|
||||
existingPNode = &peerNode{
|
||||
node: node,
|
||||
peerID: peerID,
|
||||
topics: make(map[string]struct{}),
|
||||
}
|
||||
cp.peerNodeByEnode[enodeID] = existingPNode
|
||||
cp.peerNodeByPid[peerID] = existingPNode
|
||||
} else {
|
||||
existingPNode.node = node
|
||||
}
|
||||
|
||||
cp.updateTopicsUnlocked(existingPNode, topics)
|
||||
cp.recordMetricsUnlocked()
|
||||
|
||||
if existingPNode.isPinged || len(topics) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
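// Example of the sequence-number rule above (numbers are illustrative): if a peer
// is already tracked with an ENR at Seq 7, crawled records at Seq 6 or 7 are ignored,
// while a record at Seq 8 replaces the stored node and refreshes its topic set.
// The returned shouldPing flag is only set for peers that advertise topics and have
// not been pinged yet, so reachability is verified at most once per node ID.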
|
||||
|
||||
func (cp *crawledPeers) removeTopic(topic string) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
// Get all peers subscribed to this topic
|
||||
peers, ok := cp.peersByTopic[topic]
|
||||
if !ok {
|
||||
return // Topic doesn't exist
|
||||
}
|
||||
|
||||
// Remove the topic from each peer's topic list
|
||||
for pnode := range peers {
|
||||
delete(pnode.topics, topic)
|
||||
// remove the peer if it has no more topics left
|
||||
if len(pnode.topics) == 0 {
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the topic from byTopic map
|
||||
delete(cp.peersByTopic, topic)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removePeerByPeerId(peerID peer.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
pnode, ok := cp.peerNodeByPid[peerID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// Use updateTopicsUnlocked with empty topics to remove the peer
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removePeerByNodeId(enodeID enode.ID) {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
pnode, ok := cp.peerNodeByEnode[enodeID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
cp.updateTopicsUnlocked(pnode, nil)
|
||||
cp.recordMetricsUnlocked()
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) recordMetricsUnlocked() {
|
||||
gossipCrawlerPeersByEnodeCount.Set(float64(len(cp.peerNodeByEnode)))
|
||||
gossipCrawlerPeersByPidCount.Set(float64(len(cp.peerNodeByPid)))
|
||||
gossipCrawlerTopicsCount.Set(float64(len(cp.peersByTopic)))
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) cleanupPeer(pnode *peerNode) {
|
||||
delete(cp.peerNodeByPid, pnode.peerID)
|
||||
delete(cp.peerNodeByEnode, pnode.node.ID())
|
||||
for t := range pnode.topics {
|
||||
if peers, ok := cp.peersByTopic[t]; ok {
|
||||
delete(peers, pnode)
|
||||
if len(peers) == 0 {
|
||||
delete(cp.peersByTopic, t)
|
||||
}
|
||||
}
|
||||
}
|
||||
pnode.topics = nil // Clear topics to indicate removal.
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) removeOldTopicsFromPeer(pnode *peerNode, newTopics map[string]struct{}) {
|
||||
for oldTopic := range pnode.topics {
|
||||
if _, ok := newTopics[oldTopic]; !ok {
|
||||
if peers, ok := cp.peersByTopic[oldTopic]; ok {
|
||||
delete(peers, pnode)
|
||||
if len(peers) == 0 {
|
||||
delete(cp.peersByTopic, oldTopic)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) addNewTopicsToPeer(pnode *peerNode, newTopics map[string]struct{}) {
|
||||
for newTopic := range newTopics {
|
||||
if _, ok := pnode.topics[newTopic]; !ok {
|
||||
if _, ok := cp.peersByTopic[newTopic]; !ok {
|
||||
cp.peersByTopic[newTopic] = make(map[*peerNode]struct{})
|
||||
}
|
||||
cp.peersByTopic[newTopic][pnode] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateTopicsUnlocked updates the topics associated with a peer node.
|
||||
// If the topics slice is empty, the peer is completely removed from the crawled peers.
|
||||
// Otherwise, it updates the peer's topics by removing old topics that are no longer
|
||||
// present and adding new topics. This method assumes the caller holds the lock on cp.mu.
|
||||
// If a topic has no peers after this update, it is removed from the list of topics we track peers for.
|
||||
func (cp *crawledPeers) updateTopicsUnlocked(pnode *peerNode, topics []string) {
|
||||
// If topics is empty, remove the peer completely.
|
||||
if len(topics) == 0 {
|
||||
cp.cleanupPeer(pnode)
|
||||
return
|
||||
}
|
||||
|
||||
newTopics := make(map[string]struct{})
|
||||
for _, t := range topics {
|
||||
newTopics[t] = struct{}{}
|
||||
}
|
||||
|
||||
// Remove old topics that are no longer present.
|
||||
cp.removeOldTopicsFromPeer(pnode, newTopics)
|
||||
|
||||
// Add new topics.
|
||||
cp.addNewTopicsToPeer(pnode, newTopics)
|
||||
|
||||
pnode.topics = newTopics
|
||||
}
|
||||
|
||||
func (cp *crawledPeers) getPeersForTopic(topic string, filter gossipcrawler.PeerFilterFunc) []peerNode {
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
|
||||
peers, ok := cp.peersByTopic[topic]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var peerNodes []peerNode
|
||||
for pnode := range peers {
|
||||
if pnode.node == nil {
|
||||
continue
|
||||
}
|
||||
if pnode.isPinged && filter(pnode.node) {
|
||||
peerNodes = append(peerNodes, *pnode)
|
||||
}
|
||||
}
|
||||
return peerNodes
|
||||
}
|
||||
|
||||
// GossipPeerCrawler discovers and maintains a registry of peers subscribed to gossipsub topics.
|
||||
// It uses discv5 to find peers, extracts their topic subscriptions from ENR records, and verifies
|
||||
// their reachability via ping. Only peers that have been successfully pinged are returned when
|
||||
// querying for peers on a given topic. The crawler runs three background loops: one for discovery,
|
||||
// one for ping verification, and one for periodic cleanup of stale or filtered-out peers.
|
||||
type GossipPeerCrawler struct {
|
||||
ctx context.Context
|
||||
|
||||
crawlInterval, crawlTimeout time.Duration
|
||||
|
||||
crawledPeers *crawledPeers
|
||||
|
||||
// Discovery interface for finding peers
|
||||
dv5 ListenerRebooter
|
||||
|
||||
p2pSvc *Service
|
||||
|
||||
topicExtractor gossipcrawler.TopicExtractor
|
||||
|
||||
peerFilter gossipcrawler.PeerFilterFunc
|
||||
scorer PeerScoreFunc
|
||||
|
||||
pingCh chan enode.Node
|
||||
pingSemaphore *semaphore.Weighted
|
||||
|
||||
once sync.Once
|
||||
}
|
||||
|
||||
// cleanupInterval controls how frequently we sweep crawled peers and prune
|
||||
// those that are no longer useful.
|
||||
const cleanupInterval = 5 * time.Minute
|
||||
|
||||
// PeerScoreFunc calculates a reputation score for a given peer ID.
|
||||
// Higher scores indicate more desirable peers. This function is used by PeersForTopic
|
||||
// to sort returned peers in descending order of quality, allowing callers to prioritize
|
||||
// connections to the most reliable peers.
|
||||
type PeerScoreFunc func(peer.ID) float64
|
||||
|
||||
// NewGossipPeerCrawler creates a new crawler for discovering gossipsub peers.
|
||||
// The crawler uses the provided discv5 listener to discover peers and tracks their
|
||||
// topic subscriptions. Parameters:
|
||||
// - p2pSvc: The P2P service for network operations
|
||||
// - dv5: The discv5 listener used for peer discovery and ping verification
|
||||
// - crawlTimeout: Maximum duration for each crawl iteration
|
||||
// - crawlInterval: The duration between each crawl iteration
|
||||
// - maxConcurrentPings: Limits parallel ping operations to avoid overwhelming the network
|
||||
// - peerFilter: Determines which discovered peers should be tracked
|
||||
// - scorer: Calculates peer quality scores for sorting results
|
||||
//
|
||||
// Returns an error if any required parameter is nil or invalid.
|
||||
func NewGossipPeerCrawler(
|
||||
ctx context.Context,
|
||||
p2pSvc *Service,
|
||||
dv5 ListenerRebooter,
|
||||
crawlTimeout time.Duration,
|
||||
crawlInterval time.Duration,
|
||||
maxConcurrentPings int64,
|
||||
peerFilter gossipcrawler.PeerFilterFunc,
|
||||
scorer PeerScoreFunc,
|
||||
) (*GossipPeerCrawler, error) {
|
||||
if p2pSvc == nil {
|
||||
return nil, errors.New("p2pSvc is nil")
|
||||
}
|
||||
if dv5 == nil {
|
||||
return nil, errors.New("dv5 is nil")
|
||||
}
|
||||
if crawlTimeout <= 0 {
|
||||
return nil, errors.New("crawl timeout must be greater than 0")
|
||||
}
|
||||
if crawlInterval <= 0 {
|
||||
return nil, errors.New("crawl interval must be greater than 0")
|
||||
}
|
||||
if maxConcurrentPings <= 0 {
|
||||
return nil, errors.New("max concurrent pings must be greater than 0")
|
||||
}
|
||||
if peerFilter == nil {
|
||||
return nil, errors.New("peer filter is nil")
|
||||
}
|
||||
if scorer == nil {
|
||||
return nil, errors.New("peer scorer is nil")
|
||||
}
|
||||
|
||||
g := &GossipPeerCrawler{
|
||||
ctx: ctx,
|
||||
crawlInterval: crawlInterval,
|
||||
crawlTimeout: crawlTimeout,
|
||||
p2pSvc: p2pSvc,
|
||||
dv5: dv5,
|
||||
peerFilter: peerFilter,
|
||||
scorer: scorer,
|
||||
}
|
||||
g.pingCh = make(chan enode.Node, 4*maxConcurrentPings)
|
||||
g.pingSemaphore = semaphore.NewWeighted(maxConcurrentPings)
|
||||
g.crawledPeers = &crawledPeers{
|
||||
peerNodeByEnode: make(map[enode.ID]*peerNode),
|
||||
peerNodeByPid: make(map[peer.ID]*peerNode),
|
||||
peersByTopic: make(map[string]map[*peerNode]struct{}),
|
||||
}
|
||||
return g, nil
|
||||
}
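// Illustrative construction sketch, mirroring the values used by the broadcast
// test earlier in this diff (the timeouts, interval and ping limit are example
// values, not recommendations; `service` is the p2p *Service and `listener` its
// discv5 ListenerRebooter):
//
//	crawler, err := NewGossipPeerCrawler(ctx, service, listener,
//		time.Second, time.Second, 10, // crawlTimeout, crawlInterval, maxConcurrentPings
//		func(n *enode.Node) bool { return true }, // peerFilter: accept every node
//		service.Peers().Scorers().Score)          // scorer
//	if err != nil {
//		return err
//	}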
|
||||
|
||||
// PeersForTopic returns a list of enode records for peers subscribed to the given topic.
|
||||
// Only peers that have been successfully pinged (verified as reachable) and pass the
|
||||
// configured peer filter are included. Results are sorted in descending order by peer
|
||||
// score, so higher-quality peers appear first. Returns nil if no peers are found for
|
||||
// the topic. The returned slice should not be modified as it contains pointers to
|
||||
// internal enode records.
|
||||
func (g *GossipPeerCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
peerNodes := g.crawledPeers.getPeersForTopic(topic, g.peerFilter)
|
||||
|
||||
slices.SortFunc(peerNodes, func(a, b peerNode) int {
|
||||
scoreA := g.scorer(a.peerID)
|
||||
scoreB := g.scorer(b.peerID)
|
||||
if scoreA > scoreB {
|
||||
return -1
|
||||
}
|
||||
if scoreA < scoreB {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
})
|
||||
|
||||
nodes := make([]*enode.Node, 0, len(peerNodes))
|
||||
for _, pn := range peerNodes {
|
||||
nodes = append(nodes, pn.node)
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
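// Illustrative sketch: fetching ranked candidates for a topic and dialing only
// the best few. `maxDials` is a hypothetical cap; DialPeers is the Service
// method used for the same purpose in findAndDialPeers.
//
//	candidates := crawler.PeersForTopic(topic)
//	if len(candidates) > maxDials {
//		candidates = candidates[:maxDials]
//	}
//	service.DialPeers(ctx, maxDials, candidates)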
|
||||
|
||||
// RemovePeerByPeerId removes a peer from the crawler's registry by their libp2p peer ID.
|
||||
// This also removes the peer from all topic subscriptions they were associated with.
|
||||
// If the peer is not found, this operation is a no-op.
|
||||
func (g *GossipPeerCrawler) RemovePeerByPeerId(peerID peer.ID) {
|
||||
g.crawledPeers.removePeerByPeerId(peerID)
|
||||
}
|
||||
|
||||
// RemoveTopic removes a topic and all its peer associations from the crawler.
|
||||
// Peers that were only subscribed to this topic are completely removed from the registry.
|
||||
// Peers subscribed to other topics remain tracked for those topics.
|
||||
// If the topic does not exist, this operation is a no-op.
|
||||
func (g *GossipPeerCrawler) RemoveTopic(topic string) {
|
||||
g.crawledPeers.removeTopic(topic)
|
||||
}
|
||||
|
||||
// Start begins the crawler's background operations. It launches three goroutines:
|
||||
// a crawl loop that periodically discovers new peers via discv5, a ping loop that
|
||||
// verifies peer reachability, and a cleanup loop that removes stale or filtered peers.
|
||||
// The provided TopicExtractor is used to determine which gossipsub topics each
|
||||
// discovered peer subscribes to. Start is idempotent; subsequent calls after the
|
||||
// first are no-ops. Returns an error if the topic extractor is nil.
|
||||
func (g *GossipPeerCrawler) Start(te gossipcrawler.TopicExtractor) error {
|
||||
if te == nil {
|
||||
return errors.New("topic extractor is nil")
|
||||
}
|
||||
g.once.Do(func() {
|
||||
g.topicExtractor = te
|
||||
go g.crawlLoop()
|
||||
go g.pingLoop()
|
||||
go g.cleanupLoop()
|
||||
})
|
||||
|
||||
return nil
|
||||
}
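// Illustrative sketch: starting the crawler with a minimal topic extractor.
// Returning a fixed topic list is an assumption for demonstration; in practice
// the extractor derives the node's gossip topics from its ENR.
//
//	err := crawler.Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
//		return []string{topic}, nil
//	})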
|
||||
|
||||
func (g *GossipPeerCrawler) pingLoop() {
|
||||
for {
|
||||
select {
|
||||
case node := <-g.pingCh:
|
||||
if err := g.pingSemaphore.Acquire(g.ctx, 1); err != nil {
|
||||
return
|
||||
}
|
||||
go func(node *enode.Node) {
|
||||
defer g.pingSemaphore.Release(1)
|
||||
|
||||
if err := g.dv5.Ping(node); err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Debug("Failed to ping node")
|
||||
g.crawledPeers.removePeerByNodeId(node.ID())
|
||||
return
|
||||
}
|
||||
|
||||
g.crawledPeers.updateStatusToPinged(node.ID())
|
||||
}(&node)
|
||||
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerCrawler) crawlLoop() {
|
||||
for {
|
||||
g.crawl()
|
||||
select {
|
||||
case <-time.After(g.crawlInterval):
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GossipPeerCrawler) crawl() {
|
||||
log.Debug("Bitcoin")
|
||||
|
||||
ctx, cancel := context.WithTimeout(g.ctx, g.crawlTimeout)
|
||||
defer cancel()
|
||||
|
||||
iterator := g.dv5.RandomNodes()
|
||||
|
||||
// Ensure iterator unblocks on context cancellation or timeout
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
iterator.Close()
|
||||
}()
|
||||
|
||||
log.Debug("ABCDEFG")
|
||||
|
||||
for iterator.Next() {
|
||||
log.Debug("Ethereum")
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
if node == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !g.peerFilter(node) {
|
||||
g.crawledPeers.removePeerByNodeId(node.ID())
|
||||
continue
|
||||
}
|
||||
|
||||
topics, err := g.topicExtractor(ctx, node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Debug("Failed to extract topics, skipping")
|
||||
continue
|
||||
}
|
||||
|
||||
shouldPing, err := g.crawledPeers.updatePeer(node, topics)
|
||||
if err != nil {
|
||||
log.WithError(err).WithField("node", node.ID()).Error("Failed to update crawled peers")
|
||||
}
|
||||
if !shouldPing {
|
||||
continue
|
||||
}
|
||||
select {
|
||||
case g.pingCh <- *node:
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanupLoop periodically removes peers that the filter rejects or that
|
||||
// have no topics of interest. It uses the same context lifecycle as other
|
||||
// background loops.
|
||||
func (g *GossipPeerCrawler) cleanupLoop() {
|
||||
ticker := time.NewTicker(cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
// Initial cleanup to catch any leftovers from startup state
|
||||
g.cleanup()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
g.cleanup()
|
||||
case <-g.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// cleanup scans the crawled peer set and removes entries that either fail
|
||||
// the current peer filter or have no topics of interest remaining.
|
||||
func (g *GossipPeerCrawler) cleanup() {
|
||||
cp := g.crawledPeers
|
||||
|
||||
// Snapshot current peers to evaluate without holding the lock during
|
||||
// filter and topic extraction.
|
||||
cp.mu.RLock()
|
||||
peers := make([]*peerNode, 0, len(cp.peerNodeByPid))
|
||||
for _, p := range cp.peerNodeByPid {
|
||||
peers = append(peers, p)
|
||||
}
|
||||
cp.mu.RUnlock()
|
||||
|
||||
for _, p := range peers {
|
||||
// Remove peers that no longer pass the filter
|
||||
if !g.peerFilter(p.node) {
|
||||
cp.removePeerByNodeId(p.node.ID())
|
||||
continue
|
||||
}
|
||||
|
||||
// Re-extract topics; if the extractor errors or yields none, drop the peer.
|
||||
topics, err := g.topicExtractor(g.ctx, p.node)
|
||||
if err != nil || len(topics) == 0 {
|
||||
cp.removePeerByNodeId(p.node.ID())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// enodeToPeerID converts an enode record to a peer ID.
|
||||
func enodeToPeerID(n *enode.Node) (peer.ID, error) {
|
||||
info, _, err := convertToAddrInfo(n)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("converting enode to addr info: %w", err)
|
||||
}
|
||||
if info == nil {
|
||||
return "", errors.New("peer info is nil")
|
||||
}
|
||||
return info.ID, nil
|
||||
}
|
||||
beacon-chain/p2p/gossip_peer_crawler_test.go (new file, 787 lines)
@@ -0,0 +1,787 @@
|
||||
package p2p
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
"github.com/stretchr/testify/require"
|
||||
require2 "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Helpers for crawledPeers tests
|
||||
func newTestCrawledPeers() *crawledPeers {
|
||||
return &crawledPeers{
|
||||
peerNodeByEnode: make(map[enode.ID]*peerNode),
|
||||
peerNodeByPid: make(map[peer.ID]*peerNode),
|
||||
peersByTopic: make(map[string]map[*peerNode]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func addPeerWithTopics(t *testing.T, cp *crawledPeers, node *enode.Node, topics []string, pinged bool) *peerNode {
|
||||
t.Helper()
|
||||
pid, err := enodeToPeerID(node)
|
||||
require.NoError(t, err)
|
||||
p := &peerNode{
|
||||
isPinged: pinged,
|
||||
node: node,
|
||||
peerID: pid,
|
||||
topics: make(map[string]struct{}),
|
||||
}
|
||||
cp.mu.Lock()
|
||||
cp.peerNodeByEnode[p.node.ID()] = p
|
||||
cp.peerNodeByPid[p.peerID] = p
|
||||
cp.updateTopicsUnlocked(p, topics)
|
||||
cp.mu.Unlock()
|
||||
return p
|
||||
}
|
||||
|
||||
func TestUpdateStatusToPinged(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target *enode.Node
|
||||
expectPinged map[enode.ID]bool
|
||||
}{
|
||||
{
|
||||
name: "sets pinged for existing peer",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
|
||||
},
|
||||
target: node1,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "idempotent when already pinged",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, true)
|
||||
},
|
||||
target: node1,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no change when peer missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"a"}, false)
|
||||
},
|
||||
target: node2,
|
||||
expectPinged: map[enode.ID]bool{
|
||||
node1.ID(): false,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.updateStatusToPinged(tc.target.ID())
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
for id, exp := range tc.expectPinged {
|
||||
if p := cp.peerNodeByEnode[id]; p != nil {
|
||||
require.Equal(t, exp, p.isPinged)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveTopic(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
topic1 := "t1"
|
||||
topic2 := "t2"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
topic string
|
||||
check func(*testing.T, *crawledPeers)
|
||||
}{
|
||||
{
|
||||
name: "removes topic from all peers and index",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1", "t2"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
topic: topic1,
|
||||
check: func(t *testing.T, cp *crawledPeers) {
|
||||
_, ok := cp.peersByTopic[topic1]
|
||||
require.False(t, ok)
|
||||
for _, p := range cp.peerNodeByPid {
|
||||
_, has := p.topics[topic1]
|
||||
require.False(t, has)
|
||||
}
|
||||
// Ensure other topics remain
|
||||
_, ok = cp.peersByTopic[topic2]
|
||||
require.True(t, ok)
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no-op when topic missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t2"}, true)
|
||||
},
|
||||
topic: topic1,
|
||||
check: func(t *testing.T, cp *crawledPeers) {
|
||||
_, ok := cp.peersByTopic[topic2]
|
||||
require.True(t, ok)
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removeTopic(tc.topic)
|
||||
tc.check(t, cp)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeer(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target enode.ID
|
||||
wantTopics int
|
||||
}{
|
||||
{
|
||||
name: "removes existing peer and prunes empty topic",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: node1.ID(),
|
||||
wantTopics: 0,
|
||||
},
|
||||
{
|
||||
name: "removes only targeted peer; keeps topic for other",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
target: node1.ID(),
|
||||
wantTopics: 1, // byTopic should still have t1 with one peer
|
||||
},
|
||||
{
|
||||
name: "no-op when peer missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: node2.ID(),
|
||||
wantTopics: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removePeerByNodeId(tc.target)
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
require.Len(t, cp.peersByTopic, tc.wantTopics)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeerId(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
node1 := localNode.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
pid1, err := enodeToPeerID(node1)
|
||||
require.NoError(t, err)
|
||||
pid2, err := enodeToPeerID(node2)
|
||||
require.NoError(t, err)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
prep func(*crawledPeers)
|
||||
target peer.ID
|
||||
wantTopics int
|
||||
wantPeers int
|
||||
}{
|
||||
{
|
||||
name: "removes existing peer by id and prunes topic",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: pid1,
|
||||
wantTopics: 0,
|
||||
wantPeers: 0,
|
||||
},
|
||||
{
|
||||
name: "removes only targeted peer id; keeps topic for other",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
addPeerWithTopics(t, cp, node2, []string{"t1"}, true)
|
||||
},
|
||||
target: pid1,
|
||||
wantTopics: 1,
|
||||
wantPeers: 1,
|
||||
},
|
||||
{
|
||||
name: "no-op when peer id missing",
|
||||
prep: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, node1, []string{"t1"}, true)
|
||||
},
|
||||
target: pid2,
|
||||
wantTopics: 1,
|
||||
wantPeers: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
tc.prep(cp)
|
||||
cp.removePeerByPeerId(tc.target)
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
require.Len(t, cp.peersByTopic, tc.wantTopics)
|
||||
require.Len(t, cp.peerNodeByPid, tc.wantPeers)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateCrawledIfNewer(t *testing.T) {
|
||||
newCrawler := func() (*crawledPeers, *GossipPeerCrawler, func()) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
g := &GossipPeerCrawler{
|
||||
ctx: ctx,
|
||||
pingCh: make(chan enode.Node, 8),
|
||||
}
|
||||
cp := newTestCrawledPeers()
|
||||
return cp, g, cancel
|
||||
}
|
||||
|
||||
// Helper: local node that will cause enodeToPeerID to fail (no TCP/UDP multiaddrs)
|
||||
newNodeNoPorts := func(t *testing.T) *enode.Node {
|
||||
_, privKey := createAddrAndPrivKey(t)
|
||||
db, err := enode.OpenDB("")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() { db.Close() })
|
||||
ln := enode.NewLocalNode(db, privKey)
|
||||
// Do not set TCP/UDP; keep only IP
|
||||
ln.SetStaticIP(net.ParseIP("127.0.0.1"))
|
||||
return ln.Node()
|
||||
}
|
||||
|
||||
// Ensure both A nodes have the same enode.ID but differing seq
|
||||
ln := createTestNodeRandom(t)
|
||||
nodeA1 := ln.Node()
|
||||
setNodeSeq(ln, nodeA1.Seq()+1)
|
||||
nodeA2 := ln.Node()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
arrange func(*crawledPeers)
|
||||
invokeNode *enode.Node
|
||||
invokeTopics []string
|
||||
expectedShouldPing bool
|
||||
expectErr bool
|
||||
assert func(*testing.T, *crawledPeers, <-chan enode.Node)
|
||||
}{
|
||||
{
|
||||
name: "new peer with topics adds peer and pings",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: []string{"a"},
|
||||
expectedShouldPing: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Len(t, cp.peerNodeByEnode, 1)
|
||||
require.Len(t, cp.peerNodeByPid, 1)
|
||||
require.Contains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new peer with empty topics is removed",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: nil,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
require.Empty(t, cp.peersByTopic)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer lower seq is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA2, []string{"x"}, false) // higher seq exists
|
||||
},
|
||||
invokeNode: nodeA1, // lower seq
|
||||
invokeTopics: []string{"a", "b"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
require.NotContains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer equal seq is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA1,
|
||||
invokeTopics: []string{"a"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
require.NotContains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer higher seq updates topics and pings",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: []string{"a"},
|
||||
expectedShouldPing: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.NotContains(t, cp.peersByTopic, "x")
|
||||
require.Contains(t, cp.peersByTopic, "a")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing peer higher seq but empty topics removes peer",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, nodeA1, []string{"x"}, false)
|
||||
},
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: nil,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "corrupted existing entry with nil node is ignored",
|
||||
arrange: func(cp *crawledPeers) {
|
||||
pid, _ := enodeToPeerID(nodeA1)
|
||||
cp.mu.Lock()
|
||||
pn := &peerNode{node: nil, peerID: pid, topics: map[string]struct{}{"x": {}}}
|
||||
cp.peerNodeByEnode[nodeA1.ID()] = pn
|
||||
cp.peerNodeByPid[pid] = pn
|
||||
cp.peersByTopic["x"] = map[*peerNode]struct{}{pn: {}}
|
||||
cp.mu.Unlock()
|
||||
},
|
||||
expectErr: true,
|
||||
invokeNode: nodeA2,
|
||||
invokeTopics: []string{"a"},
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Contains(t, cp.peersByTopic, "x")
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new peer with no ports causes enodeToPeerID error; no add",
|
||||
arrange: func(cp *crawledPeers) {},
|
||||
invokeNode: newNodeNoPorts(t),
|
||||
invokeTopics: []string{"a"},
|
||||
expectErr: true,
|
||||
assert: func(t *testing.T, cp *crawledPeers, ch <-chan enode.Node) {
|
||||
cp.mu.RLock()
|
||||
require.Empty(t, cp.peerNodeByEnode)
|
||||
require.Empty(t, cp.peerNodeByPid)
|
||||
require.Empty(t, cp.peersByTopic)
|
||||
cp.mu.RUnlock()
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cp, g, cancel := newCrawler()
|
||||
defer cancel()
|
||||
tc.arrange(cp)
|
||||
shouldPing, err := cp.updatePeer(tc.invokeNode, tc.invokeTopics)
|
||||
if tc.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
require.Equal(t, shouldPing, tc.expectedShouldPing)
|
||||
tc.assert(t, cp, g.pingCh)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeersForTopic(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
newCrawler := func(filter gossipcrawler.PeerFilterFunc) (*GossipPeerCrawler, *crawledPeers) {
|
||||
g := &GossipPeerCrawler{
|
||||
peerFilter: filter,
|
||||
scorer: func(peer.ID) float64 { return 0 },
|
||||
crawledPeers: newTestCrawledPeers(),
|
||||
}
|
||||
return g, g.crawledPeers
|
||||
}
|
||||
|
||||
// Prepare nodes
|
||||
ln1 := createTestNodeRandom(t)
|
||||
ln2 := createTestNodeRandom(t)
|
||||
ln3 := createTestNodeRandom(t)
|
||||
n1, n2, n3 := ln1.Node(), ln2.Node(), ln3.Node()
|
||||
topic := "top"
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
filter gossipcrawler.PeerFilterFunc
|
||||
setup func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers)
|
||||
wantIDs []enode.ID
|
||||
}{
|
||||
{
|
||||
name: "no peers for topic returns empty",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {},
|
||||
wantIDs: nil,
|
||||
},
|
||||
{
|
||||
name: "excludes unpinged peers",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
// Add one pinged and one not pinged on same topic
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
addPeerWithTopics(t, cp, n2, []string{string(topic)}, false)
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "applies peer filter to exclude",
|
||||
filter: func(n *enode.Node) bool { return n.ID() != n2.ID() },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "ignores peerNode with nil node",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
// Add n2 then set its node to nil to simulate corrupted entry
|
||||
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
cp.mu.Lock()
|
||||
p2.node = nil
|
||||
cp.mu.Unlock()
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID()},
|
||||
},
|
||||
{
|
||||
name: "sorted by score descending",
|
||||
filter: func(*enode.Node) bool { return true },
|
||||
setup: func(t *testing.T, g *GossipPeerCrawler, cp *crawledPeers) {
|
||||
// Add three pinged peers
|
||||
p1 := addPeerWithTopics(t, cp, n1, []string{string(topic)}, true)
|
||||
p2 := addPeerWithTopics(t, cp, n2, []string{string(topic)}, true)
|
||||
p3 := addPeerWithTopics(t, cp, n3, []string{string(topic)}, true)
|
||||
// Provide a deterministic scoring function
|
||||
scores := map[peer.ID]float64{
|
||||
p1.peerID: 3.0,
|
||||
p2.peerID: 2.0,
|
||||
p3.peerID: 1.0,
|
||||
}
|
||||
g.scorer = func(id peer.ID) float64 { return scores[id] }
|
||||
},
|
||||
wantIDs: []enode.ID{n1.ID(), n2.ID(), n3.ID()},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
g, cp := newCrawler(tc.filter)
|
||||
tc.setup(t, g, cp)
|
||||
got := g.PeersForTopic(topic)
|
||||
var gotIDs []enode.ID
|
||||
for _, n := range got {
|
||||
gotIDs = append(gotIDs, n.ID())
|
||||
}
|
||||
if tc.wantIDs == nil {
|
||||
require.Empty(t, gotIDs)
|
||||
return
|
||||
}
|
||||
require.Equal(t, tc.wantIDs, gotIDs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawler_AddsAndPingsPeer(t *testing.T) {
|
||||
// Create a test node with valid ENR entries (IP/TCP/UDP)
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
|
||||
// Prepare a mock iterator returning our single node
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
// Prepare a mock listener with successful Ping
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
// Inject a permissive peer filter
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
|
||||
|
||||
// Create crawler with small intervals
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 4, filter, scorer)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Assign a simple topic extractor
|
||||
topic := "test/topic"
|
||||
topicExtractor := func(ctx context.Context, n *enode.Node) ([]string, error) {
|
||||
return []string{topic}, nil
|
||||
}
|
||||
|
||||
// Run ping loop in background and perform a single crawl
|
||||
require.NoError(t, g.Start(topicExtractor))
|
||||
|
||||
// Verify that the peer has been indexed under the topic and marked as pinged
|
||||
require2.Eventually(t, func() bool {
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
|
||||
peers := g.crawledPeers.peersByTopic[topic]
|
||||
if len(peers) == 0 {
|
||||
return false
|
||||
}
|
||||
// Fetch the single peerNode and check status
|
||||
for pn := range peers {
|
||||
if pn == nil {
|
||||
return false
|
||||
}
|
||||
return pn.isPinged
|
||||
}
|
||||
return false
|
||||
}, 2*time.Second, 10*time.Millisecond)
|
||||
}
|
||||
|
||||
func TestCrawler_SkipsPeer_WhenFilterRejects(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
// Reject all peers via injected filter
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return false })
|
||||
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
|
||||
if err != nil {
|
||||
t.Fatalf("NewGossipPeerCrawler error: %v", err)
|
||||
}
|
||||
|
||||
topic := "test/topic"
|
||||
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic}, nil }
|
||||
|
||||
g.crawl()
|
||||
|
||||
// Verify no peers are indexed, because filter rejected the node
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
if len(g.crawledPeers.peerNodeByEnode) != 0 || len(g.crawledPeers.peerNodeByPid) != 0 || len(g.crawledPeers.peersByTopic) != 0 {
|
||||
t.Fatalf("expected no peers indexed, got byEnode=%d byPeerId=%d byTopic=%d",
|
||||
len(g.crawledPeers.peerNodeByEnode), len(g.crawledPeers.peerNodeByPid), len(g.crawledPeers.peersByTopic))
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawler_RemoveTopic_RemovesTopicFromIndexes(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
node := localNode.Node()
|
||||
iterator := p2ptest.NewMockIterator([]*enode.Node{node})
|
||||
mockListener := p2ptest.NewMockListener(localNode, iterator)
|
||||
mockListener.PingFunc = func(*enode.Node) error { return nil }
|
||||
|
||||
filter := gossipcrawler.PeerFilterFunc(func(n *enode.Node) bool { return true })
|
||||
|
||||
scorer := func(peer.ID) float64 { return 0 }
|
||||
g, err := NewGossipPeerCrawler(t.Context(), &Service{}, mockListener, 2*time.Second, 10*time.Millisecond, 2, filter, scorer)
|
||||
if err != nil {
|
||||
t.Fatalf("NewGossipPeerCrawler error: %v", err)
|
||||
}
|
||||
|
||||
topic1 := "test/topic1"
|
||||
topic2 := "test/topic2"
|
||||
g.topicExtractor = func(ctx context.Context, n *enode.Node) ([]string, error) { return []string{topic1, topic2}, nil }
|
||||
|
||||
// Single crawl to index topics
|
||||
g.crawl()
|
||||
|
||||
// Remove one topic and assert it is pruned from all indexes
|
||||
g.RemoveTopic(topic1)
|
||||
|
||||
g.crawledPeers.mu.RLock()
|
||||
defer g.crawledPeers.mu.RUnlock()
|
||||
|
||||
if _, ok := g.crawledPeers.peersByTopic[topic1]; ok {
|
||||
t.Fatalf("expected topic1 to be removed from byTopic")
|
||||
}
|
||||
|
||||
// Ensure peer still exists and retains topic2
|
||||
for _, pn := range g.crawledPeers.peerNodeByEnode {
|
||||
if _, has1 := pn.topics[topic1]; has1 {
|
||||
t.Fatalf("expected topic1 to be removed from peer topics")
|
||||
}
|
||||
if _, has2 := pn.topics[topic2]; !has2 {
|
||||
t.Fatalf("expected topic2 to remain for peer")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCrawledPeersMetrics(t *testing.T) {
|
||||
localNode1 := createTestNodeRandom(t)
|
||||
node1 := localNode1.Node()
|
||||
localNode2 := createTestNodeRandom(t)
|
||||
node2 := localNode2.Node()
|
||||
|
||||
pid1, err := enodeToPeerID(node1)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("updatePeer records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add first peer with two topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Add second peer with one overlapping topic
|
||||
_, err = cp.updatePeer(node2, []string{"topic1", "topic3"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(3), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removePeerByPeerId records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove first peer by peer ID
|
||||
cp.removePeerByPeerId(pid1)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removePeerByNodeId records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic2"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove first peer by enode ID
|
||||
cp.removePeerByNodeId(node1.ID())
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("removeTopic records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add two peers with overlapping topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1", "topic2"})
|
||||
require.NoError(t, err)
|
||||
_, err = cp.updatePeer(node2, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(2), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Remove topic1 - this should also remove node2 which only had topic1
|
||||
cp.removeTopic("topic1")
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
|
||||
t.Run("updatePeer with empty topics removes peer and records metrics", func(t *testing.T) {
|
||||
cp := newTestCrawledPeers()
|
||||
|
||||
// Add peer with topics
|
||||
_, err := cp.updatePeer(node1, []string{"topic1"})
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(1), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
|
||||
// Increment sequence number to ensure update is processed
|
||||
setNodeSeq(localNode1, node1.Seq()+1)
|
||||
node1Updated := localNode1.Node()
|
||||
|
||||
// Update with empty topics - should remove the peer
|
||||
_, err = cp.updatePeer(node1Updated, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByEnodeCount))
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerPeersByPidCount))
|
||||
require.Equal(t, float64(0), testutil.ToFloat64(gossipCrawlerTopicsCount))
|
||||
})
|
||||
}
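The table tests above lean on helpers such as newTestCrawledPeers, addPeerWithTopics, setNodeSeq, and createTestNodeRandom, which are defined elsewhere in this package and are not part of this hunk. As a hedged illustration only, addPeerWithTopics is assumed to behave roughly like the following sketch (register the node via updatePeer, then mark its ping status); the real helper may differ.

// Hypothetical sketch of the test helper used above; not the actual implementation.
// Assumes crawledPeers exposes updatePeer, peerNodeByEnode, and peerNode.isPinged,
// all of which the tests above exercise directly.
func addPeerWithTopics(t *testing.T, cp *crawledPeers, node *enode.Node, topics []string, pinged bool) *peerNode {
	t.Helper()
	_, err := cp.updatePeer(node, topics)
	require.NoError(t, err)
	cp.mu.Lock()
	defer cp.mu.Unlock()
	pn := cp.peerNodeByEnode[node.ID()]
	pn.isPinged = pinged
	return pn
}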
|
||||
beacon-chain/p2p/gossipcrawler/BUILD.bazel (new file, 14 lines)
@@ -0,0 +1,14 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["interface.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler",
    visibility = [
        "//visibility:public",
    ],
    deps = [
        "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
        "@com_github_libp2p_go_libp2p//core/peer:go_default_library",
    ],
)
|
||||
beacon-chain/p2p/gossipcrawler/interface.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package gossipcrawler

import (
	"context"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/libp2p/go-libp2p/core/peer"
)

// TopicExtractor is a function that can determine the set of topics a current or potential peer
// is subscribed to based on key/value pairs from the ENR record.
type TopicExtractor func(ctx context.Context, node *enode.Node) ([]string, error)

// PeerFilterFunc defines the filtering interface used by the crawler to decide if a node
// is a valid candidate to index in the crawler.
type PeerFilterFunc func(*enode.Node) bool

type Crawler interface {
	Start(te TopicExtractor) error
	RemovePeerByPeerId(peerID peer.ID)
	RemoveTopic(topic string)
	PeersForTopic(topic string) []*enode.Node
}

// SubnetTopicsProvider returns the set of gossipsub topics the node
// should currently maintain peer connections for along with the minimum number of peers required
// for each topic.
type SubnetTopicsProvider func() map[string]int

// GossipDialer controls dialing peers for gossipsub topics based
// on a provided SubnetTopicsProvider and the p2p crawler.
type GossipDialer interface {
	Start(provider SubnetTopicsProvider) error
	DialPeersForTopicBlocking(ctx context.Context, topic string, nPeers int) error
}
|
||||
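To make the contract above concrete, here is a minimal, hypothetical sketch of how a caller might wire a TopicExtractor and SubnetTopicsProvider into these interfaces; the package name, helper function, and topic string are illustrative assumptions, not part of this change.

package example // hypothetical usage sketch, not part of the diff

import (
	"context"
	"fmt"

	"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// startCrawlerAndDialer wires a caller-supplied ENR-to-topics function into the
// crawler and starts the dialer with a fixed, illustrative topic requirement.
func startCrawlerAndDialer(c gossipcrawler.Crawler, d gossipcrawler.GossipDialer, extractTopics func(*enode.Node) ([]string, error)) error {
	extractor := func(_ context.Context, node *enode.Node) ([]string, error) {
		return extractTopics(node)
	}
	provider := func() map[string]int {
		// Keep at least 6 peers connected for one illustrative topic.
		return map[string]int{"/eth2/00000000/beacon_attestation_0/ssz_snappy": 6}
	}
	if err := c.Start(extractor); err != nil {
		return fmt.Errorf("starting crawler: %w", err)
	}
	if err := d.Start(provider); err != nil {
		return fmt.Errorf("starting gossip dialer: %w", err)
	}
	return nil
}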
@@ -225,6 +225,10 @@ func (s *Service) AddDisconnectionHandler(handler func(ctx context.Context, id p
|
||||
return
|
||||
}
|
||||
|
||||
if s.crawler != nil {
|
||||
s.crawler.RemovePeerByPeerId(peerID)
|
||||
}
|
||||
|
||||
priorState, err := s.peers.ConnectionState(peerID)
|
||||
if err != nil {
|
||||
// Can happen if the peer has already disconnected, so...
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -98,11 +98,13 @@ type (
|
||||
PeerID() peer.ID
|
||||
Host() host.Host
|
||||
ENR() *enr.Record
|
||||
GossipDialer() gossipcrawler.GossipDialer
|
||||
NodeID() enode.ID
|
||||
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
|
||||
RefreshPersistentSubnets()
|
||||
FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error
|
||||
DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint
|
||||
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
|
||||
Crawler() gossipcrawler.Crawler
|
||||
}
|
||||
|
||||
// Sender abstracts the sending functionality from libp2p.
|
||||
|
||||
@@ -97,6 +97,20 @@ var (
|
||||
Help: "The number of data column sidecar message broadcast attempts.",
|
||||
})
|
||||
|
||||
// Gossip Peer Crawler Metrics
|
||||
gossipCrawlerPeersByEnodeCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "p2p_gossip_crawler_peers_by_enode_count",
|
||||
Help: "The number of peers tracked by enode ID in the gossip peer crawler.",
|
||||
})
|
||||
gossipCrawlerPeersByPidCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "p2p_gossip_crawler_peers_by_pid_count",
|
||||
Help: "The number of peers tracked by peer ID in the gossip peer crawler.",
|
||||
})
|
||||
gossipCrawlerTopicsCount = promauto.NewGauge(prometheus.GaugeOpts{
|
||||
Name: "p2p_gossip_crawler_topics_count",
|
||||
Help: "The number of topics tracked in the gossip peer crawler.",
|
||||
})
|
||||
|
||||
// Gossip Tracer Metrics
|
||||
pubsubTopicsActive = promauto.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "p2p_pubsub_topic_active",
|
||||
|
||||
@@ -4,20 +4,17 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
ecdsaprysm "github.com/OffchainLabs/prysm/v7/crypto/ecdsa"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/version"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
mplex "github.com/libp2p/go-libp2p-mplex"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/p2p/net/connmgr"
|
||||
"github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
libp2pquic "github.com/libp2p/go-libp2p/p2p/transport/quic"
|
||||
libp2ptcp "github.com/libp2p/go-libp2p/p2p/transport/tcp"
|
||||
gomplex "github.com/libp2p/go-mplex"
|
||||
ma "github.com/multiformats/go-multiaddr"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -110,7 +107,6 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
libp2p.ConnectionGater(s),
|
||||
libp2p.Transport(libp2ptcp.NewTCPTransport),
|
||||
libp2p.DefaultMuxers,
|
||||
libp2p.Muxer("/mplex/6.7.0", mplex.DefaultTransport),
|
||||
libp2p.Security(noise.ID, noise.New),
|
||||
libp2p.Ping(false), // Disable Ping Service.
|
||||
}
|
||||
@@ -162,6 +158,10 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) ([]libp2p.Op
|
||||
options = append(options, libp2p.ResourceManager(&network.NullResourceManager{}))
|
||||
}
|
||||
|
||||
if cfg.EnableAutoNAT {
|
||||
options = append(options, libp2p.EnableAutoNATv2())
|
||||
}
|
||||
|
||||
return options, nil
|
||||
}
|
||||
|
||||
@@ -217,8 +217,3 @@ func privKeyOption(privkey *ecdsa.PrivateKey) libp2p.Option {
|
||||
return cfg.Apply(libp2p.Identity(ifaceKey))
|
||||
}
|
||||
}
|
||||
|
||||
// Configures stream timeouts on mplex.
|
||||
func configureMplex() {
|
||||
gomplex.ResetStreamTimeout = 5 * time.Second
|
||||
}
|
||||
|
||||
@@ -132,7 +132,6 @@ func TestDefaultMultiplexers(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, protocol.ID("/yamux/1.0.0"), cfg.Muxers[0].ID)
|
||||
assert.Equal(t, protocol.ID("/mplex/6.7.0"), cfg.Muxers[1].ID)
|
||||
}
|
||||
|
||||
func TestSetConnManagerOption(t *testing.T) {
|
||||
@@ -188,6 +187,30 @@ func checkLimit(t *testing.T, cm connmgr.ConnManager, expected int) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildOptions_EnableAutoNAT(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p2pCfg := &Config{
|
||||
UDPPort: 2000,
|
||||
TCPPort: 3000,
|
||||
QUICPort: 3000,
|
||||
EnableAutoNAT: true,
|
||||
StateNotifier: &mock.MockStateNotifier{},
|
||||
}
|
||||
svc := &Service{cfg: p2pCfg}
|
||||
var err error
|
||||
svc.privKey, err = privKey(svc.cfg)
|
||||
require.NoError(t, err)
|
||||
ipAddr := network.IPAddr()
|
||||
opts, err := svc.buildOptions(ipAddr, svc.privKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that options were built without error when EnableAutoNAT is true.
|
||||
// The actual AutoNAT v2 behavior is tested by libp2p itself.
|
||||
var cfg libp2p.Config
|
||||
err = cfg.Apply(append(opts, libp2p.FallbackDefaults)...)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestMultiAddressBuilderWithID(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address, resAddress, "Unexpected address")
|
||||
assert.Equal(t, address.Equal(resAddress), true, "Unexpected address")
|
||||
|
||||
resDirection, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
@@ -72,7 +72,7 @@ func TestPeerExplicitAdd(t *testing.T) {
|
||||
|
||||
resAddress2, err := p.Address(id)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, address2, resAddress2, "Unexpected address")
|
||||
assert.Equal(t, address2.Equal(resAddress2), true, "Unexpected address")
|
||||
|
||||
resDirection2, err := p.Direction(id)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -6,11 +6,13 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/types"
|
||||
@@ -28,6 +30,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
@@ -61,6 +64,10 @@ var (
|
||||
// for the current peer limit status for the time period
|
||||
// defined below.
|
||||
pollingPeriod = 6 * time.Second
|
||||
|
||||
crawlTimeout = 5 * time.Second
|
||||
crawlInterval = 1 * time.Second
|
||||
maxConcurrentDials = int64(256)
|
||||
)
|
||||
|
||||
// Service for managing peer to peer (p2p) networking.
|
||||
@@ -95,6 +102,8 @@ type Service struct {
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
custodyInfoSet chan struct{}
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
crawler gossipcrawler.Crawler
|
||||
gossipDialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
type custodyInfo struct {
|
||||
@@ -154,8 +163,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
return nil, errors.Wrapf(err, "failed to build p2p options")
|
||||
}
|
||||
|
||||
// Sets mplex timeouts
|
||||
configureMplex()
|
||||
h, err := libp2p.New(opts...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create p2p host")
|
||||
@@ -163,7 +170,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
|
||||
|
||||
s.host = h
|
||||
|
||||
// Gossipsub registration is done before we add in any new peers
|
||||
// Gossip registration is done before we add in any new peers
|
||||
// due to libp2p's gossipsub implementation not taking into
|
||||
// account previously added peers when creating the gossipsub
|
||||
// object.
|
||||
@@ -241,6 +248,25 @@ func (s *Service) Start() {
|
||||
|
||||
s.dv5Listener = listener
|
||||
go s.listenForNewNodes()
|
||||
crawler, err := NewGossipPeerCrawler(
|
||||
s.ctx,
|
||||
s,
|
||||
s.dv5Listener,
|
||||
crawlTimeout,
|
||||
crawlInterval,
|
||||
maxConcurrentDials,
|
||||
s.filterPeer,
|
||||
s.Peers().Scorers().Score,
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Fatal("Failed to create peer crawler")
|
||||
s.startupErr = err
|
||||
return
|
||||
}
|
||||
s.crawler = crawler
|
||||
// Initialise the gossipsub dialer which will be started
|
||||
// once the sync service is ready to provide subnet topics.
|
||||
s.gossipDialer = NewGossipPeerDialer(s.ctx, s.crawler, s.PubSub().ListPeers, s.DialPeers)
|
||||
}
|
||||
|
||||
s.started = true
|
||||
@@ -259,6 +285,14 @@ func (s *Service) Start() {
|
||||
// current epoch.
|
||||
s.RefreshPersistentSubnets()
|
||||
|
||||
if s.cfg.EnableAutoNAT {
|
||||
if err := s.subscribeReachabilityEvents(); err != nil {
|
||||
log.WithError(err).Error("Failed to subscribe to AutoNAT v2 reachability events")
|
||||
} else {
|
||||
log.Info("AutoNAT v2 enabled for address reachability detection")
|
||||
}
|
||||
}
|
||||
|
||||
// Periodic functions.
|
||||
async.RunEvery(s.ctx, params.BeaconConfig().TtfbTimeoutDuration(), func() {
|
||||
ensurePeerConnections(s.ctx, s.host, s.peers, relayNodes...)
|
||||
@@ -311,12 +345,25 @@ func (s *Service) Start() {
|
||||
func (s *Service) Stop() error {
|
||||
defer s.cancel()
|
||||
s.started = false
|
||||
|
||||
if s.dv5Listener != nil {
|
||||
s.dv5Listener.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Crawler returns the p2p service's peer crawler.
|
||||
func (s *Service) Crawler() gossipcrawler.Crawler {
|
||||
return s.crawler
|
||||
}
|
||||
|
||||
// GossipDialer returns the dialer responsible for maintaining
|
||||
// peer counts per gossipsub topic, if discovery is enabled.
|
||||
func (s *Service) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return s.gossipDialer
|
||||
}
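As a hedged caller-side sketch (the helper name and peer count are made up), a subscriber could use these accessors to block until the dialer has connected enough peers for a topic:

// Hypothetical helper; would live alongside Service in this package.
// Assumes the caller supplies its own context and fully qualified topic string.
func ensureTopicPeers(ctx context.Context, s *Service, topic string) error {
	dialer := s.GossipDialer()
	if dialer == nil {
		// Discovery (and therefore the dialer) may be disabled.
		return nil
	}
	// Block until at least 6 peers are connected for the topic, or ctx is done.
	return dialer.DialPeersForTopicBlocking(ctx, topic, 6)
}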
|
||||
|
||||
// Status of the p2p service. Will return an error if the service is considered unhealthy to
|
||||
// indicate that this node should not serve traffic until the issue has been resolved.
|
||||
func (s *Service) Status() error {
|
||||
@@ -557,3 +604,118 @@ func (s *Service) downscorePeer(peerID peer.ID, reason string) {
|
||||
newScore := s.Peers().Scorers().BadResponsesScorer().Increment(peerID)
|
||||
log.WithFields(logrus.Fields{"peerID": peerID, "reason": reason, "newScore": newScore}).Debug("Downscore peer")
|
||||
}
|
||||
|
||||
func (s *Service) subscribeReachabilityEvents() error {
|
||||
sub, err := s.host.EventBus().Subscribe(new(event.EvtHostReachableAddrsChanged))
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscribing to reachability events: %w", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := sub.Close(); err != nil {
|
||||
log.WithError(err).Debug("Failed to close reachability event subscription")
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case ev := <-sub.Out():
|
||||
if event, ok := ev.(event.EvtHostReachableAddrsChanged); ok {
|
||||
log.WithFields(logrus.Fields{
|
||||
"reachable": multiaddrsToStrings(event.Reachable),
|
||||
"unreachable": multiaddrsToStrings(event.Unreachable),
|
||||
"unknown": multiaddrsToStrings(event.Unknown),
|
||||
}).Info("Address reachability changed")
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
func multiaddrsToStrings(addrs []multiaddr.Multiaddr) []string {
|
||||
strs := make([]string, len(addrs))
|
||||
for i, a := range addrs {
|
||||
strs[i] = a.String()
|
||||
}
|
||||
return strs
|
||||
}
|
||||
|
||||
func AttestationSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return attestationSubnets(record)
|
||||
}
|
||||
|
||||
func SyncSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return syncSubnets(record)
|
||||
}
|
||||
|
||||
func DataColumnSubnets(nodeID enode.ID, node *enode.Node, record *enr.Record) (map[uint64]bool, error) {
|
||||
return dataColumnSubnets(nodeID, record)
|
||||
}
|
||||
|
||||
func DataColumnSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(DataColumnSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncCommitteeSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncCommitteeSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttestationSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttestationSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlobSubnetTopic(digest [4]byte, subnet uint64) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlobSubnetTopicFormat, digest, subnet) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcOptimisticToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientOptimisticUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func LcFinalityToTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(LightClientFinalityUpdateTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlockSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlockSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AggregateAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AggregateAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func VoluntaryExitSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ExitSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func ProposerSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(ProposerSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func AttesterSlashingSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(AttesterSlashingSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func SyncContributionAndProofSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(SyncContributionAndProofSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
|
||||
|
||||
func BlsToExecutionChangeSubnetTopic(forkDigest [4]byte) string {
|
||||
e := &encoder.SszNetworkEncoder{}
|
||||
return fmt.Sprintf(BlsToExecutionChangeSubnetTopicFormat, forkDigest) + e.ProtocolSuffix()
|
||||
}
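All of these helpers follow the same pattern: a topic format string, the fork digest, an optional subnet index, and the encoder's protocol suffix. A small, hypothetical example (the digest bytes are arbitrary) shows the composed string:

// Hypothetical example; the digest is not a real fork digest.
digest := [4]byte{0x01, 0x02, 0x03, 0x04}
topic := AttestationSubnetTopic(digest, 7)
// With the usual "/eth2/%x/beacon_attestation_%d" format and the SSZ-snappy
// encoder suffix, topic reads "/eth2/01020304/beacon_attestation_7/ssz_snappy".
_ = topic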
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
prysmTime "github.com/OffchainLabs/prysm/v7/time"
|
||||
"github.com/libp2p/go-libp2p"
|
||||
"github.com/libp2p/go-libp2p/core/event"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
noise "github.com/libp2p/go-libp2p/p2p/security/noise"
|
||||
@@ -35,7 +36,8 @@ func createHost(t *testing.T, port uint) (host.Host, *ecdsa.PrivateKey, net.IP)
|
||||
ipAddr := net.ParseIP("127.0.0.1")
|
||||
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
|
||||
require.NoError(t, err, "Failed to p2p listen")
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
|
||||
h, err := libp2p.New([]libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen),
|
||||
libp2p.Security(noise.ID, noise.New)}...)
|
||||
require.NoError(t, err)
|
||||
return h, pkey, ipAddr
|
||||
}
|
||||
@@ -400,3 +402,58 @@ func TestService_connectWithPeer(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestService_SubscribeReachabilityEvents(t *testing.T) {
|
||||
hook := logTest.NewGlobal()
|
||||
ctx := t.Context()
|
||||
|
||||
h, _, _ := createHost(t, 0)
|
||||
defer func() {
|
||||
if err := h.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create service with the host
|
||||
s := &Service{
|
||||
ctx: ctx,
|
||||
host: h,
|
||||
cfg: &Config{EnableAutoNAT: true},
|
||||
}
|
||||
|
||||
// Get an emitter for the reachability event
|
||||
emitter, err := h.EventBus().Emitter(new(event.EvtHostReachableAddrsChanged))
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
if err := emitter.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}()
|
||||
// Subscribe to reachability events
|
||||
require.NoError(t, s.subscribeReachabilityEvents())
|
||||
|
||||
// Create test multiaddrs for each reachability state
|
||||
reachableAddr, err := multiaddr.NewMultiaddr("/ip4/192.168.1.1/tcp/9000")
|
||||
require.NoError(t, err)
|
||||
unreachableAddr, err := multiaddr.NewMultiaddr("/ip4/10.0.0.1/tcp/9001")
|
||||
require.NoError(t, err)
|
||||
unknownAddr, err := multiaddr.NewMultiaddr("/ip4/172.16.0.1/tcp/9002")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Emit a reachability event with all address types
|
||||
err = emitter.Emit(event.EvtHostReachableAddrsChanged{
|
||||
Reachable: []multiaddr.Multiaddr{reachableAddr},
|
||||
Unreachable: []multiaddr.Multiaddr{unreachableAddr},
|
||||
Unknown: []multiaddr.Multiaddr{unknownAddr},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Wait for the event to be processed
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Verify the log message contains all addresses
|
||||
require.LogsContain(t, hook, "Address reachability changed")
|
||||
require.LogsContain(t, hook, "/ip4/192.168.1.1/tcp/9000")
|
||||
require.LogsContain(t, hook, "/ip4/10.0.0.1/tcp/9001")
|
||||
require.LogsContain(t, hook, "/ip4/172.16.0.1/tcp/9002")
|
||||
}
|
||||
|
||||
@@ -3,9 +3,6 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"maps"
|
||||
"math"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -14,19 +11,16 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
pb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/holiman/uint256"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -61,223 +55,9 @@ const dataColumnSubnetVal = 150
|
||||
|
||||
const errSavingSequenceNumber = "saving sequence number after updating subnets: %w"
|
||||
|
||||
// nodeFilter returns a function that filters nodes based on the subnet topic and subnet index.
|
||||
func (s *Service) nodeFilter(topic string, indices map[uint64]int) (func(node *enode.Node) (map[uint64]bool, error), error) {
|
||||
switch {
|
||||
case strings.Contains(topic, GossipAttestationMessage):
|
||||
return s.filterPeerForAttSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipSyncCommitteeMessage):
|
||||
return s.filterPeerForSyncSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipBlobSidecarMessage):
|
||||
return s.filterPeerForBlobSubnet(indices), nil
|
||||
case strings.Contains(topic, GossipDataColumnSidecarMessage):
|
||||
return s.filterPeerForDataColumnsSubnet(indices), nil
|
||||
default:
|
||||
return nil, errors.Errorf("no subnet exists for provided topic: %s", topic)
|
||||
}
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets ensures that our node is connected to at least `minimumPeersPerSubnet`
|
||||
// peers for each subnet listed in `subnets`.
|
||||
// If, for all subnets, the threshold is met, then this function immediately returns.
|
||||
// Otherwise, it searches for new peers for defective subnets, and dials them.
|
||||
// If `ctx` is canceled while searching for peers, the search is stopped, but newly found peers are still dialed.
|
||||
// In this case, the function returns an error.
|
||||
func (s *Service) FindAndDialPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) error {
|
||||
ctx, span := trace.StartSpan(ctx, "p2p.FindAndDialPeersWithSubnet")
|
||||
defer span.End()
|
||||
|
||||
// Return early if the discovery listener isn't set.
|
||||
if s.dv5Listener == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restrict dials if limit is applied.
|
||||
maxConcurrentDials := math.MaxInt
|
||||
if flags.MaxDialIsActive() {
|
||||
maxConcurrentDials = flags.Get().MaxConcurrentDials
|
||||
}
|
||||
|
||||
defectiveSubnets := s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
for len(defectiveSubnets) > 0 {
|
||||
// Stop the search/dialing loop if the context is canceled.
|
||||
if err := ctx.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peersToDial, err := func() ([]*enode.Node, error) {
|
||||
ctx, cancel := context.WithTimeout(ctx, batchPeriod)
|
||||
defer cancel()
|
||||
|
||||
peersToDial, err := s.findPeersWithSubnets(ctx, topicFormat, digest, minimumPeersPerSubnet, defectiveSubnets)
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
return nil, errors.Wrap(err, "find peers with subnets")
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Dial new peers in batches.
|
||||
s.dialPeers(s.ctx, maxConcurrentDials, peersToDial)
|
||||
|
||||
defectiveSubnets = s.defectiveSubnets(topicFormat, digest, minimumPeersPerSubnet, subnets)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
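A brief, hypothetical caller-side sketch of the contract described above; the subnet indices, timeout, and minimum peer count are illustrative assumptions.

// Hypothetical caller; assumes digest is the current fork digest and s is the
// p2p *Service defined in this package.
func dialAttestationSubnetPeers(ctx context.Context, s *Service, digest [fieldparams.VersionLength]byte) error {
	subnets := map[uint64]bool{1: true, 2: true} // illustrative subnet indices
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	// Ensure at least 3 connected peers for each listed subnet.
	return s.FindAndDialPeersWithSubnets(ctx, AttestationSubnetTopicFormat, digest, 3, subnets)
}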
|
||||
|
||||
// updateDefectiveSubnets updates the defective subnets map when a node with matching subnets is found.
|
||||
// It decrements the defective count for each subnet the node satisfies and removes subnets
|
||||
// that are fully satisfied (count reaches 0).
|
||||
func updateDefectiveSubnets(
|
||||
nodeSubnets map[uint64]bool,
|
||||
defectiveSubnets map[uint64]int,
|
||||
) {
|
||||
for subnet := range defectiveSubnets {
|
||||
if !nodeSubnets[subnet] {
|
||||
continue
|
||||
}
|
||||
defectiveSubnets[subnet]--
|
||||
if defectiveSubnets[subnet] == 0 {
|
||||
delete(defectiveSubnets, subnet)
|
||||
}
|
||||
}
|
||||
}
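For clarity, a small worked example of the bookkeeping above (the counts are illustrative):

// Suppose subnet 1 still needs 1 peer and subnet 2 needs 2, and a discovered
// node advertises both subnets.
defective := map[uint64]int{1: 1, 2: 2}
nodeSubnets := map[uint64]bool{1: true, 2: true}
updateDefectiveSubnets(nodeSubnets, defective)
// Subnet 1 reached zero and was deleted; subnet 2 still needs one more peer,
// so defective is now map[uint64]int{2: 1}.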
|
||||
|
||||
// findPeersWithSubnets finds peers subscribed to defective subnets in batches
|
||||
// until enough peers are found or the context is canceled.
|
||||
// It returns new peers found during the search.
|
||||
func (s *Service) findPeersWithSubnets(
|
||||
ctx context.Context,
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
defectiveSubnetsOrigin map[uint64]int,
|
||||
) ([]*enode.Node, error) {
|
||||
// Copy the defective subnets map to avoid modifying the original map.
|
||||
defectiveSubnets := make(map[uint64]int, len(defectiveSubnetsOrigin))
|
||||
maps.Copy(defectiveSubnets, defectiveSubnetsOrigin)
|
||||
|
||||
// Create a discovery iterator to find new peers.
|
||||
iterator := s.dv5Listener.RandomNodes()
|
||||
|
||||
// `iterator.Next` can block indefinitely. `iterator.Close` unblocks it.
|
||||
// So it is important to close the iterator when the context is done to ensure
|
||||
// that the search does not hang indefinitely.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
iterator.Close()
|
||||
}()
|
||||
|
||||
// Retrieve the filter function that will be used to filter nodes based on the defective subnets.
|
||||
filter, err := s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
|
||||
// Crawl the network for peers subscribed to the defective subnets.
|
||||
nodeByNodeID := make(map[enode.ID]*enode.Node)
|
||||
|
||||
for len(defectiveSubnets) > 0 && iterator.Next() {
|
||||
if err := ctx.Err(); err != nil {
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, err
|
||||
}
|
||||
|
||||
node := iterator.Node()
|
||||
|
||||
// Remove duplicates, keeping the node with higher seq.
|
||||
existing, ok := nodeByNodeID[node.ID()]
|
||||
if ok && existing.Seq() >= node.Seq() {
|
||||
continue // keep existing and skip.
|
||||
}
|
||||
|
||||
// Treat nodes already present in nodeByNodeID that now have a higher seq number as new peers.
|
||||
// Skip peers that do not match the filter.
|
||||
if !s.filterPeer(node) {
|
||||
if ok {
|
||||
// This means the existing peer with the lower sequence number is no longer valid.
|
||||
delete(nodeByNodeID, existing.ID())
|
||||
// Note: We choose not to roll back changes to the defective subnets map, in favor of calling s.defectiveSubnets once again after dialing peers.
|
||||
// This is a case that should rarely happen and should be handled through a second iteration in FindAndDialPeersWithSubnets
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Get all needed subnets that the node is subscribed to.
|
||||
// Skip nodes that are not subscribed to any of the defective subnets.
|
||||
nodeSubnets, err := filter(node)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logrus.Fields{
|
||||
"nodeID": node.ID(),
|
||||
"topicFormat": topicFormat,
|
||||
}).Debug("Could not get needed subnets from peer")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if len(nodeSubnets) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// We found a new peer. Modify the defective subnets map
|
||||
// and the filter accordingly.
|
||||
nodeByNodeID[node.ID()] = node
|
||||
|
||||
updateDefectiveSubnets(nodeSubnets, defectiveSubnets)
|
||||
filter, err = s.nodeFilter(topicFormat, defectiveSubnets)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "node filter")
|
||||
}
|
||||
}
|
||||
|
||||
// Convert the map to a slice.
|
||||
peersToDial := make([]*enode.Node, 0, len(nodeByNodeID))
|
||||
for _, node := range nodeByNodeID {
|
||||
peersToDial = append(peersToDial, node)
|
||||
}
|
||||
|
||||
return peersToDial, nil
|
||||
}
|
||||
|
||||
// defectiveSubnets returns a map of subnets that have fewer than the minimum peer count.
|
||||
func (s *Service) defectiveSubnets(
|
||||
topicFormat string,
|
||||
digest [fieldparams.VersionLength]byte,
|
||||
minimumPeersPerSubnet int,
|
||||
subnets map[uint64]bool,
|
||||
) map[uint64]int {
|
||||
missingCountPerSubnet := make(map[uint64]int, len(subnets))
|
||||
for subnet := range subnets {
|
||||
topic := fmt.Sprintf(topicFormat, digest, subnet) + s.Encoding().ProtocolSuffix()
|
||||
peers := s.pubsub.ListPeers(topic)
|
||||
peerCount := len(peers)
|
||||
if peerCount < minimumPeersPerSubnet {
|
||||
missingCountPerSubnet[subnet] = minimumPeersPerSubnet - peerCount
|
||||
}
|
||||
}
|
||||
|
||||
return missingCountPerSubnet
|
||||
}
|
||||
|
||||
// dialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
|
||||
// DialPeers dials multiple peers concurrently up to `maxConcurrentDials` at a time.
|
||||
// In case of a dial failure, it logs the error but continues dialing other peers.
|
||||
func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
func (s *Service) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
var mut sync.Mutex
|
||||
|
||||
counter := uint(0)
|
||||
@@ -319,75 +99,13 @@ func (s *Service) dialPeers(ctx context.Context, maxConcurrentDials int, nodes [
|
||||
return counter
|
||||
}
|
||||
|
||||
// filterPeerForAttSubnet returns a method that filters peers specifically for a particular attestation subnet.
|
||||
func (s *Service) filterPeerForAttSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "attestation subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular sync subnet.
|
||||
func (s *Service) filterPeerForSyncSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := syncSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "sync subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular blob subnet.
|
||||
// All peers are supposed to be subscribed to all blob subnets.
|
||||
func (s *Service) filterPeerForBlobSubnet(indices map[uint64]int) func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
result := make(map[uint64]bool, len(indices))
|
||||
for i := range indices {
|
||||
result[i] = true
|
||||
}
|
||||
|
||||
return func(_ *enode.Node) (map[uint64]bool, error) {
|
||||
return result, nil
|
||||
}
|
||||
}
|
||||
|
||||
// returns a method that filters peers specifically for a particular data column subnet.
|
||||
func (s *Service) filterPeerForDataColumnsSubnet(indices map[uint64]int) func(node *enode.Node) (map[uint64]bool, error) {
|
||||
return func(node *enode.Node) (map[uint64]bool, error) {
|
||||
if !s.filterPeer(node) {
|
||||
return map[uint64]bool{}, nil
|
||||
}
|
||||
|
||||
subnets, err := dataColumnSubnets(node.ID(), node.Record())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "data column subnets")
|
||||
}
|
||||
|
||||
return intersect(indices, subnets), nil
|
||||
}
|
||||
}
|
||||
|
||||
// lower threshold to broadcast object compared to searching
|
||||
// for a subnet. So that even in the event of poor peer
|
||||
// connectivity, we can still broadcast an attestation.
|
||||
func (s *Service) hasPeerWithSubnet(subnetTopic string) bool {
|
||||
func (s *Service) hasPeerWithTopic(topic string) bool {
|
||||
// In the event peer threshold is lower, we will choose the lower
|
||||
// threshold.
|
||||
minPeers := min(1, flags.Get().MinimumPeersPerSubnet)
|
||||
topic := subnetTopic + s.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.pubsub.ListPeers(topic)
|
||||
peersWithSubnetCount := len(peersWithSubnet)
|
||||
|
||||
@@ -712,16 +430,3 @@ func byteCount(bitCount int) int {
|
||||
}
|
||||
return numOfBytes
|
||||
}
|
||||
|
||||
// intersect intersects two maps and returns a new map containing only the keys
|
||||
// that are present in both maps.
|
||||
func intersect(left map[uint64]int, right map[uint64]bool) map[uint64]bool {
|
||||
result := make(map[uint64]bool, min(len(left), len(right)))
|
||||
for i := range left {
|
||||
if right[i] {
|
||||
result[i] = true
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
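A quick illustration of the helper above:

// Keys from `left` are kept only when `right` marks them true.
left := map[uint64]int{1: 2, 3: 1, 5: 4}
right := map[uint64]bool{1: true, 3: false, 7: true}
result := intersect(left, right)
// result == map[uint64]bool{1: true}: key 3 is present but false, and 5 and 7
// do not appear in both maps.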
|
||||
|
||||
@@ -3,14 +3,15 @@ package p2p
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/go-bitfield"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
testDB "github.com/OffchainLabs/prysm/v7/beacon-chain/db/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
testp2p "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
@@ -24,6 +25,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/crypto"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
require2 "github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
@@ -122,6 +124,21 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
// Start the service.
|
||||
service.Start()
|
||||
|
||||
// Start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
|
||||
// Set the ENR `attnets`, used by Prysm to filter peers by subnet.
|
||||
bitV := bitfield.NewBitvector64()
|
||||
bitV.SetBitAt(i, true)
|
||||
@@ -129,7 +146,7 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
service.dv5Listener.LocalNode().Set(entry)
|
||||
|
||||
// Join and subscribe to the subnet, needed by libp2p.
|
||||
topicName := fmt.Sprintf(AttestationSubnetTopicFormat, bootNodeForkDigest, i) + "/ssz_snappy"
|
||||
topicName := AttestationSubnetTopic(bootNodeForkDigest, i)
|
||||
topic, err := service.pubsub.Join(topicName)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -169,23 +186,65 @@ func TestStartDiscV5_FindAndDialPeersWithSubnet(t *testing.T) {
|
||||
close(service.custodyInfoSet)
|
||||
|
||||
service.Start()
|
||||
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
|
||||
// Start the crawler with a topic extractor that maps ENR attestation subnets
|
||||
// to full attestation topics for the current fork digest and encoding.
|
||||
_ = service.Crawler().Start(func(ctx context.Context, node *enode.Node) ([]string, error) {
|
||||
var topics []string
|
||||
subs, err := attestationSubnets(node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(bootNodeForkDigest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
})
|
||||
defer func() {
|
||||
err := service.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
subnets := map[uint64]bool{1: true, 2: true, 3: true}
|
||||
defectiveSubnets := service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, subnetCount, len(defectiveSubnets))
|
||||
builder := func(idx uint64) string {
|
||||
return AttestationSubnetTopic(bootNodeForkDigest, idx)
|
||||
}
|
||||
defectiveSubnetsCount := defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, subnetCount, defectiveSubnetsCount)
|
||||
|
||||
ctxWithTimeOut, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
var topicsToDial []string
|
||||
for s := range subnets {
|
||||
topicsToDial = append(topicsToDial, builder(s))
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
err = service.FindAndDialPeersWithSubnets(ctxWithTimeOut, AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.NoError(t, err)
|
||||
for _, topic := range topicsToDial {
|
||||
err = service.GossipDialer().DialPeersForTopicBlocking(ctx, topic, minimumPeersPerSubnet)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
defectiveSubnets = service.defectiveSubnets(AttestationSubnetTopicFormat, bootNodeForkDigest, minimumPeersPerSubnet, subnets)
|
||||
require.Equal(t, 0, len(defectiveSubnets))
|
||||
defectiveSubnetsCount = defectiveSubnets(service, topics, minimumPeersPerSubnet)
|
||||
require.Equal(t, 0, defectiveSubnetsCount)
|
||||
}
|
||||
|
||||
func defectiveSubnets(service *Service, topics []string, minimumPeersPerSubnet int) int {
|
||||
count := 0
|
||||
for _, topic := range topics {
|
||||
peers := service.pubsub.ListPeers(topic)
|
||||
if len(peers) < minimumPeersPerSubnet {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func Test_AttSubnets(t *testing.T) {
|
||||
@@ -581,7 +640,6 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -742,47 +800,13 @@ func TestFindPeersWithSubnets_NodeDeduplication(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
|
||||
s := createTestService(t, db)
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -792,7 +816,6 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
localNode1 := createTestNodeWithID(t, "node1")
|
||||
@@ -945,23 +968,7 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
// Create test P2P instance
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
// Create mock service
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
s := createTestService(t, db)
|
||||
|
||||
// Mark specific node versions as "bad" to simulate filterPeer failures
|
||||
for _, node := range tt.nodes {
|
||||
@@ -979,30 +986,11 @@ func TestFindPeersWithSubnets_FilterPeerRemoval(t *testing.T) {
|
||||
}
|
||||
|
||||
localNode := createTestNodeRandom(t)
|
||||
|
||||
mockIter := testp2p.NewMockIterator(tt.nodes)
|
||||
s.dv5Listener = testp2p.NewMockListener(localNode, mockIter)
|
||||
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
|
||||
defer cancel()
|
||||
|
||||
result, err := s.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
tt.defectiveSubnets,
|
||||
)
|
||||
|
||||
require.NoError(t, err, tt.description)
|
||||
require.Equal(t, tt.expectedCount, len(result), tt.description)
|
||||
|
||||
if tt.eval != nil {
|
||||
tt.eval(t, result)
|
||||
}
|
||||
crawler := startTestCrawler(t, s, s.dv5Listener.(*testp2p.MockListener))
|
||||
verifyCrawlerPeers(t, crawler, s, tt.defectiveSubnets, tt.expectedCount, tt.description, tt.eval)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1046,7 +1034,6 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
cache.SubnetIDs.EmptyAllCaches()
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
|
||||
ctx := context.Background()
|
||||
db := testDB.SetupDB(t)
|
||||
|
||||
// Create LocalNode with same ID but different sequences
|
||||
@@ -1067,21 +1054,7 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
flags.Init(gFlags)
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
|
||||
service := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: db,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
service := createTestService(t, db)
|
||||
|
||||
// Create iterator with callback that marks peer as bad before processing node1_seq2
|
||||
iter := &callbackIteratorForSubnets{
|
||||
@@ -1105,22 +1078,80 @@ func TestFindPeersWithSubnets_received_bad_existing_node(t *testing.T) {
|
||||
localNode := createTestNodeRandom(t)
|
||||
service.dv5Listener = testp2p.NewMockListener(localNode, iter)
|
||||
|
||||
digest, err := service.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
crawler := startTestCrawler(t, service, service.dv5Listener.(*testp2p.MockListener))
|
||||
|
||||
// Run findPeersWithSubnets - node1_seq1 gets processed first, then callback marks peer bad, then node1_seq2 fails
|
||||
ctxWithTimeout, cancel := context.WithTimeout(ctx, 1*time.Second)
|
||||
defer cancel()
|
||||
|
||||
result, err := service.findPeersWithSubnets(
|
||||
ctxWithTimeout,
|
||||
AttestationSubnetTopicFormat,
|
||||
digest,
|
||||
1,
|
||||
map[uint64]int{1: 2}, // Need 2 peers for subnet 1
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(result))
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID()) // only node2 should remain
|
||||
// Verification using verifyCrawlerPeers with a custom eval function
|
||||
verifyCrawlerPeers(t, crawler, service, map[uint64]int{1: 1}, 1, "only node2 should remain", func(t *testing.T, result []*enode.Node) {
|
||||
require.Equal(t, localNode2.Node().ID(), result[0].ID())
|
||||
})
|
||||
}
|
||||
|
||||
func createTestService(t *testing.T, d db.Database) *Service {
|
||||
fakePeer := testp2p.NewTestP2P(t)
|
||||
s := &Service{
|
||||
cfg: &Config{
|
||||
MaxPeers: 30,
|
||||
DB: d,
|
||||
},
|
||||
genesisTime: time.Now(),
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{
|
||||
PeerLimit: 30,
|
||||
ScorerParams: &scorers.Config{},
|
||||
}),
|
||||
host: fakePeer.BHost,
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func startTestCrawler(t *testing.T, s *Service, listener *testp2p.MockListener) *GossipPeerCrawler {
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
crawler, err := NewGossipPeerCrawler(t.Context(), s, listener,
|
||||
1*time.Second, 100*time.Millisecond, 10, gossipcrawler.PeerFilterFunc(s.filterPeer),
|
||||
s.Peers().Scorers().Score)
|
||||
require.NoError(t, err)
|
||||
s.crawler = crawler
|
||||
require.NoError(t, crawler.Start(func(ctx context.Context, n *enode.Node) ([]string, error) {
|
||||
subs, err := attestationSubnets(n.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var topics []string
|
||||
for subnet := range subs {
|
||||
t := AttestationSubnetTopic(digest, subnet)
|
||||
topics = append(topics, t)
|
||||
}
|
||||
return topics, nil
|
||||
}))
|
||||
return crawler
|
||||
}
|
||||
|
||||
func verifyCrawlerPeers(t *testing.T, crawler *GossipPeerCrawler, s *Service, subnets map[uint64]int, expectedCount int, description string, eval func(t *testing.T, result []*enode.Node)) {
|
||||
digest, err := s.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
var topics []string
|
||||
for subnet := range subnets {
|
||||
topics = append(topics, AttestationSubnetTopic(digest, subnet))
|
||||
}
|
||||
|
||||
var results []*enode.Node
|
||||
require2.Eventually(t, func() bool {
|
||||
results = results[:0]
|
||||
seen := make(map[enode.ID]struct{})
|
||||
for _, topic := range topics {
|
||||
peers := crawler.PeersForTopic(topic)
|
||||
for _, peer := range peers {
|
||||
if _, ok := seen[peer.ID()]; !ok {
|
||||
seen[peer.ID()] = struct{}{}
|
||||
results = append(results, peer)
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(results) == expectedCount
|
||||
}, 1*time.Second, 100*time.Millisecond, description)
|
||||
|
||||
if eval != nil {
|
||||
eval(t, results)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,9 +21,9 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/p2p/encoder:go_default_library",
|
||||
"//beacon-chain/p2p/gossipcrawler:go_default_library",
|
||||
"//beacon-chain/p2p/peers:go_default_library",
|
||||
"//beacon-chain/p2p/peers/scorers:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
@@ -41,6 +41,16 @@ func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID)
|
||||
|
||||
}
|
||||
|
||||
// Crawler -- fake.
|
||||
func (*FakeP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer -- fake.
|
||||
func (*FakeP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddDisconnectionHandler -- fake.
|
||||
func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) {
|
||||
}
|
||||
@@ -70,11 +80,6 @@ func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*FakeP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
func (*FakeP2P) RefreshPersistentSubnets() {}
|
||||
|
||||
@@ -93,6 +98,11 @@ func (*FakeP2P) Peers() *peers.Status {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DialPeers -- fake.
|
||||
func (*FakeP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// PublishToTopic -- fake.
|
||||
func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error {
|
||||
return nil
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
@@ -19,6 +19,7 @@ type MockPeerManager struct {
|
||||
BHost host.Host
|
||||
DiscoveryAddr []multiaddr.Multiaddr
|
||||
FailDiscoveryAddr bool
|
||||
Dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// Disconnect .
|
||||
@@ -46,6 +47,11 @@ func (m MockPeerManager) NodeID() enode.ID {
|
||||
return enode.ID{}
|
||||
}
|
||||
|
||||
// GossipDialer returns the configured dialer mock, if any.
|
||||
func (m MockPeerManager) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return m.Dialer
|
||||
}
|
||||
|
||||
// DiscoveryAddresses .
|
||||
func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
if m.FailDiscoveryAddr {
|
||||
@@ -57,10 +63,15 @@ func (m *MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
|
||||
// RefreshPersistentSubnets .
|
||||
func (*MockPeerManager) RefreshPersistentSubnets() {}
|
||||
|
||||
// FindAndDialPeersWithSubnet .
|
||||
func (*MockPeerManager) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
// DialPeers
|
||||
func (p *MockPeerManager) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// AddPingMethod .
|
||||
func (*MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {}
|
||||
|
||||
// Crawler.
|
||||
func (*MockPeerManager) Crawler() gossipcrawler.Crawler {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/encoder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/gossipcrawler"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/peers/scorers"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/interfaces"
|
||||
@@ -66,6 +66,7 @@ type TestP2P struct {
|
||||
earliestAvailableSlot primitives.Slot
|
||||
custodyGroupCount uint64
|
||||
enr *enr.Record
|
||||
dialer gossipcrawler.GossipDialer
|
||||
}
|
||||
|
||||
// NewTestP2P initializes a new p2p test service.
|
||||
@@ -183,11 +184,7 @@ func (p *TestP2P) ReceivePubSub(topic string, msg proto.Message) {
|
||||
if _, err := p.Encoding().EncodeGossip(buf, castedMsg); err != nil {
|
||||
p.t.Fatalf("Failed to encode message: %v", err)
|
||||
}
|
||||
digest, err := p.ForkDigest()
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
topicHandle, err := ps.Join(fmt.Sprintf(topic, digest) + p.Encoding().ProtocolSuffix())
|
||||
topicHandle, err := ps.Join(topic)
|
||||
if err != nil {
|
||||
p.t.Fatal(err)
|
||||
}
|
||||
@@ -279,6 +276,9 @@ func (p *TestP2P) SubscribeToTopic(topic string, opts ...pubsub.SubOpt) (*pubsub
|
||||
// LeaveTopic closes topic and removes corresponding handler from list of joined topics.
|
||||
// This method will return error if there are outstanding event handlers or subscriptions.
|
||||
func (p *TestP2P) LeaveTopic(topic string) error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if t, ok := p.joinedTopics[topic]; ok {
|
||||
if err := t.Close(); err != nil {
|
||||
return err
|
||||
@@ -419,9 +419,8 @@ func (p *TestP2P) Peers() *peers.Status {
|
||||
return p.peers
|
||||
}
|
||||
|
||||
// FindAndDialPeersWithSubnets mocks the p2p func.
|
||||
func (*TestP2P) FindAndDialPeersWithSubnets(ctx context.Context, topicFormat string, digest [fieldparams.VersionLength]byte, minimumPeersPerSubnet int, subnets map[uint64]bool) error {
|
||||
return nil
|
||||
func (p *TestP2P) DialPeers(ctx context.Context, maxConcurrentDials int, nodes []*enode.Node) uint {
|
||||
return 0
|
||||
}
|
||||
|
||||
// RefreshPersistentSubnets mocks the p2p func.
|
||||
@@ -558,3 +557,40 @@ func (s *TestP2P) custodyGroupCountFromPeerENR(pid peer.ID) uint64 {
|
||||
|
||||
return custodyGroupCount
|
||||
}
|
||||
|
||||
// MockCrawler is a minimal mock implementation of PeerCrawler for testing
|
||||
type MockCrawler struct{}
|
||||
|
||||
// Start does nothing as this is a mock
|
||||
func (m *MockCrawler) Start(gossipcrawler.TopicExtractor) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop does nothing as this is a mock
|
||||
func (m *MockCrawler) Stop() {}
|
||||
|
||||
// SetTopicExtractor does nothing as this is a mock
|
||||
func (m *MockCrawler) SetTopicExtractor(extractor func(context.Context, *enode.Node) ([]string, error)) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTopic does nothing as this is a mock
|
||||
func (m *MockCrawler) RemoveTopic(topic string) {}
|
||||
|
||||
// RemovePeerByPeerId does nothing as this is a mock
|
||||
func (m *MockCrawler) RemovePeerByPeerId(pid peer.ID) {}
|
||||
|
||||
// PeersForTopic returns empty list as this is a mock
|
||||
func (m *MockCrawler) PeersForTopic(topic string) []*enode.Node {
|
||||
return []*enode.Node{}
|
||||
}
|
||||
|
||||
// Crawler returns a mock crawler implementation for testing.
|
||||
func (*TestP2P) Crawler() gossipcrawler.Crawler {
|
||||
return &MockCrawler{}
|
||||
}
|
||||
|
||||
// GossipDialer returns the configured test dialer, which is nil for tests that do not exercise dialer behaviour.
|
||||
func (p *TestP2P) GossipDialer() gossipcrawler.GossipDialer {
|
||||
return p.dialer
|
||||
}
|
||||
|
||||
@@ -711,7 +711,6 @@ func (s *Server) SubmitAttesterSlashingsV2(w http.ResponseWriter, r *http.Reques
|
||||
versionHeader := r.Header.Get(api.VersionHeader)
|
||||
if versionHeader == "" {
|
||||
httputil.HandleError(w, api.VersionHeader+" header is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
v, err := version.FromString(versionHeader)
|
||||
if err != nil {
|
||||
|
||||
@@ -2112,33 +2112,6 @@ func TestSubmitAttesterSlashingsV2(t *testing.T) {
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, "Invalid attester slashing", e.Message)
|
||||
})
|
||||
|
||||
t.Run("missing-version-header", func(t *testing.T) {
|
||||
bs, err := util.NewBeaconStateElectra()
|
||||
require.NoError(t, err)
|
||||
|
||||
broadcaster := &p2pMock.MockBroadcaster{}
|
||||
s := &Server{
|
||||
ChainInfoFetcher: &blockchainmock.ChainService{State: bs},
|
||||
SlashingsPool: &slashingsmock.PoolMock{},
|
||||
Broadcaster: broadcaster,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString(invalidAttesterSlashing)
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodPost, "http://example.com/beacon/pool/attester_slashings", &body)
|
||||
// Intentionally do not set api.VersionHeader to verify missing header handling.
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.SubmitAttesterSlashingsV2(writer, request)
|
||||
require.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusBadRequest, e.Code)
|
||||
assert.StringContains(t, api.VersionHeader+" header is required", e.Message)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
|
||||
|
||||
@@ -654,10 +654,6 @@ func (m *futureSyncMockFetcher) StateBySlot(context.Context, primitives.Slot) (s
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func (m *futureSyncMockFetcher) StateByEpoch(context.Context, primitives.Epoch) (state.BeaconState, error) {
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
func TestGetSyncCommittees_Future(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize)
|
||||
syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize)
|
||||
|
||||
@@ -116,7 +116,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateSlot := update.AttestedHeader().Beacon().Slot
|
||||
@@ -132,15 +131,12 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
chunkLength = ssz.MarshalUint64(chunkLength, uint64(len(updateSSZ)+4))
|
||||
if _, err := w.Write(chunkLength); err != nil {
|
||||
httputil.HandleError(w, "Could not write chunk length: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateEntry.ForkDigest[:]); err != nil {
|
||||
httputil.HandleError(w, "Could not write fork digest: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if _, err := w.Write(updateSSZ); err != nil {
|
||||
httputil.HandleError(w, "Could not write update SSZ: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -149,7 +145,6 @@ func (s *Server) GetLightClientUpdatesByRange(w http.ResponseWriter, req *http.R
|
||||
for _, update := range updates {
|
||||
if ctx.Err() != nil {
|
||||
httputil.HandleError(w, "Context error: "+ctx.Err().Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
updateJson, err := structs.LightClientUpdateFromConsensus(update)
|
||||
|
||||
@@ -132,7 +132,6 @@ func (s *Server) GetHealth(w http.ResponseWriter, r *http.Request) {
|
||||
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if s.SyncChecker.Synced() && !optimistic {
|
||||
return
|
||||
|
||||
@@ -228,7 +228,7 @@ func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.
|
||||
}
|
||||
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get state for epoch's starting slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return nil, false
|
||||
}
|
||||
return st, true
|
||||
|
||||
@@ -19,6 +19,7 @@ go_library(
|
||||
"//beacon-chain/cache:go_default_library",
|
||||
"//beacon-chain/core/feed/operation:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/operations/attestations:go_default_library",
|
||||
"//beacon-chain/operations/synccommittee:go_default_library",
|
||||
@@ -77,7 +78,6 @@ go_test(
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
"//beacon-chain/rpc/eth/rewards/testing:go_default_library",
|
||||
"//beacon-chain/rpc/eth/shared/testing:go_default_library",
|
||||
"//beacon-chain/rpc/lookup:go_default_library",
|
||||
"//beacon-chain/rpc/testutil:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/stategen:go_default_library",
|
||||
|
||||
@@ -19,6 +19,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/builder"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
rpchelpers "github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/helpers"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/eth/shared"
|
||||
@@ -897,15 +898,20 @@ func (s *Server) GetAttesterDuties(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// For next epoch requests, we use the current epoch's state since committee
|
||||
// assignments for next epoch can be computed from current epoch's state.
|
||||
epochForState := requestedEpoch
|
||||
var startSlot primitives.Slot
|
||||
if requestedEpoch == nextEpoch {
|
||||
epochForState = currentEpoch
|
||||
startSlot, err = slots.EpochStart(currentEpoch)
|
||||
} else {
|
||||
startSlot, err = slots.EpochStart(requestedEpoch)
|
||||
}
|
||||
st, err := s.Stater.StateByEpoch(ctx, epochForState)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot from epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateBySlot(ctx, startSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1014,11 +1020,39 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
nextEpochLookahead = true
|
||||
}
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, requestedEpoch)
|
||||
epochStartSlot, err := slots.EpochStart(requestedEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get start slot of epoch %d: %v", requestedEpoch, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
var st state.BeaconState
|
||||
// If the requested epoch is not in the past, use the head state and the next slot cache; otherwise replay to the epoch's start slot.
|
||||
if requestedEpoch < currentEpoch {
|
||||
st, err = s.Stater.StateBySlot(ctx, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get state for slot %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
st, err = s.HeadFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head state: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// Notice that even for Fulu requests for the next epoch, we are only advancing the state to the start of the current epoch.
|
||||
if st.Slot() < epochStartSlot {
|
||||
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not get head root: %v ", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err = transition.ProcessSlotsUsingNextSlotCache(ctx, st, headRoot, epochStartSlot)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, fmt.Sprintf("Could not process slots up to %d: %v ", epochStartSlot, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var assignments map[primitives.ValidatorIndex][]primitives.Slot
|
||||
if nextEpochLookahead {
|
||||
@@ -1069,8 +1103,7 @@ func (s *Server) GetProposerDuties(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if err = sortProposerDuties(duties); err != nil {
|
||||
httputil.HandleError(w, "Could not sort proposer duties: "+err.Error(), http.StatusInternalServerError)
|
||||
if !sortProposerDuties(w, duties) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1141,10 +1174,14 @@ func (s *Server) GetSyncCommitteeDuties(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
|
||||
startingEpoch := min(requestedEpoch, currentEpoch)
|
||||
|
||||
st, err := s.Stater.StateByEpoch(ctx, startingEpoch)
|
||||
slot, err := slots.EpochStart(startingEpoch)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get sync committee slot: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
st, err := s.Stater.State(ctx, []byte(strconv.FormatUint(uint64(slot), 10)))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not get sync committee state: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1290,7 +1327,7 @@ func (s *Server) GetLiveness(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
st, err = s.Stater.StateBySlot(ctx, epochEnd)
|
||||
if err != nil {
|
||||
shared.WriteStateFetchError(w, err)
|
||||
httputil.HandleError(w, "Could not get slot for requested epoch: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
participation, err = st.CurrentEpochParticipation()
|
||||
@@ -1410,20 +1447,22 @@ func syncCommitteeDutiesAndVals(
|
||||
return duties, vals, nil
|
||||
}
|
||||
|
||||
func sortProposerDuties(duties []*structs.ProposerDuty) error {
|
||||
var err error
|
||||
func sortProposerDuties(w http.ResponseWriter, duties []*structs.ProposerDuty) bool {
|
||||
ok := true
|
||||
sort.Slice(duties, func(i, j int) bool {
|
||||
si, parseErr := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
si, err := strconv.ParseUint(duties[i].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
sj, parseErr := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if parseErr != nil {
|
||||
err = errors.Wrap(parseErr, "could not parse slot")
|
||||
sj, err := strconv.ParseUint(duties[j].Slot, 10, 64)
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not parse slot: "+err.Error(), http.StatusInternalServerError)
|
||||
ok = false
|
||||
return false
|
||||
}
|
||||
return si < sj
|
||||
})
|
||||
return err
|
||||
return ok
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/operations/synccommittee"
|
||||
p2pmock "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/core"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/lookup"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/rpc/testutil"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -2007,7 +2006,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
BeaconDB: db,
|
||||
}
|
||||
|
||||
@@ -2186,7 +2184,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
Stater: &testutil.MockStater{StatesBySlot: map[primitives.Slot]state.BeaconState{0: bs}},
|
||||
TimeFetcher: chain,
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
BeaconDB: db,
|
||||
}
|
||||
@@ -2227,62 +2224,6 @@ func TestGetAttesterDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err = body.WriteString("[\"0\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/attester/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetAttesterDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProposerDuties(t *testing.T) {
|
||||
@@ -2486,60 +2427,6 @@ func TestGetProposerDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
bs, err := transition.GenesisBeaconState(t.Context(), deposits, 0, eth1Data)
|
||||
require.NoError(t, err)
|
||||
chainSlot := primitives.Slot(0)
|
||||
chain := &mockChain.ChainService{
|
||||
State: bs, Root: genesisRoot[:], Slot: &chainSlot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chain,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chain,
|
||||
HeadFetcher: chain,
|
||||
}
|
||||
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/proposer/{epoch}", nil)
|
||||
request.SetPathValue("epoch", "0")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetProposerDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
@@ -2570,7 +2457,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
}
|
||||
require.NoError(t, st.SetNextSyncCommittee(nextCommittee))
|
||||
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, State: st}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2761,7 +2648,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
return newSyncPeriodSt
|
||||
}
|
||||
}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot, State: newSyncPeriodSt}
|
||||
mockChainService := &mockChain.ChainService{Genesis: genesisTime, Slot: &newSyncPeriodStartSlot}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: stateFetchFn(newSyncPeriodStartSlot)},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
@@ -2842,7 +2729,8 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
slot, err := slots.EpochStart(1)
|
||||
require.NoError(t, err)
|
||||
|
||||
st2 := st.Copy()
|
||||
st2, err := util.NewBeaconStateBellatrix()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st2.SetSlot(slot))
|
||||
|
||||
mockChainService := &mockChain.ChainService{
|
||||
@@ -2856,7 +2744,7 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
State: st2,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{BeaconState: st2},
|
||||
Stater: &testutil.MockStater{BeaconState: st},
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
TimeFetcher: mockChainService,
|
||||
HeadFetcher: mockChainService,
|
||||
@@ -2901,62 +2789,6 @@ func TestGetSyncCommitteeDuties(t *testing.T) {
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusServiceUnavailable, e.Code)
|
||||
})
|
||||
t.Run("state not found returns 404", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
stateNotFoundErr := lookup.NewStateNotFoundError(8192, []byte("test"))
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: &stateNotFoundErr},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusNotFound, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusNotFound, e.Code)
|
||||
assert.StringContains(t, "State not found", e.Message)
|
||||
})
|
||||
t.Run("state fetch error returns 500", func(t *testing.T) {
|
||||
slot := 2 * params.BeaconConfig().SlotsPerEpoch
|
||||
chainService := &mockChain.ChainService{
|
||||
Slot: &slot,
|
||||
}
|
||||
s := &Server{
|
||||
Stater: &testutil.MockStater{CustomError: errors.New("internal error")},
|
||||
TimeFetcher: chainService,
|
||||
SyncChecker: &mockSync.Sync{IsSyncing: false},
|
||||
OptimisticModeFetcher: chainService,
|
||||
HeadFetcher: chainService,
|
||||
}
|
||||
|
||||
var body bytes.Buffer
|
||||
_, err := body.WriteString("[\"1\"]")
|
||||
require.NoError(t, err)
|
||||
request := httptest.NewRequest(http.MethodGet, "http://www.example.com/eth/v1/validator/duties/sync/{epoch}", &body)
|
||||
request.SetPathValue("epoch", "1")
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
s.GetSyncCommitteeDuties(writer, request)
|
||||
assert.Equal(t, http.StatusInternalServerError, writer.Code)
|
||||
e := &httputil.DefaultJsonError{}
|
||||
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
|
||||
assert.Equal(t, http.StatusInternalServerError, e.Code)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareBeaconProposer(t *testing.T) {
|
||||
|
||||
@@ -11,7 +11,6 @@ go_library(
|
||||
deps = [
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/db:go_default_library",
|
||||
"//beacon-chain/db/filesystem:go_default_library",
|
||||
"//beacon-chain/rpc/core:go_default_library",
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stategen"
|
||||
@@ -99,7 +98,6 @@ type Stater interface {
|
||||
State(ctx context.Context, id []byte) (state.BeaconState, error)
|
||||
StateRoot(ctx context.Context, id []byte) ([]byte, error)
|
||||
StateBySlot(ctx context.Context, slot primitives.Slot) (state.BeaconState, error)
|
||||
StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// BeaconDbStater is an implementation of Stater. It retrieves states from the beacon chain database.
|
||||
@@ -269,46 +267,6 @@ func (p *BeaconDbStater) StateBySlot(ctx context.Context, target primitives.Slot
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// StateByEpoch returns the state for the start of the requested epoch.
|
||||
// For current or next epoch, it uses the head state and next slot cache for efficiency.
|
||||
// For past epochs, it replays blocks from the most recent canonical state.
|
||||
func (p *BeaconDbStater) StateByEpoch(ctx context.Context, epoch primitives.Epoch) (state.BeaconState, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "statefetcher.StateByEpoch")
|
||||
defer span.End()
|
||||
|
||||
targetSlot, err := slots.EpochStart(epoch)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get epoch start slot")
|
||||
}
|
||||
|
||||
currentSlot := p.GenesisTimeFetcher.CurrentSlot()
|
||||
currentEpoch := slots.ToEpoch(currentSlot)
|
||||
|
||||
// For past epochs, use the replay mechanism
|
||||
if epoch < currentEpoch {
|
||||
return p.StateBySlot(ctx, targetSlot)
|
||||
}
|
||||
|
||||
// For current or next epoch, use head state + next slot cache (much faster)
|
||||
headState, err := p.ChainInfoFetcher.HeadState(ctx)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get head state")
|
||||
}
|
||||
|
||||
// If head state is already at or past the target slot, return it
|
||||
if headState.Slot() >= targetSlot {
|
||||
return headState, nil
|
||||
}
|
||||
|
||||
// Process slots using the next slot cache
|
||||
headRoot := p.ChainInfoFetcher.CachedHeadRoot()
|
||||
st, err := transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot[:], targetSlot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not process slots up to %d", targetSlot)
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *BeaconDbStater) headStateRoot(ctx context.Context) ([]byte, error) {
|
||||
b, err := p.ChainInfoFetcher.HeadBlock(ctx)
|
||||
if err != nil {
|
||||
|
||||
@@ -444,111 +444,3 @@ func TestStateBySlot_AfterHeadSlot(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(101), st.Slot())
|
||||
}
|
||||
|
||||
func TestStateByEpoch(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
slotsPerEpoch := params.BeaconConfig().SlotsPerEpoch
|
||||
|
||||
t.Run("current epoch uses head state", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 0
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
// Should return head state since it's already past epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("current epoch processes slots to epoch start", func(t *testing.T) {
|
||||
// Head is at slot 5 (epoch 0), requesting epoch 1
|
||||
// Current slot is 32 (epoch 1), so epoch 1 is current epoch
|
||||
headSlot := primitives.Slot(5)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := slotsPerEpoch // slot 32, epoch 1
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
// In real usage, the transition package handles this properly
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
// that can process slots (missing committees, etc.)
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("past epoch uses replay", func(t *testing.T) {
|
||||
// Head is at epoch 2, requesting epoch 0 (past)
|
||||
headSlot := slotsPerEpoch * 2 // slot 64, epoch 2
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
pastEpochSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: 0})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
mockReplayer := mockstategen.NewReplayerBuilder()
|
||||
mockReplayer.SetMockStateForSlot(pastEpochSt, 0)
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock, ReplayerBuilder: mockReplayer}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 0)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, primitives.Slot(0), st.Slot())
|
||||
})
|
||||
|
||||
t.Run("next epoch uses head state path", func(t *testing.T) {
|
||||
// Head is at slot 30 (epoch 0), requesting epoch 1 (next)
|
||||
// Current slot is 30 (epoch 0), so epoch 1 is next epoch
|
||||
headSlot := primitives.Slot(30)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
// Note: This will fail since ProcessSlotsUsingNextSlotCache requires proper setup
|
||||
_, err = p.StateByEpoch(ctx, 1)
|
||||
// The error is expected since we don't have a fully initialized beacon state
|
||||
assert.NotNil(t, err)
|
||||
})
|
||||
|
||||
t.Run("head state already at target slot returns immediately", func(t *testing.T) {
|
||||
// Head is at slot 32 (epoch 1 start), requesting epoch 1
|
||||
headSlot := slotsPerEpoch // slot 32
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
|
||||
t.Run("head state past target slot returns head state", func(t *testing.T) {
|
||||
// Head is at slot 40, requesting epoch 1 (starts at slot 32)
|
||||
headSlot := primitives.Slot(40)
|
||||
headSt, err := statenative.InitializeFromProtoPhase0(ðpb.BeaconState{Slot: headSlot})
|
||||
require.NoError(t, err)
|
||||
|
||||
currentSlot := headSlot
|
||||
mock := &chainMock.ChainService{State: headSt, Slot: ¤tSlot}
|
||||
p := BeaconDbStater{ChainInfoFetcher: mock, GenesisTimeFetcher: mock}
|
||||
|
||||
st, err := p.StateByEpoch(ctx, 1)
|
||||
require.NoError(t, err)
|
||||
// Returns head state since it's already >= epoch start
|
||||
assert.Equal(t, headSlot, st.Slot())
|
||||
})
|
||||
}
|
||||
|
||||
@@ -26,6 +26,5 @@ go_library(
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
)
|
||||
|
||||
// MockStater is a fake implementation of lookup.Stater.
|
||||
@@ -15,7 +14,6 @@ type MockStater struct {
|
||||
StateProviderFunc func(ctx context.Context, stateId []byte) (state.BeaconState, error)
|
||||
BeaconStateRoot []byte
|
||||
StatesBySlot map[primitives.Slot]state.BeaconState
|
||||
StatesByEpoch map[primitives.Epoch]state.BeaconState
|
||||
StatesByRoot map[[32]byte]state.BeaconState
|
||||
CustomError error
|
||||
}
|
||||
@@ -45,22 +43,3 @@ func (m *MockStater) StateRoot(context.Context, []byte) ([]byte, error) {
|
||||
func (m *MockStater) StateBySlot(_ context.Context, s primitives.Slot) (state.BeaconState, error) {
|
||||
return m.StatesBySlot[s], nil
|
||||
}
|
||||
|
||||
// StateByEpoch --
|
||||
func (m *MockStater) StateByEpoch(_ context.Context, e primitives.Epoch) (state.BeaconState, error) {
|
||||
if m.CustomError != nil {
|
||||
return nil, m.CustomError
|
||||
}
|
||||
if m.StatesByEpoch != nil {
|
||||
return m.StatesByEpoch[e], nil
|
||||
}
|
||||
// Fall back to StatesBySlot if StatesByEpoch is not set
|
||||
slot, err := slots.EpochStart(e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.StatesBySlot != nil {
|
||||
return m.StatesBySlot[slot], nil
|
||||
}
|
||||
return m.BeaconState, nil
|
||||
}
|
||||
|
||||
@@ -16,6 +16,8 @@ go_library(
|
||||
"error.go",
|
||||
"fork_watcher.go",
|
||||
"fuzz_exports.go", # keep
|
||||
"gossipsub_base.go",
|
||||
"gossipsub_topic_family.go",
|
||||
"log.go",
|
||||
"metrics.go",
|
||||
"once.go",
|
||||
@@ -49,7 +51,11 @@ go_library(
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_controller.go",
|
||||
"subscription_topic_handler.go",
|
||||
"topic_families_dynamic_subnets.go",
|
||||
"topic_families_static_subnets.go",
|
||||
"topic_families_without_subnets.go",
|
||||
"validate_aggregate_proof.go",
|
||||
"validate_attester_slashing.go",
|
||||
"validate_beacon_attestation.go",
|
||||
@@ -137,6 +143,7 @@ go_library(
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_hashicorp_golang_lru//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/host:go_default_library",
|
||||
@@ -144,7 +151,6 @@ go_library(
|
||||
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//core/protocol:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
|
||||
"@com_github_libp2p_go_mplex//:go_default_library",
|
||||
"@com_github_patrickmn_go_cache//:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
@@ -175,6 +181,8 @@ go_test(
|
||||
"decode_pubsub_test.go",
|
||||
"error_test.go",
|
||||
"fork_watcher_test.go",
|
||||
"gossipsub_base_test.go",
|
||||
"gossipsub_topic_family_test.go",
|
||||
"kzg_batch_verifier_test.go",
|
||||
"once_test.go",
|
||||
"pending_attestations_queue_bucket_test.go",
|
||||
@@ -200,10 +208,11 @@ go_test(
|
||||
"subscriber_beacon_aggregate_proof_test.go",
|
||||
"subscriber_beacon_blocks_test.go",
|
||||
"subscriber_data_column_sidecar_test.go",
|
||||
"subscriber_test.go",
|
||||
"subscription_controller_test.go",
|
||||
"subscription_topic_handler_test.go",
|
||||
"sync_fuzz_test.go",
|
||||
"sync_test.go",
|
||||
"topic_families_dynamic_subnets_test.go",
|
||||
"validate_aggregate_proof_test.go",
|
||||
"validate_attester_slashing_test.go",
|
||||
"validate_beacon_attestation_test.go",
|
||||
@@ -286,6 +295,7 @@ go_test(
|
||||
"@com_github_d4l3k_messagediff//:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//p2p/enr:go_default_library",
|
||||
"@com_github_golang_snappy//:go_default_library",
|
||||
"@com_github_libp2p_go_libp2p//:go_default_library",
|
||||
|
||||
@@ -161,7 +161,7 @@ func (s *Service) validateWithKzgBatchVerifier(ctx context.Context, dataColumns
|
||||
|
||||
timeout := time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second
|
||||
|
||||
resChan := make(chan error, 1)
|
||||
resChan := make(chan error)
|
||||
verificationSet := &kzgVerifier{dataColumns: dataColumns, resChan: resChan}
|
||||
s.kzgChan <- verificationSet
|
||||
|
||||
|
||||
398
beacon-chain/sync/docs/gossipsub_control_plane_design.md
Normal file
@@ -0,0 +1,398 @@
# Gossipsub Control Plane Design Document
|
||||
|
||||
## Overview
|
||||
|
||||
This branch introduces a declarative, fork-aware gossipsub control plane that manages topic subscriptions and peer discovery for subnet-based topics. The system replaces ad-hoc topic management with a structured approach centered on **Topic Families**.
|
||||
|
||||
### Key Components
|
||||
|
||||
| Component | Location | Responsibility |
|
||||
|-----------|----------|----------------|
|
||||
| **GossipsubController** | `sync/gossipsub_controller.go` | Orchestrates topic family lifecycle across forks |
|
||||
| **GossipsubPeerCrawler** | `p2p/gossipsub_peer_crawler.go` | Discovers and indexes peers by topic via discv5 |
|
||||
| **GossipsubPeerDialer** | `p2p/gossipsub_peer_controller.go` | Maintains peer connections for required topics |
|
||||
| **Topic Family Abstractions** | `sync/gossipsub_topic_family.go` | Interfaces for topic subscription management |
|
||||
|
||||
---
|
||||
|
||||
## 1. Topic Family Abstraction
|
||||
|
||||
### 1.1 Design Goals
|
||||
|
||||
- **Declarative Fork Management**: Topic families declare when they activate/deactivate based on fork epochs
|
||||
- **Unified Subscription Logic**: Common base handles validator registration, message loops, and cleanup
|
||||
- **Dynamic vs Static Distinction**: Clear separation between global topics and subnet-based topics that change per slot
|
||||
|
||||
### 1.2 Interface Hierarchy
|
||||
|
||||
```
|
||||
GossipsubTopicFamily (base)
|
||||
├── Name()
|
||||
├── NetworkScheduleEntry()
|
||||
└── UnsubscribeAll()
|
||||
|
||||
GossipsubTopicFamilyWithoutDynamicSubnets
|
||||
└── Subscribe() // Called once when registered
|
||||
|
||||
GossipsubTopicFamilyWithDynamicSubnets
|
||||
├── TopicsToSubscribeForSlot(slot)
|
||||
├── ExtractTopicsForNode(node) // For peer discovery
|
||||
├── SubscribeForSlot(slot)
|
||||
└── UnsubscribeForSlot(slot)
|
||||
```
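
The concrete Go interfaces in `sync/gossipsub_topic_family.go` (added in this change) follow the same shape under shorter names; they are condensed here for orientation only, see that file for the authoritative definitions:

```go
import (
	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

// Base: every topic family reports its name and schedule entry, and can tear
// itself down.
type TopicFamily interface {
	Name() string
	NetworkScheduleEntry() params.NetworkScheduleEntry
	UnsubscribeAll()
}

// Global and static per-subnet families subscribe once when registered.
type ShardedTopicFamily interface {
	TopicFamily
	Subscribe()
}

// Dynamic subnet families recompute their topics every slot and expose a
// per-node topic extraction hook used by peer discovery.
type DynamicShardedTopicFamily interface {
	TopicFamily
	TopicsWithMinPeerCount(slot primitives.Slot) map[string]int
	TopicsToSubscribeForSlot(slot primitives.Slot) []string
	ExtractTopicsForNode(node *enode.Node) ([]string, error)
	SubscribeForSlot(slot primitives.Slot)
	UnsubscribeForSlot(slot primitives.Slot)
}
```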
|
||||
|
||||
### 1.3 Implementation Categories
|
||||
|
||||
**Global Topics** (subscribed once per fork):
|
||||
- Block, AggregateAndProof, VoluntaryExit, ProposerSlashing, AttesterSlashing
|
||||
- SyncContributionAndProof (Altair+), BlsToExecutionChange (Capella+)
|
||||
- LightClient updates (Altair+, feature-flagged)
|
||||
|
||||
**Static Per-Subnet**:
|
||||
- BlobTopicFamily - One instance per blob subnet (Deneb/Electra)
|
||||
|
||||
**Dynamic Subnets** (change per slot based on validator duties):
|
||||
- **AttestationTopicFamily** - Subnets based on attestation committee assignments
|
||||
- **SyncCommitteeTopicFamily** - Subnets based on sync committee membership
|
||||
- **DataColumnTopicFamily** - Subnets based on data column custody (Fulu+)
|
||||
|
||||
### 1.4 Base Implementation Features
|
||||
|
||||
`baseGossipsubTopicFamily` provides:
|
||||
- **Idempotent subscriptions** - Safe to call multiple times for same topic
|
||||
- **Automatic validator registration** - Registers message validator with pubsub
|
||||
- **Message loop management** - Spawns goroutine to process incoming messages
|
||||
- **Cleanup coordination** - Notifies crawler when topics are unsubscribed
|
||||
|
||||
### 1.5 Dynamic Subnet Selection
|
||||
|
||||
Dynamic families combine two subnet sources:
|
||||
- **Subnets to Join**: Topics we must subscribe to
|
||||
- **Subnets for Broadcast**: Topics we need peers for but may not subscribe to
|
||||
|
||||
| Family | Subnets to Join | Subnets for Broadcast |
|--------|-----------------|----------------------|
| Attestation | Persistent + aggregator subnets | Attester duty subnets |
| SyncCommittee | Active sync committee subnets | (none) |
| DataColumn | Custody column subnets | All column subnets |
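
A minimal sketch of how a dynamic family combines the two sources (the subnet sets and topic formatter below are placeholders, not the real Prysm helpers): the family subscribes only to its join set, but requests peers across the union of both sets.

```go
// join: subnets we must subscribe to; broadcast: subnets we only need peers
// for; fullTopic formats a subnet index into its gossip topic string.
func topicsForSlot(join, broadcast map[uint64]bool, fullTopic func(uint64) string) (subscribe, needPeers []string) {
	for subnet := range join {
		subscribe = append(subscribe, fullTopic(subnet))
	}
	union := make(map[uint64]bool, len(join)+len(broadcast))
	for s := range join {
		union[s] = true
	}
	for s := range broadcast {
		union[s] = true
	}
	for subnet := range union {
		needPeers = append(needPeers, fullTopic(subnet))
	}
	return subscribe, needPeers
}
```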
|
||||
|
||||
### 1.6 Fork Schedule
|
||||
|
||||
Topic families declare both an activation and a deactivation epoch (families that never deactivate use the far-future epoch):
|
||||
|
||||
| Fork | Activations | Deactivations |
|------|-------------|---------------|
| Genesis | Block, AggregateAndProof, VoluntaryExit, ProposerSlashing, AttesterSlashing, Attestation | - |
| Altair | SyncContributionAndProof, SyncCommittee, [LightClient*] | - |
| Capella | BlsToExecutionChange | - |
| Deneb | Blob (6 subnets) | - |
| Electra | Blob (9 subnets) | Blob (Deneb config) |
| Fulu | DataColumn | Blob (all) |
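
In code, this schedule is a slice of declarative entries, each pairing an activation/deactivation epoch with a factory that builds the fork's families (condensed from `gossipsub_topic_family.go` in this change):

```go
type topicFamilyEntry struct {
	activationEpoch   primitives.Epoch
	deactivationEpoch primitives.Epoch
	factory           func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily
}

// A family is active at epoch e when activationEpoch <= e < deactivationEpoch.
func TopicFamiliesForEpoch(epoch primitives.Epoch, s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
	var active []TopicFamily
	for _, entry := range topicFamilySchedule() {
		if epoch < entry.activationEpoch || epoch >= entry.deactivationEpoch {
			continue
		}
		active = append(active, entry.factory(s, nse)...)
	}
	return active
}
```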
|
||||
|
||||
---
|
||||
|
||||
## 2. GossipsubController
|
||||
|
||||
### 2.1 Responsibilities
|
||||
|
||||
- **Fork-Aware Topic Management**: Automatically subscribes/unsubscribes based on fork schedule
|
||||
- **Smooth Fork Transitions**: Pre-subscribes 1 epoch before fork, unsubscribes 1 epoch after
|
||||
- **Slot-Based Updates**: Updates dynamic subnet subscriptions every slot
|
||||
- **Topic Extraction**: Provides interface for crawler to determine peer topic relevance
|
||||
|
||||
### 2.2 Lifecycle
|
||||
|
||||
1. **Startup**: Waits for initial sync to complete
|
||||
2. **Control Loop**: Runs on slot ticker, calling `updateActiveTopicFamilies()`
|
||||
3. **Shutdown**: Unsubscribes all families, cancels context
|
||||
|
||||
### 2.3 Fork Transition Handling
|
||||
|
||||
**Timeline for Fork at Epoch N:**
|
||||
```
|
||||
Epoch N-1: Subscribe to both old and new fork topics (overlap period)
|
||||
Epoch N: Fork occurs, both topic sets remain active
|
||||
Epoch N+1: Unsubscribe from old fork topics, only new fork active
|
||||
```
|
||||
|
||||
This ensures no message loss during the transition window.
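
Expressed as predicates (illustrative only, not the controller's actual code; epochs shown as plain `uint64` for brevity), the window for a fork at epoch `N` looks like this:

```go
// newForkActive: the new fork's families are registered from epoch N-1 onward.
func newForkActive(current, forkEpoch uint64) bool {
	return current+1 >= forkEpoch
}

// oldForkActive: the previous fork's families are kept through epoch N and
// removed once the node reaches epoch N+1 or later.
func oldForkActive(current, forkEpoch uint64) bool {
	return current <= forkEpoch
}
```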
|
||||
|
||||
### 2.4 Update Logic (per slot)
|
||||
|
||||
1. **Get families for current epoch** from declarative schedule
|
||||
2. **Check for upcoming fork** - if next epoch is fork boundary, include next fork's families
|
||||
3. **Register new families** - add to active map, subscribe based on type:
|
||||
- Static families: `Subscribe()` once
|
||||
- Dynamic families: `SubscribeForSlot()` and `UnsubscribeForSlot()` every slot
|
||||
4. **Remove old fork families** - if 1 epoch past fork boundary, unsubscribe and remove
|
||||
|
||||
### 2.5 Topic Extraction for Peer Discovery
|
||||
|
||||
The controller exposes `ExtractTopics(node)` which:
|
||||
- Iterates all active **dynamic** subnet families
|
||||
- Calls `ExtractTopicsForNode(node)` on each
|
||||
- Returns deduplicated list of topics the node can serve
|
||||
|
||||
This is used by the peer crawler to index discovered peers by topic.
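
A sketch of that extraction path, assuming a hypothetical `dynamicFamilies` slice on the controller (the field name, and skipping a family whose ENR data cannot be parsed, are illustrative choices, not the confirmed behavior); it relies on the `DynamicShardedTopicFamily` interface and go-ethereum's `enode` package:

```go
type controllerSketch struct {
	dynamicFamilies []DynamicShardedTopicFamily
}

func (c *controllerSketch) ExtractTopics(node *enode.Node) []string {
	seen := make(map[string]bool)
	var topics []string
	for _, fam := range c.dynamicFamilies {
		famTopics, err := fam.ExtractTopicsForNode(node)
		if err != nil {
			continue // skip a family that cannot read this node's ENR fields
		}
		for _, t := range famTopics {
			if !seen[t] {
				seen[t] = true
				topics = append(topics, t) // deduplicated across families
			}
		}
	}
	return topics
}
```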
|
||||
|
||||
### 2.6 Topics Provider
|
||||
|
||||
The controller exposes `GetCurrentActiveTopics()` which:
|
||||
- Returns all topics from dynamic families for the current slot
|
||||
- Used by the peer dialer to know which topics need peer connections
|
||||
|
||||
---
|
||||
|
||||
## 3. GossipsubPeerCrawler
|
||||
|
||||
### 3.1 Purpose
|
||||
|
||||
Discovers peers via discv5, indexes them by topic, and verifies reachability via ping. Provides the peer dialer with a pool of verified, scored peers for each topic.
|
||||
|
||||
### 3.2 Key Design Decisions
|
||||
|
||||
**Triple Index Structure:**
|
||||
- `byEnode` - Fast lookup by enode ID
|
||||
- `byPeerId` - Fast lookup by libp2p peer ID
|
||||
- `byTopic` - Fast lookup of peers serving a topic
|
||||
|
||||
**Ping-Once Guarantee:**
|
||||
- A node is pinged exactly **once** regardless of ENR sequence number updates
|
||||
- Prevents ping explosion when nodes frequently update their records
|
||||
- Ping success sets `isPinged=true`, failure removes peer entirely
|
||||
|
||||
**Sequence Number Handling:**
|
||||
- Only updates peer record if new sequence number is higher
|
||||
- Stale records are ignored to prevent processing outdated data
|
||||
|
||||
### 3.3 Three Concurrent Loops
|
||||
|
||||
| Loop | Interval | Purpose |
|
||||
|------|----------|---------|
|
||||
| **crawlLoop** | `crawlInterval` | Iterates discv5 `RandomNodes()`, extracts topics, updates index |
|
||||
| **pingLoop** | Continuous | Consumes ping queue, verifies reachability |
|
||||
| **cleanupLoop** | 5 minutes | Prunes peers that fail filter or have no relevant topics |
|
||||
|
||||
### 3.4 Crawl Flow
|
||||
|
||||
1. Create timeout context for crawl iteration
|
||||
2. Get random nodes iterator from discv5
|
||||
3. For each node:
|
||||
- Apply peer filter (reject bad/incompatible peers)
|
||||
- Extract topics via `topicExtractor` (provided by controller)
|
||||
- Update index if sequence number is newer
|
||||
- Queue for ping if not already pinged and has topics
|
||||
|
||||
### 3.5 Ping Queue and Backpressure
|
||||
|
||||
- **Channel capacity**: `4 * maxConcurrentPings`
|
||||
- **Backpressure**: When the queue is full, the crawl loop blocks on send (see the sketch after this list)
|
||||
- **Semaphore**: Limits concurrent ping goroutines to `maxConcurrentPings`
|
||||
- **Ping failure**: Removes peer from index entirely (unreachable)
|
||||
- **Ping success**: Marks peer as verified (`isPinged=true`)
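
A minimal sketch of this pipeline, assuming the standard `context` and go-ethereum `enode` packages; the callback names are placeholders for the real discv5 ping and index updates. The bounded `pingCh` gives the crawl loop natural backpressure, and the semaphore caps in-flight pings:

```go
func runPingLoop(ctx context.Context, pingCh <-chan *enode.Node, maxConcurrentPings int,
	ping func(*enode.Node) error, markPinged, remove func(*enode.Node)) {
	sem := make(chan struct{}, maxConcurrentPings)
	for {
		select {
		case <-ctx.Done():
			return
		case node := <-pingCh:
			sem <- struct{}{} // blocks while maxConcurrentPings are in flight
			go func(n *enode.Node) {
				defer func() { <-sem }()
				if err := ping(n); err != nil {
					remove(n) // unreachable: drop from the index entirely
					return
				}
				markPinged(n) // verified; eligible for PeersForTopic results
			}(node)
		}
	}
}
```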
|
||||
|
||||
### 3.6 Peer Retrieval (`PeersForTopic`)
|
||||
|
||||
Returns peers for a topic with guarantees:
|
||||
1. **Only pinged peers** - Verified reachable
|
||||
2. **Filter applied** - Passes current peer filter
|
||||
3. **Sorted by score** - Best peers first (using p2p scorer)
|
||||
|
||||
### 3.7 Peer Removal Triggers
|
||||
|
||||
| Trigger | Behavior |
|
||||
|---------|----------|
|
||||
| Ping failure | Remove immediately |
|
||||
| Peer disconnection | `RemovePeerId()` called from disconnect handler |
|
||||
| Topic unsubscription | `RemoveTopic()` called from base family cleanup |
|
||||
| Filter rejection during crawl | Remove if previously indexed |
|
||||
| Cleanup loop | Remove if no longer passes filter or has no topics |
|
||||
|
||||
### 3.8 Topic Extraction for Dynamic Subnets
|
||||
|
||||
For each dynamic family, extraction proceeds as follows (a sketch appears after this list):
|
||||
1. Gets subnets we currently need (union of join + broadcast)
|
||||
2. Reads subnet bitfield from node's ENR record
|
||||
3. Returns intersection - topics both we need AND the node advertises
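
For example (a placeholder function, not the real implementation), with the needed and advertised subnets represented as sets:

```go
// needed: this family's join + broadcast subnets; advertised: subnets decoded
// from the node's ENR bitfield; fullTopic formats the gossip topic string.
func topicsServedByNode(needed, advertised map[uint64]bool, fullTopic func(uint64) string) []string {
	var topics []string
	for subnet := range needed {
		if advertised[subnet] {
			topics = append(topics, fullTopic(subnet))
		}
	}
	return topics
}
```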
|
||||
|
||||
---
|
||||
|
||||
## 4. GossipsubPeerDialer
|
||||
|
||||
### 4.1 Purpose
|
||||
|
||||
Maintains peer connections for topics we need. Works with the crawler to dial verified peers when topic peer counts fall below threshold.
|
||||
|
||||
### 4.2 Key Design Decisions
|
||||
|
||||
**Target Peer Count**: 20 peers per topic (`peerPerTopic` constant)
|
||||
|
||||
**Dial Loop Frequency**: Every 1 second
|
||||
|
||||
**Deduplication**: Peers appearing for multiple topics are only dialed once
|
||||
|
||||
### 4.3 Dial Flow
|
||||
|
||||
1. Get current topics from `topicsProvider` (controller's `GetCurrentActiveTopics`)
|
||||
2. For each topic:
|
||||
- Check current connected peer count via `listPeersFunc`
|
||||
- If below target, calculate how many more needed
|
||||
- Get peers from crawler (already filtered, scored, pinged)
|
||||
- Limit to what's needed
|
||||
3. Deduplicate peer list across all topics
|
||||
4. Dial peers via `dialPeersFunc` (sketched below)
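
A sketch of a single dial pass under those rules; the callbacks stand in for the controller's topic provider, the host's peer listing, the crawler's `PeersForTopic`, and the dial function, and `peer.ID` is the libp2p peer identifier:

```go
const peersPerTopic = 20 // target peer count per topic

func dialPass(currentTopics func() []string, connectedPeers func(topic string) int,
	peersForTopic func(topic string, n int) []peer.ID, dial func([]peer.ID)) {
	toDial := make(map[peer.ID]bool)
	for _, topic := range currentTopics() {
		missing := peersPerTopic - connectedPeers(topic)
		if missing <= 0 {
			continue // already at or above target for this topic
		}
		// Candidates returned by the crawler are already pinged, filtered,
		// and sorted by score.
		for _, pid := range peersForTopic(topic, missing) {
			toDial[pid] = true // dedupe peers needed by multiple topics
		}
	}
	peers := make([]peer.ID, 0, len(toDial))
	for pid := range toDial {
		peers = append(peers, pid)
	}
	dial(peers)
}
```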
|
||||
|
||||
### 4.4 Blocking Dial
|
||||
|
||||
`DialPeersForTopicBlocking(ctx, topic, nPeers)` provides synchronous peer acquisition:
|
||||
- Loops until target peer count reached or context cancelled
|
||||
- Used for critical operations that need guaranteed peer connectivity
|
||||
- Polls every 100ms to check connection status
|
||||
|
||||
---
|
||||
|
||||
## 5. Component Interactions
|
||||
|
||||
### 5.1 Architecture Diagram
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────────────────┐
|
||||
│ Sync Service │
|
||||
│ ┌───────────────────────────────────────────────────────────────────────- │
|
||||
│ │ GossipsubController | │
|
||||
│ │ | │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
|
||||
│ │ │ AttestationTF │ │ SyncCommitteeTF │ │ DataColumnTF │ │ │
|
||||
│ │ │ (dynamic) │ │ (dynamic) │ │ (dynamic) │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
|
||||
│ │ | │
|
||||
│ │ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ │
|
||||
│ │ │ BlockTF, etc. │ │ BlobTF (static) │ │ baseTopicFamily │ │ │
|
||||
│ │ │ (global) │ │ │ │ (shared logic) │ │ │
|
||||
│ │ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ │
|
||||
│ │ | │
|
||||
│ └──────────────────┬─────────────────────────────┬──────────────────────┘ │
|
||||
│ │ │ │
|
||||
│ GetCurrentActiveTopics() ExtractTopics() │
|
||||
│ │ │ │
|
||||
└─────────────────────┼─────────────────────────────┼─────────────────────────┘
|
||||
│ │
|
||||
▼ ▼
|
||||
┌─────────────────────────────────┐ ┌─────────────────────────────────────┐
|
||||
│ GossipsubPeerDialer │ │ GossipsubPeerCrawler │
|
||||
│ │ │ │
|
||||
│ - Polls topics every 1 second │ │ - Crawls discv5 periodically │
|
||||
│ - Checks peer count per topic │ │ - Indexes peers by topic │
|
||||
│ - Dials missing peers │ │ - Verifies via ping │
|
||||
│ │ │ - Filters and scores peers │
|
||||
│ │ │ │ │
|
||||
│ │ PeersForTopic() │ │ │ │
|
||||
│ └───────────────────────┼──┼─────────┘ │
|
||||
│ │ │ │
|
||||
└─────────────────────────────────┘ └──────────────────┬──────────────────┘
|
||||
│
|
||||
│ RemovePeerId()
|
||||
┌──────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────┐
|
||||
│ P2P Service │
|
||||
│ │
|
||||
│ - Disconnect handler calls │
|
||||
│ RemovePeerId() on crawler │
|
||||
│ - Provides filterPeer, scorer │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 5.2 Data Flow Summary
|
||||
|
||||
| Flow | Description |
|
||||
|------|-------------|
|
||||
| **Discovery** | discv5 → crawlLoop → topicExtractor → crawledPeers index → pingCh |
|
||||
| **Ping** | pingCh → semaphore → dv5.Ping() → isPinged=true or remove |
|
||||
| **Dial** | controller topics → dialer → crawler.PeersForTopic() → dialPeers |
|
||||
| **Cleanup** | disconnect/unsubscribe → RemovePeerId()/RemoveTopic() |
|
||||
|
||||
### 5.3 Key Invariants
|
||||
|
||||
**Peers from `PeersForTopic()` are always:**
|
||||
- Successfully pinged (reachable)
|
||||
- Passing the peer filter
|
||||
- Sorted by score (best first)
|
||||
|
||||
**Topic subscriptions are:**
|
||||
- Pre-subscribed 1 epoch before fork
|
||||
- Unsubscribed 1 epoch after fork
|
||||
- Updated every slot for dynamic families
|
||||
|
||||
**Ping behavior:**
|
||||
- Each node ID pinged at most once
|
||||
- Ping failures remove peer entirely
|
||||
- Sequence number updates don't trigger re-ping
|
||||
|
||||
**Backpressure:**
|
||||
- Ping queue blocks crawl when full
|
||||
- Semaphore limits concurrent pings
|
||||
- Natural rate limiting without explicit throttling
|
||||
|
||||
---
|
||||
|
||||
## 6. Initialization Sequence
|
||||
|
||||
```
|
||||
PHASE 1: P2P Service Start
|
||||
══════════════════════════
|
||||
├─► Start discv5 listener
|
||||
├─► Create GossipsubPeerCrawler (with filterPeer, scorer)
|
||||
└─► Create GossipsubPeerDialer (not started yet)
|
||||
|
||||
PHASE 2: Sync Service Start
|
||||
═══════════════════════════
|
||||
├─► Create GossipsubController
|
||||
└─► Launch startDiscoveryAndSubscriptions goroutine
|
||||
|
||||
PHASE 3: Discovery and Subscriptions (after chain start)
|
||||
════════════════════════════════════════════════════════
|
||||
├─► Start GossipsubController (control loop)
|
||||
├─► Start Crawler with topicExtractor from controller
|
||||
└─► Start Dialer with topicsProvider from controller
|
||||
```
|
||||
|
||||
### Dependency Injection
|
||||
|
||||
| Component | Dependencies | Provider |
|
||||
|-----------|-------------|----------|
|
||||
| Crawler | discv5, filterPeer, scorer | P2P Service |
|
||||
| Crawler | topicExtractor | GossipsubController |
|
||||
| Dialer | crawler, listPeers, dialPeers | P2P Service |
|
||||
| Dialer | topicsProvider | GossipsubController |
|
||||
|
||||
---
|
||||
|
||||
## 7. Configuration Parameters
|
||||
|
||||
| Parameter | Default | Description |
|-----------|---------|-------------|
| `crawlInterval` | configurable | How often to crawl discv5 |
| `crawlTimeout` | configurable | Max duration per crawl iteration |
| `maxConcurrentPings` | configurable | Parallel ping limit |
| `cleanupInterval` | 5 minutes | Stale peer pruning frequency |
| `peerPerTopic` | 20 | Target peer count per topic |
| `dialLoop` interval | 1 second | Topic peer check frequency |
|
||||
|
||||
---
|
||||
|
||||
## 8. Key Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `sync/gossipsub_controller.go` | Controller orchestrating topic families |
|
||||
| `sync/gossipsub_topic_family.go` | Interface definitions and fork schedule |
|
||||
| `sync/gossipsub_base.go` | Base implementation for all topic families |
|
||||
| `sync/topic_families_without_subnets.go` | Global topic family implementations |
|
||||
| `sync/topic_families_static_subnets.go` | Blob topic family |
|
||||
| `sync/topic_families_dynamic_subnets.go` | Dynamic subnet families |
|
||||
| `p2p/gossipsub_peer_crawler.go` | Peer discovery and indexing |
|
||||
| `p2p/gossipsub_peer_controller.go` | Peer dialing logic |
|
||||
| `p2p/gossipsubcrawler/interface.go` | Shared interfaces |
|
||||
| `p2p/handshake.go` | Disconnect handler integration |
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
libp2pcore "github.com/libp2p/go-libp2p/core"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
multiplex "github.com/libp2p/go-mplex"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -148,7 +147,7 @@ func closeStreamAndWait(stream network.Stream, log *logrus.Entry) {
|
||||
}
|
||||
|
||||
func isUnwantedError(err error) bool {
|
||||
for _, e := range []error{network.ErrReset, multiplex.ErrShutdown, io.EOF, types.ErrIODeadline} {
|
||||
for _, e := range []error{network.ErrReset, io.EOF, types.ErrIODeadline} {
|
||||
if errors.Is(err, e) || err.Error() == e.Error() {
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -10,27 +9,18 @@ import (
|
||||
)
|
||||
|
||||
// p2pHandlerControlLoop runs in a continuous loop to ensure that:
|
||||
// - We are subscribed to the correct gossipsub topics (for the current and upcoming epoch).
|
||||
// - We have registered the correct RPC stream handlers (for the current and upcoming epoch).
|
||||
// - We have cleaned up gossipsub topics and RPC stream handlers that are no longer needed.
|
||||
func (s *Service) p2pHandlerControlLoop() {
|
||||
// At startup, launch registration and peer discovery loops, and register rpc stream handlers.
|
||||
startEntry := params.GetNetworkScheduleEntry(s.cfg.clock.CurrentEpoch())
|
||||
s.registerSubscribers(startEntry)
|
||||
|
||||
func (s *Service) rpcHandlerControlLoop() {
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
for {
|
||||
select {
|
||||
// In the event of a node restart, we will still end up subscribing to the correct
|
||||
// topics during/after the fork epoch. This routine is to ensure correct
|
||||
// subscriptions for nodes running before a fork epoch.
|
||||
case <-slotTicker.C():
|
||||
current := s.cfg.clock.CurrentEpoch()
|
||||
if err := s.ensureRegistrationsForEpoch(current); err != nil {
|
||||
if err := s.ensureRPCRegistrationsForEpoch(current); err != nil {
|
||||
log.WithError(err).Error("Unable to check for fork in the next epoch")
|
||||
continue
|
||||
}
|
||||
if err := s.ensureDeregistrationForEpoch(current); err != nil {
|
||||
if err := s.ensureRPCDeregistrationForEpoch(current); err != nil {
|
||||
log.WithError(err).Error("Unable to check for fork in the previous epoch")
|
||||
continue
|
||||
}
|
||||
@@ -44,9 +34,8 @@ func (s *Service) p2pHandlerControlLoop() {
|
||||
|
||||
// ensureRegistrationsForEpoch ensures that gossip topic and RPC stream handler
|
||||
// registrations are in place for the current and subsequent epoch.
|
||||
func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
func (s *Service) ensureRPCRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
current := params.GetNetworkScheduleEntry(epoch)
|
||||
s.registerSubscribers(current)
|
||||
|
||||
currentHandler, err := s.rpcHandlerByTopicFromFork(current.VersionEnum)
|
||||
if err != nil {
|
||||
@@ -62,7 +51,6 @@ func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
if current.Epoch == next.Epoch {
|
||||
return nil // no fork in the next epoch
|
||||
}
|
||||
s.registerSubscribers(next)
|
||||
|
||||
if s.digestActionDone(next.ForkDigest, registerRpcOnce) {
|
||||
return nil
|
||||
@@ -84,7 +72,7 @@ func (s *Service) ensureRegistrationsForEpoch(epoch primitives.Epoch) error {
|
||||
}
|
||||
|
||||
// ensureDeregistrationForEpoch deregisters appropriate gossip and RPC topic if there is a fork in the current epoch.
|
||||
func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
|
||||
func (s *Service) ensureRPCDeregistrationForEpoch(currentEpoch primitives.Epoch) error {
|
||||
current := params.GetNetworkScheduleEntry(currentEpoch)
|
||||
|
||||
// If we are still in our genesis fork version then exit early.
|
||||
@@ -115,20 +103,5 @@ func (s *Service) ensureDeregistrationForEpoch(currentEpoch primitives.Epoch) er
|
||||
}
|
||||
}
|
||||
|
||||
// Unsubscribe from all gossip topics with the previous fork digest.
|
||||
if s.digestActionDone(previous.ForkDigest, unregisterGossipOnce) {
|
||||
return nil
|
||||
}
|
||||
for _, t := range s.subHandler.allTopics() {
|
||||
retDigest, err := p2p.ExtractGossipDigest(t)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not retrieve digest")
|
||||
continue
|
||||
}
|
||||
if retDigest == previous.ForkDigest {
|
||||
s.unSubscribeFromTopic(t)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,8 +2,6 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -44,36 +42,11 @@ func testForkWatcherService(t *testing.T, current primitives.Epoch) *Service {
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
initialSyncComplete: closedChan,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func TestRegisterSubscriptions_Idempotent(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
fulu := params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().FuluForkEpoch = fulu
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
current := fulu - 1
|
||||
s := testForkWatcherService(t, current)
|
||||
next := params.GetNetworkScheduleEntry(fulu)
|
||||
wg := attachSpawner(s)
|
||||
require.Equal(t, true, s.registerSubscribers(next))
|
||||
done := make(chan struct{})
|
||||
go func() { wg.Wait(); close(done) }()
|
||||
select {
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatal("timed out waiting for subscriptions to be registered")
|
||||
case <-done:
|
||||
}
|
||||
// the goal of this callback is just to assert that spawn is never called.
|
||||
s.subscriptionSpawner = func(func()) { t.Error("registration routines spawned twice for the same digest") }
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(fulu))
|
||||
}
|
||||
|
||||
func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
@@ -103,7 +76,6 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
nextForkEpoch: params.BeaconConfig().BellatrixForkEpoch,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().AltairForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
@@ -111,8 +83,6 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRangeTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlocksByRootTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCMetaDataTopicV2+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
// TODO: we should check subcommittee indices here but we need to work with the committee cache to do it properly
|
||||
/*
|
||||
subIndices := mapFromCount(params.BeaconConfig().SyncCommitteeSubnetCount)
|
||||
@@ -127,14 +97,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
{
|
||||
name: "capella fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().CapellaForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
|
||||
expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), digest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
forkEpoch: params.BeaconConfig().CapellaForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().DenebForkEpoch,
|
||||
@@ -143,17 +109,10 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
{
|
||||
name: "deneb fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().DenebForkEpoch)
|
||||
rpcMap := make(map[string]bool)
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
rpcMap[string(p)] = true
|
||||
}
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRangeTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
assert.Equal(t, true, rpcMap[p2p.RPCBlobSidecarsByRootTopicV1+s.cfg.p2p.Encoding().ProtocolSuffix()], "topic doesn't exist")
|
||||
},
|
||||
@@ -162,16 +121,8 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
},
|
||||
{
|
||||
name: "electra fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
digest := params.ForkDigest(params.BeaconConfig().ElectraForkEpoch)
|
||||
subIndices := mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, digest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
name: "electra fork in the next epoch",
|
||||
checkRegistration: func(t *testing.T, s *Service) {},
|
||||
forkEpoch: params.BeaconConfig().ElectraForkEpoch,
|
||||
nextForkEpoch: params.BeaconConfig().FuluForkEpoch,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
@@ -194,52 +145,28 @@ func TestService_CheckForNextEpochFork(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testForkWatcherService(t, current)
|
||||
wg := attachSpawner(s)
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
require.NoError(t, s.ensureRPCRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
if current != tt.forkEpoch-1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure the topics were registered for the upcoming fork
|
||||
digest := params.ForkDigest(tt.forkEpoch)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
|
||||
// After this point we are checking deregistration, which doesn't apply if there isn't a higher
|
||||
// nextForkEpoch.
|
||||
if tt.forkEpoch >= tt.nextForkEpoch {
|
||||
return
|
||||
}
|
||||
|
||||
nextDigest := params.ForkDigest(tt.nextForkEpoch)
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
wg = attachSpawner(s)
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
require.NoError(t, s.ensureRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
wg.Wait()
|
||||
require.NoError(t, s.ensureRPCRegistrationsForEpoch(s.cfg.clock.CurrentEpoch()))
|
||||
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(digest))
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
require.NoError(t, s.ensureDeregistrationForEpoch(tt.nextForkEpoch+1))
|
||||
assert.Equal(t, false, s.subHandler.digestExists(digest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(nextDigest))
|
||||
require.NoError(t, s.ensureRPCDeregistrationForEpoch(tt.nextForkEpoch))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func attachSpawner(s *Service) *sync.WaitGroup {
|
||||
wg := new(sync.WaitGroup)
|
||||
s.subscriptionSpawner = func(f func()) {
|
||||
wg.Go(func() {
|
||||
f()
|
||||
})
|
||||
}
|
||||
return wg
|
||||
}
|
||||
|
||||
// oneEpoch returns the duration of one epoch.
|
||||
func oneEpoch() time.Duration {
|
||||
return time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
|
||||
|
||||
230
beacon-chain/sync/gossipsub_base.go
Normal file
@@ -0,0 +1,230 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type baseTopicFamily struct {
|
||||
syncService *Service
|
||||
nse params.NetworkScheduleEntry
|
||||
validator wrappedVal
|
||||
handler subHandler
|
||||
|
||||
tf TopicFamily
|
||||
|
||||
mu sync.Mutex
|
||||
subscriptions map[string]*pubsub.Subscription
|
||||
}
|
||||
|
||||
func newBaseTopicFamily(syncService *Service, nse params.NetworkScheduleEntry, validator wrappedVal,
|
||||
handler subHandler, tf TopicFamily) *baseTopicFamily {
|
||||
return &baseTopicFamily{
|
||||
syncService: syncService,
|
||||
nse: nse,
|
||||
validator: validator,
|
||||
handler: handler,
|
||||
tf: tf,
|
||||
subscriptions: make(map[string]*pubsub.Subscription),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseTopicFamily) NetworkScheduleEntry() params.NetworkScheduleEntry {
|
||||
return b.nse
|
||||
}
|
||||
|
||||
// subscribeToTopics subscribes to the given list of gossipsub topics.
|
||||
//
|
||||
// This method is idempotent for a given topic - if a subscription already exists for a topic,
|
||||
// it will be skipped without error. This allows callers to safely call subscribeToTopics
|
||||
// multiple times with overlapping topic lists without creating duplicate subscriptions.
|
||||
//
|
||||
// For each new topic subscription, this method:
|
||||
// 1. Registers a topic validator with the pubsub system
|
||||
// 2. Creates the subscription via the p2p layer
|
||||
// 3. Spawns a goroutine running a message loop that processes incoming messages
|
||||
// 4. Tracks the subscription in the internal subscriptions map
|
||||
//
|
||||
// The message loop for each subscription runs until the context is cancelled or an error
|
||||
// occurs. Each received message is processed in its own goroutine with panic recovery.
|
||||
//
|
||||
// Errors during subscription (validator registration failures, subscription failures) are
|
||||
// logged but do not prevent other topics from being subscribed to.
|
||||
func (b *baseTopicFamily) subscribeToTopics(topics []string) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
for _, topic := range topics {
|
||||
log := log.WithField("topic", topic)
|
||||
s := b.syncService
|
||||
|
||||
// Do not resubscribe to topics that we already have a subscription for.
|
||||
_, ok := b.subscriptions[topic]
|
||||
if ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, b.validator)); err != nil {
|
||||
log.WithError(err).Error("Could not register validator for topic")
|
||||
continue
|
||||
}
|
||||
|
||||
sub, err := s.cfg.p2p.SubscribeToTopic(topic)
|
||||
if err != nil {
|
||||
// Any error subscribing to a PubSub topic would be the result of a misconfiguration of
|
||||
// libp2p PubSub library or a subscription request to a topic that fails to match the topic
|
||||
// subscription filter.
|
||||
log.WithError(err).Error("Could not subscribe topic")
|
||||
continue
|
||||
}
|
||||
|
||||
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
|
||||
// message.
|
||||
pipeline := func(msg *pubsub.Message) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.pubsub")
|
||||
defer span.End()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
|
||||
log.WithField("error", r).
|
||||
WithField("recoveredAt", "subscribeWithBase").
|
||||
WithField("stack", string(debug.Stack())).
|
||||
Error("Panic occurred")
|
||||
}
|
||||
}()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
if msg.ValidatorData == nil {
|
||||
log.Error("Received nil message on pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
if err := b.handler(ctx, msg.ValidatorData.(proto.Message)); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Could not handle p2p pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The main message loop for receiving incoming messages from this subscription.
|
||||
messageLoop := func() {
|
||||
for {
|
||||
msg, err := sub.Next(s.ctx)
|
||||
if err != nil {
|
||||
// This should only happen when the context is cancelled or subscription is cancelled.
|
||||
if !errors.Is(err, pubsub.ErrSubscriptionCancelled) { // Only log a warning on unexpected errors.
|
||||
log.WithError(err).Warn("Subscription next failed")
|
||||
}
|
||||
// Cancel subscription in the event of an error, as we are
|
||||
// now exiting topic event loop.
|
||||
sub.Cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom == s.cfg.p2p.PeerID() {
|
||||
continue
|
||||
}
|
||||
|
||||
go pipeline(msg)
|
||||
}
|
||||
}
|
||||
|
||||
go messageLoop()
|
||||
log.WithField("topic", topic).Info("Subscribed to")
|
||||
b.subscriptions[topic] = sub
|
||||
s.subHandler.addTopic(topic, sub)
|
||||
}
|
||||
}
|
||||
|
||||
// UnsubscribeAll unsubscribes from all topics managed by this topic family.
|
||||
//
|
||||
// This method iterates through all active subscriptions and performs cleanup for each:
|
||||
// - Unregisters the topic validator from pubsub
|
||||
// - Cancels the subscription (stopping the message loop goroutine)
|
||||
// - Leaves the topic in the p2p layer
|
||||
// - Removes the topic from the crawler's tracking (if crawler is configured)
|
||||
// - Removes the subscription from internal tracking
|
||||
//
|
||||
// After this method returns, the topic family has no active subscriptions.
|
||||
// This is typically called during shutdown or when transitioning between network forks.
|
||||
func (b *baseTopicFamily) UnsubscribeAll() {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
for topic, sub := range b.subscriptions {
|
||||
b.cleanupSubscription(topic, sub)
|
||||
delete(b.subscriptions, topic)
|
||||
}
|
||||
}
|
||||
|
||||
// pruneTopicsExcept unsubscribes from all topics except those in the provided list.
|
||||
//
|
||||
// This method is used to efficiently manage dynamic subnet subscriptions. When the set of
|
||||
// required topics changes (e.g., due to slot progression or validator duty changes), this
|
||||
// method removes subscriptions that are no longer needed while preserving active ones.
|
||||
//
|
||||
// Parameters:
|
||||
// - wantedTopics: List of topic strings that should remain subscribed. Any topic not in
|
||||
// this list will be unsubscribed and cleaned up.
|
||||
//
|
||||
// For each topic being pruned, the cleanup process:
|
||||
// - Unregisters the topic validator from pubsub
|
||||
// - Cancels the subscription (stopping the message loop goroutine)
|
||||
// - Leaves the topic in the p2p layer
|
||||
// - Removes the topic from the crawler's tracking (if crawler is configured)
|
||||
// - Removes the subscription from internal tracking
|
||||
//
|
||||
// This method is safe to call with an empty wantedTopics list, which will unsubscribe from
|
||||
// all topics (equivalent to UnsubscribeAll).
|
||||
func (b *baseTopicFamily) pruneTopicsExcept(wantedTopics []string) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
neededMap := make(map[string]bool, len(wantedTopics))
|
||||
for _, t := range wantedTopics {
|
||||
neededMap[t] = true
|
||||
}
|
||||
|
||||
for topic, sub := range b.subscriptions {
|
||||
if !neededMap[topic] {
|
||||
b.cleanupSubscription(topic, sub)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *baseTopicFamily) cleanupSubscription(topic string, sub *pubsub.Subscription) {
|
||||
s := b.syncService
|
||||
log.WithField("topic", topic).Info("Unsubscribed from")
|
||||
if err := s.cfg.p2p.PubSub().UnregisterTopicValidator(topic); err != nil {
|
||||
log.WithError(err).Error("Could not unregister topic validator")
|
||||
}
|
||||
|
||||
if sub != nil {
|
||||
sub.Cancel()
|
||||
}
|
||||
if err := s.cfg.p2p.LeaveTopic(topic); err != nil {
|
||||
log.WithError(err).Error("Unable to leave topic")
|
||||
}
|
||||
|
||||
if crawler := s.cfg.p2p.Crawler(); crawler != nil {
|
||||
crawler.RemoveTopic(topic)
|
||||
}
|
||||
delete(b.subscriptions, topic)
|
||||
s.subHandler.removeTopic(topic)
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package sync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -33,7 +32,6 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
pubsubpb "github.com/libp2p/go-libp2p-pubsub/pb"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
@@ -60,11 +58,10 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
topic := "/eth2/%x/voluntary_exit"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
handler := func(_ context.Context, msg proto.Message) error {
|
||||
m, ok := msg.(*pb.SignedVoluntaryExit)
|
||||
assert.Equal(t, true, ok, "Object is not of type *pb.SignedVoluntaryExit")
|
||||
if m.Exit == nil || m.Exit.Epoch != 55 {
|
||||
@@ -72,10 +69,15 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
|
||||
}
|
||||
wg.Done()
|
||||
return nil
|
||||
}, nse)
|
||||
r.markForChainStart()
|
||||
}
|
||||
|
||||
p2pService.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
tf := NewVoluntaryExitTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, handler, tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
r.markForChainStart()
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -110,19 +112,22 @@ func TestSubscribe_UnsubscribeTopic(t *testing.T) {
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
topic := "/eth2/%x/voluntary_exit"
|
||||
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}, nse)
|
||||
tf := staticTopicFamily{
|
||||
name: "VoluntaryExitTopicFamily",
|
||||
topics: []string{topic},
|
||||
}
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, &tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
r.markForChainStart()
|
||||
|
||||
fullTopic := fmt.Sprintf(topic, p2pService.Digest) + p2pService.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, r.subHandler.topicExists(fullTopic))
|
||||
assert.Equal(t, true, r.subHandler.topicExists(topic))
|
||||
topics := p2pService.PubSub().GetTopics()
|
||||
assert.Equal(t, fullTopic, topics[0])
|
||||
assert.Equal(t, topic, topics[0])
|
||||
|
||||
r.unSubscribeFromTopic(fullTopic)
|
||||
tf.UnsubscribeAll()
|
||||
|
||||
assert.Equal(t, false, r.subHandler.topicExists(fullTopic))
|
||||
assert.Equal(t, false, r.subHandler.topicExists(topic))
|
||||
assert.Equal(t, 0, len(p2pService.PubSub().GetTopics()))
|
||||
|
||||
}
|
||||
@@ -157,16 +162,20 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/attester_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
|
||||
|
||||
tf := NewAttesterSlashingTopicFamily(&r, nse)
|
||||
tf.baseTopicFamily.validator = r.noopValidator
|
||||
tf.baseTopicFamily.handler = func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.attesterSlashingSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}, nse)
|
||||
}
|
||||
tf.Subscribe()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
chainService.State = beaconState
|
||||
r.markForChainStart()
|
||||
@@ -178,7 +187,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
|
||||
require.NoError(t, err, "Error generating attester slashing")
|
||||
err = r.cfg.beaconDB.SaveState(ctx, beaconState, bytesutil.ToBytes32(attesterSlashing.FirstAttestation().GetData().BeaconBlockRoot))
|
||||
require.NoError(t, err)
|
||||
p2pService.ReceivePubSub(topic, attesterSlashing)
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), attesterSlashing)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -210,18 +219,22 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := "/eth2/%x/proposer_slashing"
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p2pService.Digest = nse.ForkDigest
|
||||
r.subscribe(topic, r.noopValidator, func(ctx context.Context, msg proto.Message) error {
|
||||
|
||||
tf := NewProposerSlashingTopicFamily(&r, nse)
|
||||
tf.baseTopicFamily.validator = r.noopValidator
|
||||
tf.baseTopicFamily.handler = func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.proposerSlashingSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}, nse)
|
||||
}
|
||||
tf.Subscribe()
|
||||
|
||||
beaconState, privKeys := util.DeterministicGenesisState(t, 64)
|
||||
chainService.State = beaconState
|
||||
r.markForChainStart()
|
||||
@@ -232,7 +245,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err, "Error generating proposer slashing")
|
||||
|
||||
p2pService.ReceivePubSub(topic, proposerSlashing)
|
||||
p2pService.ReceivePubSub(tf.getFullTopicString(), proposerSlashing)
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
@@ -262,70 +275,27 @@ func TestSubscribe_HandlesPanic(t *testing.T) {
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
p.Digest = nse.ForkDigest
|
||||
|
||||
topic := p2p.GossipTypeMapping[reflect.TypeFor[*pb.SignedVoluntaryExit]()]
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
r.subscribe(topic, r.noopValidator, func(_ context.Context, msg proto.Message) error {
|
||||
tf := NewVoluntaryExitTopicFamily(&r, nse)
|
||||
handler := func(_ context.Context, msg proto.Message) error {
|
||||
defer wg.Done()
|
||||
panic("bad")
|
||||
}, nse)
|
||||
}
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, handler, tf)
|
||||
tf.baseTopicFamily = base
|
||||
|
||||
tf.Subscribe()
|
||||
|
||||
r.markForChainStart()
|
||||
p.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
p.ReceivePubSub(tf.getFullTopicString(), &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, fieldparams.BLSSignatureLength)})
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRevalidateSubscription_CorrectlyFormatsTopic(t *testing.T) {
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
hook := logTest.NewGlobal()
|
||||
chain := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
}
|
||||
r := Service{
|
||||
ctx: t.Context(),
|
||||
cfg: &config{
|
||||
chain: chain,
|
||||
clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot),
|
||||
p2p: p,
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
|
||||
params := subscribeParameters{
|
||||
topicFormat: "/eth2/testing/%#x/committee%d",
|
||||
nse: nse,
|
||||
}
|
||||
tracker := newSubnetTracker(params)
|
||||
|
||||
// committee index 1
|
||||
c1 := uint64(1)
|
||||
fullTopic := params.fullTopic(c1, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal := r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
require.NoError(t, r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal))
|
||||
sub1, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c1, sub1)
|
||||
|
||||
// committee index 2
|
||||
c2 := uint64(2)
|
||||
fullTopic = params.fullTopic(c2, r.cfg.p2p.Encoding().ProtocolSuffix())
|
||||
_, topVal = r.wrapAndReportValidation(fullTopic, r.noopValidator)
|
||||
err = r.cfg.p2p.PubSub().RegisterTopicValidator(fullTopic, topVal)
|
||||
require.NoError(t, err)
|
||||
sub2, err := r.cfg.p2p.SubscribeToTopic(fullTopic)
|
||||
require.NoError(t, err)
|
||||
tracker.track(c2, sub2)
|
||||
|
||||
r.pruneNotWanted(tracker, map[uint64]bool{c2: true})
|
||||
require.LogsDoNotContain(t, hook, "Could not unregister topic validator")
|
||||
}
|
||||
|
||||
func Test_wrapAndReportValidation(t *testing.T) {
|
||||
mChain := &mockChain.ChainService{
|
||||
Genesis: time.Now(),
|
||||
@@ -446,11 +416,15 @@ func TestFilterSubnetPeers(t *testing.T) {
|
||||
cfg.SlotDurationMilliseconds = 1000
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
// Save the current flags to restore them after the test
|
||||
resetFlags := flags.Get()
|
||||
defer func() {
|
||||
flags.Init(resetFlags)
|
||||
}()
|
||||
|
||||
gFlags := new(flags.GlobalFlags)
|
||||
gFlags.MinimumPeersPerSubnet = 4
|
||||
flags.Init(gFlags)
|
||||
// Reset config.
|
||||
defer flags.Init(new(flags.GlobalFlags))
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
@@ -480,6 +454,7 @@ func TestFilterSubnetPeers(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, &r)
|
||||
markInitSyncComplete(t, &r)
|
||||
// Empty cache at the end of the test.
|
||||
defer cache.SubnetIDs.EmptyAllCaches()
|
||||
@@ -553,11 +528,12 @@ func TestSubscribeWithSyncSubnets_DynamicOK(t *testing.T) {
|
||||
currEpoch := slots.ToEpoch(slot)
|
||||
cache.SyncSubnetIDs.AddSyncCommitteeSubnets([]byte("pubkey"), currEpoch, []uint64{0, 1}, 10*time.Second)
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
go r.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
})
|
||||
|
||||
tfDyn := NewSyncCommitteeTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, tfDyn)
|
||||
tfDyn.baseTopicFamily = base
|
||||
tfDyn.SubscribeForSlot(slot)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
topicMap := map[string]bool{}
|
||||
@@ -602,12 +578,11 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().DenebForkVersion), nse.ForkVersion)
|
||||
require.Equal(t, params.BeaconConfig().DenebForkEpoch, nse.Epoch)
|
||||
|
||||
sp := newSubnetTracker(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: r.activeSyncSubnetIndices,
|
||||
})
|
||||
r.trySubscribeSubnets(sp)
|
||||
tfDyn2 := NewSyncCommitteeTopicFamily(&r, nse)
|
||||
base := newBaseTopicFamily(&r, nse, r.noopValidator, noopHandler, tfDyn2)
|
||||
tfDyn2.baseTopicFamily = base
|
||||
tfDyn2.SubscribeForSlot(r.cfg.clock.CurrentSlot())
|
||||
|
||||
assert.Equal(t, 2, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
topicMap := map[string]bool{}
|
||||
for _, t := range r.cfg.p2p.PubSub().GetTopics() {
|
||||
@@ -626,11 +601,14 @@ func TestSubscribeWithSyncSubnets_DynamicSwitchFork(t *testing.T) {
|
||||
require.Equal(t, [4]byte(params.BeaconConfig().ElectraForkVersion), nse.ForkVersion)
|
||||
require.Equal(t, params.BeaconConfig().ElectraForkEpoch, nse.Epoch)
|
||||
|
||||
sp.nse = nse
|
||||
tfDyn2.nse = nse
|
||||
// clear the cache and re-subscribe to subnets.
|
||||
// this should result in the subscriptions being removed
|
||||
cache.SyncSubnetIDs.EmptyAllCaches()
|
||||
r.trySubscribeSubnets(sp)
|
||||
|
||||
slot := r.cfg.clock.CurrentSlot()
|
||||
tfDyn2.UnsubscribeForSlot(slot)
|
||||
tfDyn2.SubscribeForSlot(r.cfg.clock.CurrentSlot())
|
||||
assert.Equal(t, 0, len(r.cfg.p2p.PubSub().GetTopics()))
|
||||
}
|
||||
|
||||
144
beacon-chain/sync/gossipsub_topic_family.go
Normal file
@@ -0,0 +1,144 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// wrappedVal represents a gossip validator which also returns an error along with the result.
|
||||
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)
|
||||
|
||||
// subHandler represents handler for a given subscription.
|
||||
type subHandler func(context.Context, proto.Message) error
|
||||
|
||||
// noopHandler is used for subscriptions that do not require anything to be done.
|
||||
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type TopicFamily interface {
|
||||
Name() string
|
||||
NetworkScheduleEntry() params.NetworkScheduleEntry
|
||||
UnsubscribeAll()
|
||||
}
|
||||
|
||||
type ShardedTopicFamily interface {
|
||||
TopicFamily
|
||||
Subscribe()
|
||||
}
|
||||
|
||||
type DynamicShardedTopicFamily interface {
|
||||
TopicFamily
|
||||
TopicsWithMinPeerCount(slot primitives.Slot) map[string]int
|
||||
TopicsToSubscribeForSlot(slot primitives.Slot) []string
|
||||
ExtractTopicsForNode(node *enode.Node) ([]string, error)
|
||||
SubscribeForSlot(slot primitives.Slot)
|
||||
UnsubscribeForSlot(slot primitives.Slot)
|
||||
}
|
||||
|
||||
type topicFamilyEntry struct {
|
||||
activationEpoch primitives.Epoch
|
||||
deactivationEpoch primitives.Epoch
|
||||
factory func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily
|
||||
}
|
||||
|
||||
func topicFamilySchedule() []topicFamilyEntry {
|
||||
cfg := params.BeaconConfig()
|
||||
return []topicFamilyEntry{
|
||||
// Genesis topic families
|
||||
{
|
||||
activationEpoch: cfg.GenesisEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{
|
||||
NewBlockTopicFamily(s, nse),
|
||||
NewAggregateAndProofTopicFamily(s, nse),
|
||||
NewVoluntaryExitTopicFamily(s, nse),
|
||||
NewProposerSlashingTopicFamily(s, nse),
|
||||
NewAttesterSlashingTopicFamily(s, nse),
|
||||
NewAttestationTopicFamily(s, nse),
|
||||
}
|
||||
},
|
||||
},
|
||||
// Altair topic families
|
||||
{
|
||||
activationEpoch: cfg.AltairForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
families := []TopicFamily{
|
||||
NewSyncContributionAndProofTopicFamily(s, nse),
|
||||
NewSyncCommitteeTopicFamily(s, nse),
|
||||
}
|
||||
if features.Get().EnableLightClient {
|
||||
families = append(families,
|
||||
NewLightClientOptimisticUpdateTopicFamily(s, nse),
|
||||
NewLightClientFinalityUpdateTopicFamily(s, nse),
|
||||
)
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
// Capella topic families
|
||||
{
|
||||
activationEpoch: cfg.CapellaForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{NewBlsToExecutionChangeTopicFamily(s, nse)}
|
||||
},
|
||||
},
|
||||
// Blob topic families (static per-subnet) in Deneb and Electra forks (removed in Fulu)
|
||||
{
|
||||
activationEpoch: cfg.DenebForkEpoch,
|
||||
deactivationEpoch: cfg.ElectraForkEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
count := cfg.BlobsidecarSubnetCount
|
||||
families := make([]TopicFamily, 0, count)
|
||||
for i := range count {
|
||||
families = append(families, NewBlobTopicFamily(s, nse, i))
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
{
|
||||
activationEpoch: cfg.ElectraForkEpoch,
|
||||
deactivationEpoch: cfg.FuluForkEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
count := cfg.BlobsidecarSubnetCountElectra
|
||||
families := make([]TopicFamily, 0, count)
|
||||
for i := range count {
|
||||
families = append(families, NewBlobTopicFamily(s, nse, i))
|
||||
}
|
||||
return families
|
||||
},
|
||||
},
|
||||
// Fulu data column topic family
|
||||
{
|
||||
activationEpoch: cfg.FuluForkEpoch,
|
||||
deactivationEpoch: cfg.FarFutureEpoch,
|
||||
factory: func(s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
return []TopicFamily{NewDataColumnTopicFamily(s, nse)}
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TopicFamiliesForEpoch(epoch primitives.Epoch, s *Service, nse params.NetworkScheduleEntry) []TopicFamily {
|
||||
var activeFamilies []TopicFamily
|
||||
for _, entry := range topicFamilySchedule() {
|
||||
if epoch < entry.activationEpoch {
|
||||
continue
|
||||
}
|
||||
if epoch >= entry.deactivationEpoch {
|
||||
continue
|
||||
}
|
||||
activeFamilies = append(activeFamilies, entry.factory(s, nse)...)
|
||||
}
|
||||
return activeFamilies
|
||||
}
|
||||
311
beacon-chain/sync/gossipsub_topic_family_test.go
Normal file
@@ -0,0 +1,311 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
)
|
||||
|
||||
// createMinimalService creates a minimal Service instance for testing
|
||||
func createMinimalService(t *testing.T) *Service {
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
return &Service{
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
},
|
||||
ctx: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopicFamiliesForEpoch(t *testing.T) {
|
||||
// Define test epochs
|
||||
const (
|
||||
genesisEpoch = primitives.Epoch(0)
|
||||
altairEpoch = primitives.Epoch(100)
|
||||
bellatrixEpoch = primitives.Epoch(200)
|
||||
capellaEpoch = primitives.Epoch(300)
|
||||
denebEpoch = primitives.Epoch(400)
|
||||
electraEpoch = primitives.Epoch(500)
|
||||
fuluEpoch = primitives.Epoch(600)
|
||||
)
|
||||
|
||||
// Define topic families for each fork
|
||||
// These names must match what's returned by the Name() method of each topic family
|
||||
genesisFamilies := []string{
|
||||
"BlockTopicFamily",
|
||||
"AggregateAndProofTopicFamily",
|
||||
"VoluntaryExitTopicFamily",
|
||||
"ProposerSlashingTopicFamily",
|
||||
"AttesterSlashingTopicFamily",
|
||||
"AttestationTopicFamily",
|
||||
}
|
||||
|
||||
altairFamilies := []string{
|
||||
"SyncContributionAndProofTopicFamily",
|
||||
"SyncCommitteeTopicFamily",
|
||||
}
|
||||
|
||||
altairLightClientFamilies := []string{
|
||||
"LightClientOptimisticUpdateTopicFamily",
|
||||
"LightClientFinalityUpdateTopicFamily",
|
||||
}
|
||||
|
||||
capellaFamilies := []string{
|
||||
"BlsToExecutionChangeTopicFamily",
|
||||
}
|
||||
|
||||
denebBlobFamilies := []string{
|
||||
"BlobTopicFamily-0",
|
||||
"BlobTopicFamily-1",
|
||||
"BlobTopicFamily-2",
|
||||
"BlobTopicFamily-3",
|
||||
"BlobTopicFamily-4",
|
||||
"BlobTopicFamily-5",
|
||||
}
|
||||
|
||||
electraBlobFamilies := append(slices.Clone(denebBlobFamilies), "BlobTopicFamily-6", "BlobTopicFamily-7")
|
||||
|
||||
fuluFamilies := []string{
|
||||
"DataColumnTopicFamily",
|
||||
}
|
||||
|
||||
// Helper function to combine fork families
|
||||
combineForks := func(forkSets ...[]string) []string {
|
||||
var combined []string
|
||||
for _, forkSet := range forkSets {
|
||||
combined = append(combined, forkSet...)
|
||||
}
|
||||
return combined
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
epoch primitives.Epoch
|
||||
setupConfig func()
|
||||
enableLightClient bool
|
||||
expectedFamilies []string
|
||||
}{
|
||||
{
|
||||
name: "epoch before any fork activation should return empty",
|
||||
epoch: primitives.Epoch(0),
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
// Set all fork epochs to future epochs
|
||||
config.GenesisEpoch = primitives.Epoch(1000)
|
||||
config.AltairForkEpoch = primitives.Epoch(2000)
|
||||
config.BellatrixForkEpoch = primitives.Epoch(3000)
|
||||
config.CapellaForkEpoch = primitives.Epoch(4000)
|
||||
config.DenebForkEpoch = primitives.Epoch(5000)
|
||||
config.ElectraForkEpoch = primitives.Epoch(6000)
|
||||
config.FuluForkEpoch = primitives.Epoch(7000)
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: []string{},
|
||||
},
|
||||
{
|
||||
name: "epoch at genesis should return genesis topic families",
|
||||
epoch: genesisEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: genesisFamilies,
|
||||
},
|
||||
{
|
||||
name: "epoch at Altair without light client should have genesis + Altair families",
|
||||
epoch: altairEpoch,
|
||||
enableLightClient: false,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Altair with light client enabled should include light client families",
|
||||
epoch: altairEpoch,
|
||||
enableLightClient: true,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, altairLightClientFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Capella should have genesis + Altair + Capella families",
|
||||
epoch: capellaEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Deneb should include blob sidecars",
|
||||
epoch: denebEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6 // Deneb has 6 blob subnets
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, denebBlobFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Electra should have Electra blobs not Deneb blobs",
|
||||
epoch: electraEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8 // Electra has 8 blob subnets
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, electraBlobFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch at Fulu should have data columns not blobs",
|
||||
epoch: fuluEpoch,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, fuluFamilies),
|
||||
},
|
||||
{
|
||||
name: "epoch after Fulu should maintain Fulu families",
|
||||
epoch: fuluEpoch + 100,
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, fuluFamilies),
|
||||
},
|
||||
{
|
||||
name: "edge case - epoch exactly at deactivation should not include deactivated family",
|
||||
epoch: electraEpoch, // This deactivates Deneb blobs
|
||||
setupConfig: func() {
|
||||
config := params.BeaconConfig().Copy()
|
||||
config.GenesisEpoch = genesisEpoch
|
||||
config.AltairForkEpoch = altairEpoch
|
||||
config.BellatrixForkEpoch = bellatrixEpoch
|
||||
config.CapellaForkEpoch = capellaEpoch
|
||||
config.DenebForkEpoch = denebEpoch
|
||||
config.ElectraForkEpoch = electraEpoch
|
||||
config.FuluForkEpoch = fuluEpoch
|
||||
config.BlobsidecarSubnetCount = 6
|
||||
config.BlobsidecarSubnetCountElectra = 8
|
||||
params.OverrideBeaconConfig(config)
|
||||
},
|
||||
expectedFamilies: combineForks(genesisFamilies, altairFamilies, capellaFamilies, electraBlobFamilies),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
if tt.enableLightClient {
|
||||
resetFlags := features.InitWithReset(&features.Flags{
|
||||
EnableLightClient: true,
|
||||
})
|
||||
defer resetFlags()
|
||||
}
|
||||
tt.setupConfig()
|
||||
service := createMinimalService(t)
|
||||
families := TopicFamiliesForEpoch(tt.epoch, service, params.NetworkScheduleEntry{})
|
||||
|
||||
// Collect actual family names
|
||||
actualFamilies := make([]string, 0, len(families))
|
||||
for _, family := range families {
|
||||
actualFamilies = append(actualFamilies, family.Name())
|
||||
}
|
||||
|
||||
// Assert exact match - families should have exactly the expected families and nothing more
|
||||
assert.Equal(t, len(tt.expectedFamilies), len(actualFamilies),
|
||||
"Expected %d families but got %d", len(tt.expectedFamilies), len(actualFamilies))
|
||||
|
||||
// Create a map for efficient lookup
|
||||
expectedMap := make(map[string]bool)
|
||||
for _, expected := range tt.expectedFamilies {
|
||||
expectedMap[expected] = true
|
||||
}
|
||||
|
||||
// Check each actual family is expected
|
||||
for _, actual := range actualFamilies {
|
||||
if !expectedMap[actual] {
|
||||
t.Errorf("Unexpected topic family found: %s", actual)
|
||||
}
|
||||
delete(expectedMap, actual) // Remove from map as we find it
|
||||
}
|
||||
|
||||
// Check all expected families were found (anything left in map was missing)
|
||||
for missing := range expectedMap {
|
||||
t.Errorf("Expected topic family not found: %s", missing)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/kzg"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
@@ -269,41 +268,6 @@ func TestKzgBatchVerifierFallback(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateWithKzgBatchVerifier_DeadlockOnTimeout(t *testing.T) {
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.SecondsPerSlot = 0
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
defer cancel()
|
||||
|
||||
service := &Service{
|
||||
ctx: ctx,
|
||||
kzgChan: make(chan *kzgVerifier),
|
||||
}
|
||||
go service.kzgVerifierRoutine()
|
||||
|
||||
result, err := service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
require.Equal(t, pubsub.ValidationIgnore, result)
|
||||
require.ErrorIs(t, err, context.DeadlineExceeded)
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
_, _ = service.validateWithKzgBatchVerifier(context.Background(), nil)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatal("validateWithKzgBatchVerifier blocked")
|
||||
}
|
||||
}
|
||||
|
||||
func createValidTestDataColumns(t *testing.T, count int) []blocks.RODataColumn {
|
||||
_, roSidecars, _ := util.GenerateTestFuluBlockWithSidecars(t, count)
|
||||
if len(roSidecars) >= count {
|
||||
|
||||
@@ -329,6 +329,8 @@ func TestHandshakeHandlers_Roundtrip(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
markInitSyncComplete(t, r)
|
||||
clock := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
|
||||
@@ -945,6 +947,8 @@ func TestStatusRPCRequest_BadPeerHandshake(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
markInitSyncComplete(t, r)
|
||||
clock := startup.NewClockSynchronizer()
|
||||
require.NoError(t, clock.SetClock(startup.NewClock(time.Now(), [32]byte{})))
|
||||
|
||||
@@ -181,7 +181,7 @@ type Service struct {
|
||||
lcStore *lightClient.Store
|
||||
dataColumnLogCh chan dataColumnLogEntry
|
||||
digestActions perDigestSet
|
||||
subscriptionSpawner func(func()) // see Service.spawn for details
|
||||
subscriptionController *SubscriptionController
|
||||
}
|
||||
|
||||
// NewService initializes new regular sync service.
|
||||
@@ -198,6 +198,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
dataColumnLogCh: make(chan dataColumnLogEntry, 1000),
|
||||
reconstructionRandGen: rand.NewGenerator(),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(r); err != nil {
|
||||
@@ -232,6 +233,7 @@ func NewService(ctx context.Context, opts ...Option) *Service {
|
||||
delete(r.seenPendingBlocks, root)
|
||||
}
|
||||
})
|
||||
|
||||
r.subHandler = newSubTopicHandler()
|
||||
r.rateLimiter = newRateLimiter(r.cfg.p2p)
|
||||
r.initCaches()
|
||||
@@ -323,9 +325,10 @@ func (s *Service) Stop() error {
|
||||
for _, p := range s.cfg.p2p.Host().Mux().Protocols() {
|
||||
s.cfg.p2p.Host().RemoveStreamHandler(p)
|
||||
}
|
||||
for _, t := range s.cfg.p2p.PubSub().GetTopics() {
|
||||
s.unSubscribeFromTopic(t)
|
||||
}
|
||||
|
||||
// Stop the gossipsub controller.
|
||||
s.subscriptionController.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -405,7 +408,36 @@ func (s *Service) startDiscoveryAndSubscriptions() {
|
||||
}
|
||||
|
||||
// Start the fork watcher.
|
||||
go s.p2pHandlerControlLoop()
|
||||
go s.rpcHandlerControlLoop()
|
||||
|
||||
// Start the gossipsub controller.
|
||||
go s.subscriptionController.Start()
|
||||
|
||||
// Configure the crawler and dialer with the topic extractor / subnet topics
|
||||
// provider if available.
|
||||
crawler := s.cfg.p2p.Crawler()
|
||||
if crawler == nil {
|
||||
log.Info("No crawler available, topic extraction disabled")
|
||||
return
|
||||
}
|
||||
|
||||
// Start the crawler now that it has the extractor.
|
||||
if err := crawler.Start(s.subscriptionController.ExtractTopics); err != nil {
|
||||
log.WithError(err).Warn("Failed to start peer crawler")
|
||||
return
|
||||
}
|
||||
|
||||
// Start the gossipsub dialer if available.
|
||||
if dialer := s.cfg.p2p.GossipDialer(); dialer != nil {
|
||||
provider := func() map[string]int {
|
||||
return s.subscriptionController.GetCurrentActiveTopicsWithMinPeerCount()
|
||||
}
|
||||
if err := dialer.Start(provider); err != nil {
|
||||
log.WithError(err).Warn("Failed to start gossip peer dialer")
|
||||
}
|
||||
} else {
|
||||
log.Error("No gossip peer dialer available")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) writeErrorResponseToStream(responseCode byte, reason string, stream libp2pcore.Stream) {
|
||||
|
||||
@@ -2,6 +2,7 @@ package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -16,6 +17,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/startup"
|
||||
state_native "github.com/OffchainLabs/prysm/v7/beacon-chain/state/state-native"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
leakybucket "github.com/OffchainLabs/prysm/v7/container/leaky-bucket"
|
||||
"github.com/OffchainLabs/prysm/v7/crypto/bls"
|
||||
@@ -67,8 +69,9 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
chainStarted: abool.New(),
|
||||
clockWaiter: gs,
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
|
||||
topic := "/eth2/%x/beacon_block"
|
||||
topicFmt := "/eth2/%x/beacon_block"
|
||||
go r.startDiscoveryAndSubscriptions()
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
@@ -82,7 +85,10 @@ func TestSyncHandlers_WaitToSync(t *testing.T) {
|
||||
msg := util.NewBeaconBlock()
|
||||
msg.Block.ParentRoot = util.Random32Bytes(t)
|
||||
msg.Signature = sk.Sign([]byte("data")).Marshal()
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
// Build full topic using current fork digest
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
fullTopic := fmt.Sprintf(topicFmt, nse.ForkDigest) + p2p.Encoding().ProtocolSuffix()
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
// wait for chainstart to be sent
|
||||
time.Sleep(400 * time.Millisecond)
|
||||
require.Equal(t, true, r.chainStarted.IsSet(), "Did not receive chain start event.")
|
||||
@@ -137,6 +143,7 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
|
||||
clockWaiter: gs,
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
r.initCaches()
|
||||
|
||||
var vr [32]byte
|
||||
@@ -169,14 +176,16 @@ func TestSyncHandlers_WaitTillSynced(t *testing.T) {
|
||||
// Save block into DB so that validateBeaconBlockPubSub() process gets short cut.
|
||||
util.SaveBlock(t, ctx, r.cfg.beaconDB, msg)
|
||||
|
||||
topic := "/eth2/%x/beacon_block"
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
topicFmt := "/eth2/%x/beacon_block"
|
||||
nse := params.GetNetworkScheduleEntry(r.cfg.clock.CurrentEpoch())
|
||||
fullTopic := fmt.Sprintf(topicFmt, nse.ForkDigest) + p2p.Encoding().ProtocolSuffix()
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
assert.Equal(t, 0, len(blockChan), "block was received by sync service despite not being fully synced")
|
||||
|
||||
close(r.initialSyncComplete)
|
||||
<-syncCompleteCh
|
||||
|
||||
p2p.ReceivePubSub(topic, msg)
|
||||
p2p.ReceivePubSub(fullTopic, msg)
|
||||
|
||||
select {
|
||||
case <-blockChan:
|
||||
@@ -206,6 +215,7 @@ func TestSyncService_StopCleanly(t *testing.T) {
|
||||
clockWaiter: gs,
|
||||
initialSyncComplete: make(chan struct{}),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(t.Context(), &r)
|
||||
markInitSyncComplete(t, &r)
|
||||
|
||||
go r.startDiscoveryAndSubscriptions()
|
||||
@@ -252,7 +262,7 @@ func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
|
||||
// Create service with connected peers
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -265,6 +275,7 @@ func TestService_Stop_SendsGoodbyeMessages(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
@@ -330,7 +341,7 @@ func TestService_Stop_TimeoutHandling(t *testing.T) {
|
||||
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -343,6 +354,7 @@ func TestService_Stop_TimeoutHandling(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
@@ -391,7 +403,7 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
|
||||
|
||||
d := dbTest.SetupDB(t)
|
||||
chain := &mockChain.ChainService{Genesis: time.Now(), ValidatorsRoot: [32]byte{}}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ctx, cancel := context.WithCancel(t.Context())
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{
|
||||
@@ -404,6 +416,7 @@ func TestService_Stop_ConcurrentGoodbyeMessages(t *testing.T) {
|
||||
cancel: cancel,
|
||||
rateLimiter: newRateLimiter(p1),
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(ctx, r)
|
||||
|
||||
// Initialize context map for RPC
|
||||
ctxMap, err := ContextByteVersionsForValRoot(chain.ValidatorsRoot)
|
||||
|
||||
@@ -4,9 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/cache"
|
||||
@@ -20,8 +18,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v7/config/features"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v7/monitoring/tracing/trace"
|
||||
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v7/runtime/messagehandler"
|
||||
"github.com/OffchainLabs/prysm/v7/time/slots"
|
||||
@@ -31,124 +27,12 @@ import (
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const pubsubMessageTimeout = 30 * time.Second
|
||||
|
||||
var errInvalidDigest = errors.New("invalid digest")
|
||||
|
||||
// wrappedVal represents a gossip validator which also returns an error along with the result.
|
||||
type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.ValidationResult, error)
|
||||
|
||||
// subHandler represents handler for a given subscription.
|
||||
type subHandler func(context.Context, proto.Message) error
|
||||
|
||||
// noopHandler is used for subscriptions that do not require anything to be done.
|
||||
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
|
||||
// set of gossipsub subnets.
|
||||
type subscribeParameters struct {
|
||||
topicFormat string
|
||||
validate wrappedVal
|
||||
handle subHandler
|
||||
nse params.NetworkScheduleEntry
|
||||
// getSubnetsToJoin is a function that returns all subnets the node should join.
|
||||
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool
|
||||
// getSubnetsRequiringPeers is a function that returns all subnets that require peers to be found
|
||||
// but for which no subscriptions are needed.
|
||||
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool
|
||||
}
|
||||
|
||||
// shortTopic is a less verbose version of topic strings used for logging.
|
||||
func (p subscribeParameters) shortTopic() string {
|
||||
short := p.topicFormat
|
||||
fmtLen := len(short)
|
||||
if fmtLen >= 3 && short[fmtLen-3:] == "_%d" {
|
||||
short = short[:fmtLen-3]
|
||||
}
|
||||
return fmt.Sprintf(short, p.nse.ForkDigest)
|
||||
}
|
||||
|
||||
func (p subscribeParameters) logFields() logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"topic": p.shortTopic(),
|
||||
}
|
||||
}
|
||||
|
||||
// fullTopic is the fully qualified topic string, given to gossipsub.
|
||||
func (p subscribeParameters) fullTopic(subnet uint64, suffix string) string {
|
||||
return fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, subnet) + suffix
|
||||
}
|
||||
|
||||
// subnetTracker keeps track of which subnets we are subscribed to, out of the set of
|
||||
// possible subnets described by a `subscribeParameters`.
|
||||
type subnetTracker struct {
|
||||
subscribeParameters
|
||||
mu sync.RWMutex
|
||||
subscriptions map[uint64]*pubsub.Subscription
|
||||
}
|
||||
|
||||
func newSubnetTracker(p subscribeParameters) *subnetTracker {
|
||||
return &subnetTracker{
|
||||
subscribeParameters: p,
|
||||
subscriptions: make(map[uint64]*pubsub.Subscription),
|
||||
}
|
||||
}
|
||||
|
||||
// unwanted takes a list of wanted subnets and returns a list of currently subscribed subnets that are not included.
|
||||
func (t *subnetTracker) unwanted(wanted map[uint64]bool) []uint64 {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
unwanted := make([]uint64, 0, len(t.subscriptions))
|
||||
for subnet := range t.subscriptions {
|
||||
if wanted == nil || !wanted[subnet] {
|
||||
unwanted = append(unwanted, subnet)
|
||||
}
|
||||
}
|
||||
return unwanted
|
||||
}
|
||||
|
||||
// missing takes a list of wanted subnets and returns a list of wanted subnets that are not currently tracked.
|
||||
func (t *subnetTracker) missing(wanted map[uint64]bool) []uint64 {
|
||||
t.mu.RLock()
|
||||
defer t.mu.RUnlock()
|
||||
missing := make([]uint64, 0, len(wanted))
|
||||
for subnet := range wanted {
|
||||
if _, ok := t.subscriptions[subnet]; !ok {
|
||||
missing = append(missing, subnet)
|
||||
}
|
||||
}
|
||||
return missing
|
||||
}
|
||||
|
||||
// cancelSubscription cancels and removes the subscription for a given subnet.
|
||||
func (t *subnetTracker) cancelSubscription(subnet uint64) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
defer delete(t.subscriptions, subnet)
|
||||
|
||||
sub := t.subscriptions[subnet]
|
||||
if sub == nil {
|
||||
return
|
||||
}
|
||||
sub.Cancel()
|
||||
}
|
||||
|
||||
// track asks subscriptionTracker to hold on to the subscription for a given subnet so
|
||||
// that we can remember that it is tracked and cancel its context when it's time to unsubscribe.
|
||||
func (t *subnetTracker) track(subnet uint64, sub *pubsub.Subscription) {
|
||||
if sub == nil {
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.subscriptions[subnet] = sub
|
||||
}
|
||||
|
||||
// noopValidator is a no-op that only decodes the message, but does not check its contents.
|
||||
func (s *Service) noopValidator(_ context.Context, _ peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
@@ -192,272 +76,6 @@ func (s *Service) activeSyncSubnetIndices(currentSlot primitives.Slot) map[uint6
|
||||
return mapFromSlice(subscriptions)
|
||||
}
|
||||
|
||||
// spawn allows the Service to use a custom function for launching goroutines.
|
||||
// This is useful in tests where we can set spawner to a sync.WaitGroup and
|
||||
// wait for the spawned goroutines to finish.
|
||||
func (s *Service) spawn(f func()) {
|
||||
if s.subscriptionSpawner != nil {
|
||||
s.subscriptionSpawner(f)
|
||||
} else {
|
||||
go f()
|
||||
}
|
||||
}
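
Note (illustrative, not part of the diff): a test can take advantage of this hook by pointing subscriptionSpawner at a WaitGroup-backed launcher, so the spawned subscription goroutines can be awaited deterministically. A sketch, assuming a *Service s inside package sync:

	// Sketch only: make spawned subscription goroutines awaitable in a test.
	var wg sync.WaitGroup
	s.subscriptionSpawner = func(f func()) {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f()
		}()
	}
	// ...trigger registerSubscribers / subscriptions here...
	wg.Wait()
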
|
||||
|
||||
// Register PubSub subscribers
|
||||
func (s *Service) registerSubscribers(nse params.NetworkScheduleEntry) bool {
|
||||
// If we have already registered for this fork digest, exit early.
|
||||
if s.digestActionDone(nse.ForkDigest, registerGossipOnce) {
|
||||
return false
|
||||
}
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.BlockSubnetTopicFormat, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AggregateAndProofSubnetTopicFormat, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ExitSubnetTopicFormat, s.validateVoluntaryExit, s.voluntaryExitSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.ProposerSlashingSubnetTopicFormat, s.validateProposerSlashing, s.proposerSlashingSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(p2p.AttesterSlashingSubnetTopicFormat, s.validateAttesterSlashing, s.attesterSlashingSubscriber, nse)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.AttestationSubnetTopicFormat,
|
||||
validate: s.validateCommitteeIndexBeaconAttestation,
|
||||
handle: s.committeeIndexBeaconAttestationSubscriber,
|
||||
getSubnetsToJoin: s.persistentAndAggregatorSubnetIndices,
|
||||
getSubnetsRequiringPeers: attesterSubnetIndices,
|
||||
nse: nse,
|
||||
})
|
||||
})
|
||||
|
||||
// New gossip topic in Altair
|
||||
if params.BeaconConfig().AltairForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.SyncContributionAndProofSubnetTopicFormat,
|
||||
s.validateSyncContributionAndProof,
|
||||
s.syncContributionAndProofSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.SyncCommitteeSubnetTopicFormat,
|
||||
validate: s.validateSyncCommitteeMessage,
|
||||
handle: s.syncCommitteeMessageSubscriber,
|
||||
getSubnetsToJoin: s.activeSyncSubnetIndices,
|
||||
nse: nse,
|
||||
})
|
||||
})
|
||||
|
||||
if features.Get().EnableLightClient {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientOptimisticUpdateTopicFormat,
|
||||
s.validateLightClientOptimisticUpdate,
|
||||
noopHandler,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.LightClientFinalityUpdateTopicFormat,
|
||||
s.validateLightClientFinalityUpdate,
|
||||
noopHandler,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// New gossip topic in Capella
|
||||
if params.BeaconConfig().CapellaForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribe(
|
||||
p2p.BlsToExecutionChangeSubnetTopicFormat,
|
||||
s.validateBlsToExecutionChange,
|
||||
s.blsToExecutionChangeSubscriber,
|
||||
nse,
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Deneb, removed in Electra
|
||||
if params.BeaconConfig().DenebForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().ElectraForkEpoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: func(primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCount)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Electra, removed in Fulu
|
||||
if params.BeaconConfig().ElectraForkEpoch <= nse.Epoch && nse.Epoch < params.BeaconConfig().FuluForkEpoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.BlobSubnetTopicFormat,
|
||||
validate: s.validateBlob,
|
||||
handle: s.blobSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: func(currentSlot primitives.Slot) map[uint64]bool {
|
||||
return mapFromCount(params.BeaconConfig().BlobsidecarSubnetCountElectra)
|
||||
},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// New gossip topic in Fulu.
|
||||
if params.BeaconConfig().FuluForkEpoch <= nse.Epoch {
|
||||
s.spawn(func() {
|
||||
s.subscribeWithParameters(subscribeParameters{
|
||||
topicFormat: p2p.DataColumnSubnetTopicFormat,
|
||||
validate: s.validateDataColumn,
|
||||
handle: s.dataColumnSubscriber,
|
||||
nse: nse,
|
||||
getSubnetsToJoin: s.dataColumnSubnetIndices,
|
||||
getSubnetsRequiringPeers: s.allDataColumnSubnets,
|
||||
})
|
||||
})
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Service) subscriptionRequestExpired(nse params.NetworkScheduleEntry) bool {
|
||||
next := params.NextNetworkScheduleEntry(nse.Epoch)
|
||||
return next.Epoch != nse.Epoch && s.cfg.clock.CurrentEpoch() > next.Epoch
|
||||
}
|
||||
|
||||
func (s *Service) subscribeLogFields(topic string, nse params.NetworkScheduleEntry) logrus.Fields {
|
||||
return logrus.Fields{
|
||||
"topic": topic,
|
||||
"digest": nse.ForkDigest,
|
||||
"forkEpoch": nse.Epoch,
|
||||
"currentEpoch": s.cfg.clock.CurrentEpoch(),
|
||||
}
|
||||
}
|
||||
|
||||
// subscribe to a given topic with a given validator and subscription handler.
|
||||
// The base protobuf message is used to initialize new messages for decoding.
|
||||
func (s *Service) subscribe(topic string, validator wrappedVal, handle subHandler, nse params.NetworkScheduleEntry) {
|
||||
if err := s.waitForInitialSync(s.ctx); err != nil {
|
||||
log.WithFields(s.subscribeLogFields(topic, nse)).WithError(err).Debug("Context cancelled while waiting for initial sync, not subscribing to topic")
|
||||
return
|
||||
}
|
||||
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
|
||||
if s.subscriptionRequestExpired(nse) {
|
||||
// If we are already past the next fork epoch, do not subscribe to this topic.
|
||||
log.WithFields(s.subscribeLogFields(topic, nse)).Debug("Not subscribing to topic as we are already past the next fork epoch")
|
||||
return
|
||||
}
|
||||
base := p2p.GossipTopicMappings(topic, nse.Epoch)
|
||||
if base == nil {
|
||||
// Impossible condition as it would mean topic does not exist.
|
||||
panic(fmt.Sprintf("%s is not mapped to any message in GossipTopicMappings", topic)) // lint:nopanic -- Impossible condition.
|
||||
}
|
||||
s.subscribeWithBase(s.addDigestToTopic(topic, nse.ForkDigest), validator, handle)
|
||||
}
|
||||
|
||||
func (s *Service) subscribeWithBase(topic string, validator wrappedVal, handle subHandler) *pubsub.Subscription {
|
||||
topic += s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
log := log.WithField("topic", topic)
|
||||
|
||||
// Do not resubscribe already seen subscriptions.
|
||||
ok := s.subHandler.topicExists(topic)
|
||||
if ok {
|
||||
log.WithField("topic", topic).Error("Provided topic already has an active subscription running")
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := s.cfg.p2p.PubSub().RegisterTopicValidator(s.wrapAndReportValidation(topic, validator)); err != nil {
|
||||
log.WithError(err).Error("Could not register validator for topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
sub, err := s.cfg.p2p.SubscribeToTopic(topic)
|
||||
if err != nil {
|
||||
// Any error subscribing to a PubSub topic would be the result of a misconfiguration of
|
||||
// libp2p PubSub library or a subscription request to a topic that fails to match the topic
|
||||
// subscription filter.
|
||||
log.WithError(err).Error("Could not subscribe topic")
|
||||
return nil
|
||||
}
|
||||
|
||||
s.subHandler.addTopic(sub.Topic(), sub)
|
||||
|
||||
// Pipeline decodes the incoming subscription data, runs the validation, and handles the
|
||||
// message.
|
||||
pipeline := func(msg *pubsub.Message) {
|
||||
ctx, cancel := context.WithTimeout(s.ctx, pubsubMessageTimeout)
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(ctx, "sync.pubsub")
|
||||
defer span.End()
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
tracing.AnnotateError(span, fmt.Errorf("panic occurred: %v", r))
|
||||
log.WithField("error", r).
|
||||
WithField("recoveredAt", "subscribeWithBase").
|
||||
WithField("stack", string(debug.Stack())).
|
||||
Error("Panic occurred")
|
||||
}
|
||||
}()
|
||||
|
||||
span.SetAttributes(trace.StringAttribute("topic", topic))
|
||||
|
||||
if msg.ValidatorData == nil {
|
||||
log.Error("Received nil message on pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
|
||||
if err := handle(ctx, msg.ValidatorData.(proto.Message)); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Could not handle p2p pubsub")
|
||||
messageFailedProcessingCounter.WithLabelValues(topic).Inc()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The main message loop for receiving incoming messages from this subscription.
|
||||
messageLoop := func() {
|
||||
for {
|
||||
msg, err := sub.Next(s.ctx)
|
||||
if err != nil {
|
||||
// This should only happen when the context is cancelled or subscription is cancelled.
|
||||
if !errors.Is(err, pubsub.ErrSubscriptionCancelled) { // Only log a warning on unexpected errors.
|
||||
log.WithError(err).Warn("Subscription next failed")
|
||||
}
|
||||
// Cancel subscription in the event of an error, as we are
|
||||
// now exiting topic event loop.
|
||||
sub.Cancel()
|
||||
return
|
||||
}
|
||||
|
||||
if msg.ReceivedFrom == s.cfg.p2p.PeerID() {
|
||||
continue
|
||||
}
|
||||
|
||||
go pipeline(msg)
|
||||
}
|
||||
}
|
||||
|
||||
go messageLoop()
|
||||
log.WithField("topic", topic).Info("Subscribed to")
|
||||
return sub
|
||||
}
|
||||
|
||||
// Wrap the pubsub validator with a metric monitoring function. This function increments the
|
||||
// appropriate counter if the particular message fails to validate.
|
||||
func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, pubsub.ValidatorEx) {
|
||||
@@ -527,151 +145,6 @@ func (s *Service) wrapAndReportValidation(topic string, v wrappedVal) (string, p
|
||||
}
|
||||
}
|
||||
|
||||
// pruneNotWanted unsubscribes from topics we are currently subscribed to but that are
|
||||
// not in the list of wanted subnets.
|
||||
func (s *Service) pruneNotWanted(t *subnetTracker, wantedSubnets map[uint64]bool) {
|
||||
for _, subnet := range t.unwanted(wantedSubnets) {
|
||||
t.cancelSubscription(subnet)
|
||||
s.unSubscribeFromTopic(t.fullTopic(subnet, s.cfg.p2p.Encoding().ProtocolSuffix()))
|
||||
}
|
||||
}
|
||||
|
||||
// subscribeWithParameters subscribes to a list of subnets.
|
||||
func (s *Service) subscribeWithParameters(p subscribeParameters) {
|
||||
ctx, cancel := context.WithCancel(s.ctx)
|
||||
defer cancel()
|
||||
|
||||
tracker := newSubnetTracker(p)
|
||||
go s.ensurePeers(ctx, tracker)
|
||||
go s.logMinimumPeersPerSubnet(ctx, p)
|
||||
|
||||
if err := s.waitForInitialSync(ctx); err != nil {
|
||||
log.WithFields(p.logFields()).WithError(err).Debug("Could not subscribe to subnets as initial sync failed")
|
||||
return
|
||||
}
|
||||
s.trySubscribeSubnets(tracker)
|
||||
slotTicker := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer slotTicker.Done()
|
||||
for {
|
||||
select {
|
||||
case <-slotTicker.C():
|
||||
// Check if this subscribe request is still valid - we may have crossed another fork epoch while waiting for initial sync.
|
||||
if s.subscriptionRequestExpired(p.nse) {
|
||||
// If we are already past the next fork epoch, do not subscribe to this topic.
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": p.shortTopic(),
|
||||
"digest": p.nse.ForkDigest,
|
||||
"epoch": p.nse.Epoch,
|
||||
"currentEpoch": s.cfg.clock.CurrentEpoch(),
|
||||
}).Debug("Exiting topic subnet subscription loop")
|
||||
return
|
||||
}
|
||||
s.trySubscribeSubnets(tracker)
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// trySubscribeSubnets attempts to subscribe to any missing subnets that we should be
// subscribed to, provided initial sync is complete.
|
||||
func (s *Service) trySubscribeSubnets(t *subnetTracker) {
|
||||
subnetsToJoin := t.getSubnetsToJoin(s.cfg.clock.CurrentSlot())
|
||||
s.pruneNotWanted(t, subnetsToJoin)
|
||||
for _, subnet := range t.missing(subnetsToJoin) {
|
||||
// TODO: subscribeWithBase appends the protocol suffix, other methods don't. Make this consistent.
|
||||
topic := t.fullTopic(subnet, "")
|
||||
t.track(subnet, s.subscribeWithBase(topic, t.validate, t.handle))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) ensurePeers(ctx context.Context, tracker *subnetTracker) {
|
||||
// Try once immediately so we don't have to wait until the next slot.
|
||||
s.tryEnsurePeers(ctx, tracker)
|
||||
|
||||
oncePerSlot := slots.NewSlotTicker(s.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
|
||||
defer oncePerSlot.Done()
|
||||
for {
|
||||
select {
|
||||
case <-oncePerSlot.C():
|
||||
s.tryEnsurePeers(ctx, tracker)
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) tryEnsurePeers(ctx context.Context, tracker *subnetTracker) {
|
||||
timeout := (time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) - 100*time.Millisecond
|
||||
minPeers := flags.Get().MinimumPeersPerSubnet
|
||||
neededSubnets := computeAllNeededSubnets(s.cfg.clock.CurrentSlot(), tracker.getSubnetsToJoin, tracker.getSubnetsRequiringPeers)
|
||||
ctx, cancel := context.WithTimeout(ctx, timeout)
|
||||
defer cancel()
|
||||
err := s.cfg.p2p.FindAndDialPeersWithSubnets(ctx, tracker.topicFormat, tracker.nse.ForkDigest, minPeers, neededSubnets)
|
||||
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
|
||||
log.WithFields(tracker.logFields()).WithError(err).Debug("Could not find peers with subnets")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) logMinimumPeersPerSubnet(ctx context.Context, p subscribeParameters) {
|
||||
logFields := p.logFields()
|
||||
minimumPeersPerSubnet := flags.Get().MinimumPeersPerSubnet
|
||||
// Warn the user if we are not subscribed to enough peers in the subnets.
|
||||
log := log.WithField("minimum", minimumPeersPerSubnet)
|
||||
logTicker := time.NewTicker(5 * time.Minute)
|
||||
defer logTicker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-logTicker.C:
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
subnetsToFindPeersIndex := computeAllNeededSubnets(currentSlot, p.getSubnetsToJoin, p.getSubnetsRequiringPeers)
|
||||
|
||||
isSubnetWithMissingPeers := false
|
||||
// Find new peers for wanted subnets if needed.
|
||||
for index := range subnetsToFindPeersIndex {
|
||||
topic := fmt.Sprintf(p.topicFormat, p.nse.ForkDigest, index)
|
||||
|
||||
// Check if we have enough peers in the subnet. Skip if we do.
|
||||
if count := s.connectedPeersCount(topic); count < minimumPeersPerSubnet {
|
||||
isSubnetWithMissingPeers = true
|
||||
log.WithFields(logrus.Fields{
|
||||
"topic": topic,
|
||||
"actual": count,
|
||||
}).Debug("Not enough connected peers")
|
||||
}
|
||||
}
|
||||
if !isSubnetWithMissingPeers {
|
||||
log.WithFields(logFields).Debug("All subnets have enough connected peers")
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) unSubscribeFromTopic(topic string) {
|
||||
log.WithField("topic", topic).Info("Unsubscribed from")
|
||||
if err := s.cfg.p2p.PubSub().UnregisterTopicValidator(topic); err != nil {
|
||||
log.WithError(err).Error("Could not unregister topic validator")
|
||||
}
|
||||
sub := s.subHandler.subForTopic(topic)
|
||||
if sub != nil {
|
||||
sub.Cancel()
|
||||
}
|
||||
s.subHandler.removeTopic(topic)
|
||||
if err := s.cfg.p2p.LeaveTopic(topic); err != nil {
|
||||
log.WithError(err).Error("Unable to leave topic")
|
||||
}
|
||||
}
|
||||
|
||||
// connectedPeersCount counts how many peer for a given topic are connected to the node.
|
||||
func (s *Service) connectedPeersCount(subnetTopic string) int {
|
||||
topic := subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
peersWithSubnet := s.cfg.p2p.PubSub().ListPeers(topic)
|
||||
return len(peersWithSubnet)
|
||||
}
|
||||
|
||||
func (s *Service) dataColumnSubnetIndices(primitives.Slot) map[uint64]bool {
|
||||
nodeID := s.cfg.p2p.NodeID()
|
||||
|
||||
@@ -816,34 +289,6 @@ func isDigestValid(digest [4]byte, clock *startup.Clock) (bool, error) {
|
||||
return params.ForkDigest(current) == digest, nil
|
||||
}
|
||||
|
||||
// computeAllNeededSubnets computes the subnets we want to join
|
||||
// and the subnets for which we want to find peers.
|
||||
func computeAllNeededSubnets(
|
||||
currentSlot primitives.Slot,
|
||||
getSubnetsToJoin func(currentSlot primitives.Slot) map[uint64]bool,
|
||||
getSubnetsRequiringPeers func(currentSlot primitives.Slot) map[uint64]bool,
|
||||
) map[uint64]bool {
|
||||
// Retrieve the subnets we want to join.
|
||||
subnetsToJoin := getSubnetsToJoin(currentSlot)
|
||||
|
||||
// Retrieve the subnets we want to find peers into.
|
||||
subnetsRequiringPeers := make(map[uint64]bool)
|
||||
if getSubnetsRequiringPeers != nil {
|
||||
subnetsRequiringPeers = getSubnetsRequiringPeers(currentSlot)
|
||||
}
|
||||
|
||||
// Combine the two maps to get all needed subnets.
|
||||
neededSubnets := make(map[uint64]bool, len(subnetsToJoin)+len(subnetsRequiringPeers))
|
||||
for subnet := range subnetsToJoin {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
for subnet := range subnetsRequiringPeers {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
|
||||
return neededSubnets
|
||||
}
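
Note (illustrative, not part of the diff): the union semantics are easiest to see with concrete inputs; a sketch using hypothetical subnet sets:

	// Sketch only: joining subnets {1, 2} while additionally needing peers on {2, 3}.
	toJoin := func(primitives.Slot) map[uint64]bool { return map[uint64]bool{1: true, 2: true} }
	needPeers := func(primitives.Slot) map[uint64]bool { return map[uint64]bool{2: true, 3: true} }
	needed := computeAllNeededSubnets(0, toJoin, needPeers) // map[1:true 2:true 3:true]
	_ = needed
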
|
||||
|
||||
func agentString(pid peer.ID, hst host.Host) string {
|
||||
rawVersion, storeErr := hst.Peerstore().Get(pid, "AgentVersion")
|
||||
agString, ok := rawVersion.(string)
|
||||
|
||||
199
beacon-chain/sync/subscription_controller.go
Normal file
@@ -0,0 +1,199 @@
package sync

import (
	"context"
	"fmt"
	"sync"

	"github.com/OffchainLabs/prysm/v7/config/params"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/time/slots"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type topicFamilyKey struct {
	topicName  string
	forkDigest [4]byte
}

func topicFamilyKeyFrom(tf TopicFamily) topicFamilyKey {
	return topicFamilyKey{topicName: tf.Name(), forkDigest: tf.NetworkScheduleEntry().ForkDigest}
}

type SubscriptionController struct {
	ctx    context.Context
	cancel context.CancelFunc

	syncService *Service
	wg          sync.WaitGroup

	mu                  sync.RWMutex
	activeTopicFamilies map[topicFamilyKey]TopicFamily
}

func NewSubscriptionController(ctx context.Context, s *Service) *SubscriptionController {
	ctx, cancel := context.WithCancel(ctx)
	return &SubscriptionController{
		ctx:                 ctx,
		cancel:              cancel,
		syncService:         s,
		activeTopicFamilies: make(map[topicFamilyKey]TopicFamily),
	}
}

func (g *SubscriptionController) Start() {
	currentEpoch := g.syncService.cfg.clock.CurrentEpoch()
	if err := g.syncService.waitForInitialSync(g.ctx); err != nil {
		log.WithError(err).Debug("Context cancelled while waiting for initial sync, not starting SubscriptionController")
		return
	}

	g.updateActiveTopicFamilies(currentEpoch)
	g.wg.Go(func() { g.controlLoop() })

	log.Info("SubscriptionController started")
}

func (g *SubscriptionController) controlLoop() {
	slotTicker := slots.NewSlotTicker(g.syncService.cfg.clock.GenesisTime(), params.BeaconConfig().SecondsPerSlot)
	defer slotTicker.Done()

	for {
		select {
		case <-slotTicker.C():
			currentEpoch := g.syncService.cfg.clock.CurrentEpoch()
			g.updateActiveTopicFamilies(currentEpoch)

		case <-g.ctx.Done():
			return
		}
	}
}

func (g *SubscriptionController) updateActiveTopicFamilies(currentEpoch primitives.Epoch) {
	slot := g.syncService.cfg.clock.CurrentSlot()
	currentNSE := params.GetNetworkScheduleEntry(currentEpoch)

	families := TopicFamiliesForEpoch(currentEpoch, g.syncService, currentNSE)

	// also subscribe to topics for the next epoch if there is a fork in the next epoch
	nextNSE := params.GetNetworkScheduleEntry(currentEpoch + 1)
	if currentNSE.Epoch != nextNSE.Epoch {
		families = append(families, TopicFamiliesForEpoch(nextNSE.Epoch, g.syncService, nextNSE)...)
	}

	g.mu.Lock()
	defer g.mu.Unlock()

	// register topic families for the current NSE -> this is idempotent
	for _, family := range families {
		key := topicFamilyKeyFrom(family)
		existing, seen := g.activeTopicFamilies[key]
		if !seen {
			g.activeTopicFamilies[key] = family
			existing = family
		}

		switch tf := existing.(type) {
		case DynamicShardedTopicFamily:
			tf.UnsubscribeForSlot(slot)
			tf.SubscribeForSlot(slot)
		case ShardedTopicFamily:
			if !seen {
				tf.Subscribe()
			}
		}
	}

	// If we are still in our genesis fork version then exit early.
	if currentNSE.Epoch == params.BeaconConfig().GenesisEpoch {
		return
	}
	if currentEpoch < currentNSE.Epoch+1 {
		return
	}
	previous := params.GetNetworkScheduleEntry(currentNSE.Epoch - 1)

	// remove topic families for the previous NSE -> this is idempotent
	for key, family := range g.activeTopicFamilies {
		if key.forkDigest == previous.ForkDigest {
			family.UnsubscribeAll()
			delete(g.activeTopicFamilies, key)

			log.WithFields(logrus.Fields{
				"topicName":  key.topicName,
				"forkDigest": fmt.Sprintf("%#x", key.forkDigest),
			}).Info("Removed topic family")
		}
	}
}

func (g *SubscriptionController) Stop() {
	g.cancel()
	g.wg.Wait()

	g.mu.Lock()
	defer g.mu.Unlock()

	for _, family := range g.activeTopicFamilies {
		family.UnsubscribeAll()
	}
}

func (g *SubscriptionController) GetCurrentActiveTopicsWithMinPeerCount() map[string]int {
	g.mu.RLock()
	defer g.mu.RUnlock()

	slot := g.syncService.cfg.clock.CurrentSlot()
	topics := make(map[string]int)
	for _, f := range g.activeTopicFamilies {
		tfm, ok := f.(DynamicShardedTopicFamily)
		if !ok {
			continue
		}
		for topic, count := range tfm.TopicsWithMinPeerCount(slot) {
			topics[topic] += count
		}
	}
	return topics
}

func (g *SubscriptionController) ExtractTopics(_ context.Context, node *enode.Node) ([]string, error) {
	if node == nil {
		return nil, errors.New("enode is nil")
	}

	g.mu.RLock()
	defer g.mu.RUnlock()

	families := make([]DynamicShardedTopicFamily, 0, len(g.activeTopicFamilies))
	for _, f := range g.activeTopicFamilies {
		if tfm, ok := f.(DynamicShardedTopicFamily); ok {
			families = append(families, tfm)
		}
	}

	// Collect topics from dynamic families only, de-duplicated.
	topicSet := make(map[string]struct{})
	for _, df := range families {
		topics, err := df.ExtractTopicsForNode(node)
		if err != nil {
			log.WithError(err).WithFields(logrus.Fields{
				"topicFamily": fmt.Sprintf("%T", df),
			}).Debug("Failed to get topics for node from family")
			continue
		}
		for _, t := range topics {
			topicSet[t] = struct{}{}
		}
	}

	out := make([]string, 0, len(topicSet))
	for t := range topicSet {
		out = append(out, t)
	}
	return out, nil
}
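
Note (illustrative, not part of the diff): the controller re-evaluates the schedule once per slot and is idempotent, so it subscribes to the next fork digest's topic families one epoch before a fork and drops the previous digest's families once the chain is an epoch past it. Lifecycle wiring is minimal; a sketch, assuming a fully configured *Service s and its context ctx (mirroring the service.go hunks in this change):

	// Sketch only: construct, start, and stop the controller with the sync service.
	ctrl := NewSubscriptionController(ctx, s)
	go ctrl.Start() // waits for initial sync, then updates topic families every slot

	// Discovery components can consume its topic views:
	//   ctrl.ExtractTopics(ctx, node)                 // topics a discovered node is relevant for
	//   ctrl.GetCurrentActiveTopicsWithMinPeerCount() // active dynamic topics with peer targets

	defer ctrl.Stop() // cancels the control loop and unsubscribes all active families
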
545
beacon-chain/sync/subscription_controller_test.go
Normal file
@@ -0,0 +1,545 @@
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/async/abool"
|
||||
mockChain "github.com/OffchainLabs/prysm/v7/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v7/beacon-chain/p2p/testing"
|
||||
mockSync "github.com/OffchainLabs/prysm/v7/beacon-chain/sync/initial-sync/testing"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v7/genesis"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v7/testing/require"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
)
|
||||
|
||||
var _ DynamicShardedTopicFamily = (*testDynFamly)(nil)
|
||||
|
||||
// testDynFamly is a test implementation of a dynamic-subnet topic family.
|
||||
type testDynFamly struct {
|
||||
baseTopicFamily
|
||||
topics []string
|
||||
name string
|
||||
topicsWithMinPeers map[string]int
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Validator() wrappedVal {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *testDynFamly) Handler() subHandler {
|
||||
return noopHandler
|
||||
}
|
||||
|
||||
func (f *testDynFamly) GetFullTopicString(subnet uint64) string {
|
||||
return fmt.Sprintf("topic-%d", subnet)
|
||||
}
|
||||
|
||||
func (f *testDynFamly) TopicsToSubscribeForSlot(_ primitives.Slot) []string {
|
||||
return f.topics
|
||||
}
|
||||
|
||||
func (f *testDynFamly) ExtractTopicsForNode(_ *enode.Node) ([]string, error) {
|
||||
return f.topics, nil
|
||||
}
|
||||
|
||||
func (f *testDynFamly) SubscribeForSlot(_ primitives.Slot) {
|
||||
f.baseTopicFamily.subscribeToTopics(f.topics)
|
||||
}
|
||||
|
||||
func (f *testDynFamly) UnsubscribeForSlot(_ primitives.Slot) {}
|
||||
|
||||
func (f *testDynFamly) TopicsWithMinPeerCount(_ primitives.Slot) map[string]int {
|
||||
return f.topicsWithMinPeers
|
||||
}
|
||||
|
||||
type staticTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
name string
|
||||
topics []string
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Validator() wrappedVal {
|
||||
return f.validator
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Handler() subHandler {
|
||||
return f.handler
|
||||
}
|
||||
|
||||
func (f *staticTopicFamily) Subscribe() {
|
||||
f.baseTopicFamily.subscribeToTopics(f.topics)
|
||||
}
|
||||
|
||||
func testSubscriptionControllerService(t *testing.T, current primitives.Epoch) *Service {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
peer2peer := p2ptest.NewTestP2P(t)
|
||||
chainService := &mockChain.ChainService{
|
||||
Genesis: genesis.Time(),
|
||||
ValidatorsRoot: genesis.ValidatorsRoot(),
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 10*time.Millisecond)
|
||||
r := &Service{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
cfg: &config{
|
||||
p2p: peer2peer,
|
||||
chain: chainService,
|
||||
clock: defaultClockWithTimeAtEpoch(current),
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
subHandler: newSubTopicHandler(),
|
||||
initialSyncComplete: closedChan,
|
||||
}
|
||||
r.subscriptionController = NewSubscriptionController(context.Background(), r)
|
||||
return r
|
||||
}
|
||||
|
||||
func TestSubscriptionController_CheckForNextEpochForkSubscriptions(t *testing.T) {
|
||||
closedChan := make(chan struct{})
|
||||
close(closedChan)
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
params.BeaconConfig().FuluForkEpoch = params.BeaconConfig().ElectraForkEpoch + 4096*2
|
||||
params.BeaconConfig().InitializeForkSchedule()
|
||||
|
||||
cfg := params.BeaconConfig()
|
||||
altairForkEpoch := cfg.AltairForkEpoch
|
||||
bellatrixForkEpoch := cfg.BellatrixForkEpoch
|
||||
capellaForkEpoch := cfg.CapellaForkEpoch
|
||||
denebForkEpoch := cfg.DenebForkEpoch
|
||||
electraForkEpoch := cfg.ElectraForkEpoch
|
||||
fuluForkEpoch := cfg.FuluForkEpoch
|
||||
blobsidecarSubnetCount := cfg.BlobsidecarSubnetCount
|
||||
blobsidecarSubnetCountElectra := cfg.BlobsidecarSubnetCountElectra
|
||||
|
||||
// Pre-compute digests using current config state
|
||||
altairDigest := params.ForkDigest(altairForkEpoch)
|
||||
bellatrixDigest := params.ForkDigest(bellatrixForkEpoch)
|
||||
capellaDigest := params.ForkDigest(capellaForkEpoch)
|
||||
denebDigest := params.ForkDigest(denebForkEpoch)
|
||||
electraDigest := params.ForkDigest(electraForkEpoch)
|
||||
fuluDigest := params.ForkDigest(fuluForkEpoch)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
svcCreator func(t *testing.T) *Service
|
||||
checkRegistration func(t *testing.T, s *Service)
|
||||
epochAtRegistration func(primitives.Epoch) primitives.Epoch
|
||||
forkEpoch primitives.Epoch
|
||||
nextForkEpoch primitives.Epoch
|
||||
forkDigest [4]byte
|
||||
nextForkDigest [4]byte
|
||||
}{
|
||||
{
|
||||
name: "no fork in the next epoch",
|
||||
forkEpoch: altairForkEpoch,
|
||||
forkDigest: altairDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 2 },
|
||||
nextForkEpoch: bellatrixForkEpoch,
|
||||
nextForkDigest: bellatrixDigest,
|
||||
checkRegistration: func(t *testing.T, s *Service) {},
|
||||
},
|
||||
{
|
||||
name: "altair fork in the next epoch",
|
||||
forkEpoch: altairForkEpoch,
|
||||
forkDigest: altairDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
nextForkEpoch: bellatrixForkEpoch,
|
||||
nextForkDigest: bellatrixDigest,
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
expected := fmt.Sprintf(p2p.SyncContributionAndProofSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), altairDigest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "capella fork in the next epoch",
|
||||
forkEpoch: capellaForkEpoch,
|
||||
forkDigest: capellaDigest,
|
||||
nextForkEpoch: denebForkEpoch,
|
||||
nextForkDigest: denebDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
expected := fmt.Sprintf(p2p.BlsToExecutionChangeSubnetTopicFormat+s.cfg.p2p.Encoding().ProtocolSuffix(), capellaDigest)
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), "subnet topic doesn't exist")
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "deneb fork in the next epoch",
|
||||
forkEpoch: denebForkEpoch,
|
||||
forkDigest: denebDigest,
|
||||
nextForkEpoch: electraForkEpoch,
|
||||
nextForkDigest: electraDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
subIndices := mapFromCount(blobsidecarSubnetCount)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, denebDigest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "electra fork in the next epoch",
|
||||
forkEpoch: electraForkEpoch,
|
||||
forkDigest: electraDigest,
|
||||
nextForkEpoch: fuluForkEpoch,
|
||||
nextForkDigest: fuluDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
subIndices := mapFromCount(blobsidecarSubnetCountElectra)
|
||||
for idx := range subIndices {
|
||||
topic := fmt.Sprintf(p2p.BlobSubnetTopicFormat, electraDigest, idx)
|
||||
expected := topic + s.cfg.p2p.Encoding().ProtocolSuffix()
|
||||
assert.Equal(t, true, s.subHandler.topicExists(expected), fmt.Sprintf("subnet topic %s doesn't exist", expected))
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fulu fork in the next epoch; should not have blob topics",
|
||||
forkEpoch: fuluForkEpoch,
|
||||
forkDigest: fuluDigest,
|
||||
nextForkEpoch: fuluForkEpoch,
|
||||
nextForkDigest: fuluDigest,
|
||||
epochAtRegistration: func(e primitives.Epoch) primitives.Epoch { return e - 1 },
|
||||
checkRegistration: func(t *testing.T, s *Service) {
|
||||
// Advance to two epochs after Fulu activation and assert no blob topics remain.
|
||||
target := fuluForkEpoch + 2
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(target)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
|
||||
for _, topic := range s.subHandler.allTopics() {
|
||||
if strings.Contains(topic, "/"+p2p.GossipBlobSidecarMessage) {
|
||||
t.Fatalf("blob topic still exists after Fulu+2: %s", topic)
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
current := tt.epochAtRegistration(tt.forkEpoch)
|
||||
s := testSubscriptionControllerService(t, current)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
tt.checkRegistration(t, s)
|
||||
|
||||
if current != tt.forkEpoch-1 {
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure the topics were registered for the upcoming fork
|
||||
// Use pre-computed digest from test struct to avoid race with parallel tests
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.forkDigest))
|
||||
|
||||
// After this point we are checking deregistration, which doesn't apply if there isn't a higher
|
||||
// nextForkEpoch.
|
||||
if tt.forkEpoch >= tt.nextForkEpoch {
|
||||
return
|
||||
}
|
||||
|
||||
// Move the clock to just before the next fork epoch and ensure deregistration is correct
|
||||
s.cfg.clock = defaultClockWithTimeAtEpoch(tt.nextForkEpoch - 1)
|
||||
s.subscriptionController.updateActiveTopicFamilies(s.cfg.clock.CurrentEpoch())
|
||||
|
||||
s.subscriptionController.updateActiveTopicFamilies(tt.nextForkEpoch)
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.forkDigest))
|
||||
// deregister as if it is the epoch after the next fork epoch
|
||||
s.subscriptionController.updateActiveTopicFamilies(tt.nextForkEpoch + 1)
|
||||
assert.Equal(t, false, s.subHandler.digestExists(tt.forkDigest))
|
||||
assert.Equal(t, true, s.subHandler.digestExists(tt.nextForkDigest))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscriptionController_ExtractTopics(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
|
||||
type tc struct {
|
||||
name string
|
||||
setup func(*SubscriptionController)
|
||||
ctx func() context.Context
|
||||
node *enode.Node
|
||||
want []string
|
||||
wantErr bool
|
||||
}
|
||||
|
||||
dummyNode := new(enode.Node)
|
||||
|
||||
tests := []tc{
|
||||
{
|
||||
name: "nil node returns error",
|
||||
setup: func(g *SubscriptionController) {},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: nil,
|
||||
want: nil,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "no families yields empty",
|
||||
setup: func(g *SubscriptionController) {},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "static family ignored",
|
||||
setup: func(g *SubscriptionController) {
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "single dynamic family topics returned",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{topics: []string{"t1", "t2"}, name: "Dyn1"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"t1", "t2"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families de-dup",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{topics: []string{"t1", "t2"}, name: "Dyn1"}
|
||||
f2 := &testDynFamly{topics: []string{"t2", "t3"}, name: "Dyn2"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"t1", "t2", "t3"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "mixed static and dynamic",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{topics: []string{"a", "b"}, name: "Dyn"}
|
||||
s1 := &staticTopicFamily{name: "Static"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn", forkDigest: [4]byte{9}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{9}}] = s1
|
||||
g.mu.Unlock()
|
||||
},
|
||||
ctx: func() context.Context { return context.Background() },
|
||||
node: dummyNode,
|
||||
want: []string{"a", "b"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
|
||||
s := &Service{}
|
||||
g := NewSubscriptionController(context.Background(), s)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset families for each subtest
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies = make(map[topicFamilyKey]TopicFamily)
|
||||
g.mu.Unlock()
|
||||
|
||||
tt.setup(g)
|
||||
topics, err := g.ExtractTopics(tt.ctx(), tt.node)
|
||||
if tt.wantErr {
|
||||
require.NotNil(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
got := map[string]bool{}
|
||||
for _, tpc := range topics {
|
||||
got[tpc] = true
|
||||
}
|
||||
want := map[string]bool{}
|
||||
for _, tpc := range tt.want {
|
||||
want[tpc] = true
|
||||
}
|
||||
require.Equal(t, len(want), len(got))
|
||||
for k := range want {
|
||||
require.Equal(t, true, got[k], "missing topic %s", k)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSubscriptionController_GetCurrentActiveTopicsWithMinPeerCount(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
genesis.StoreEmbeddedDuringTest(t, params.BeaconConfig().ConfigName)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
setup func(*SubscriptionController)
|
||||
want map[string]int
|
||||
}{
|
||||
{
|
||||
name: "no families yields empty map",
|
||||
setup: func(_ *SubscriptionController) {},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "static family ignored",
|
||||
setup: func(g *SubscriptionController) {
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{1, 2, 3, 4}}] = &staticTopicFamily{name: "StaticFam"}
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "single dynamic family returns topics with min peer counts",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
},
|
||||
{
|
||||
name: "dynamic family with nil topicsWithMinPeers returns empty",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynNil",
|
||||
topicsWithMinPeers: nil,
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynnil", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "dynamic family with empty topicsWithMinPeers returns empty",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynEmpty",
|
||||
topicsWithMinPeers: map[string]int{},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynempty", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families with disjoint topics",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/a": 8, "topic/b": 6},
|
||||
}
|
||||
f2 := &testDynFamly{
|
||||
name: "Dyn2",
|
||||
topicsWithMinPeers: map[string]int{"topic/c": 4, "topic/d": 2},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/a": 8, "topic/b": 6, "topic/c": 4, "topic/d": 2},
|
||||
},
|
||||
{
|
||||
name: "multiple dynamic families with overlapping topics - counts are summed",
|
||||
setup: func(g *SubscriptionController) {
|
||||
f1 := &testDynFamly{
|
||||
name: "Dyn1",
|
||||
topicsWithMinPeers: map[string]int{"topic/shared": 5, "topic/a": 3},
|
||||
}
|
||||
f2 := &testDynFamly{
|
||||
name: "Dyn2",
|
||||
topicsWithMinPeers: map[string]int{"topic/shared": 7, "topic/b": 2},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn1", forkDigest: [4]byte{0}}] = f1
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn2", forkDigest: [4]byte{0}}] = f2
|
||||
g.mu.Unlock()
|
||||
},
|
||||
// topic/shared: 5 + 7 = 12
|
||||
want: map[string]int{"topic/shared": 12, "topic/a": 3, "topic/b": 2},
|
||||
},
|
||||
{
|
||||
name: "mixed static and dynamic families - only dynamic counted",
|
||||
setup: func(g *SubscriptionController) {
|
||||
dynFam := &testDynFamly{
|
||||
name: "Dyn",
|
||||
topicsWithMinPeers: map[string]int{"topic/dyn": 8},
|
||||
}
|
||||
staticFam := &staticTopicFamily{name: "Static"}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dyn", forkDigest: [4]byte{9}}] = dynFam
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "static", forkDigest: [4]byte{9}}] = staticFam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/dyn": 8},
|
||||
},
|
||||
{
|
||||
name: "single topic with zero peer count",
|
||||
setup: func(g *SubscriptionController) {
|
||||
fam := &testDynFamly{
|
||||
name: "DynZero",
|
||||
topicsWithMinPeers: map[string]int{"topic/zero": 0},
|
||||
}
|
||||
g.mu.Lock()
|
||||
g.activeTopicFamilies[topicFamilyKey{topicName: "dynzero", forkDigest: [4]byte{0}}] = fam
|
||||
g.mu.Unlock()
|
||||
},
|
||||
want: map[string]int{"topic/zero": 0},
|
||||
},
|
||||
}
|
||||
|
||||
current := params.BeaconConfig().AltairForkEpoch
|
||||
s := testSubscriptionControllerService(t, current)
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Reset families for each subtest
|
||||
s.subscriptionController.mu.Lock()
|
||||
s.subscriptionController.activeTopicFamilies = make(map[topicFamilyKey]TopicFamily)
|
||||
s.subscriptionController.mu.Unlock()
|
||||
|
||||
tt.setup(s.subscriptionController)
|
||||
got := s.subscriptionController.GetCurrentActiveTopicsWithMinPeerCount()
|
||||
|
||||
require.Equal(t, len(tt.want), len(got), "result length mismatch")
|
||||
for topic, expectedCount := range tt.want {
|
||||
actualCount, exists := got[topic]
|
||||
require.Equal(t, true, exists, "expected topic %s not found in result", topic)
|
||||
require.Equal(t, expectedCount, actualCount, "peer count mismatch for topic %s", topic)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -35,6 +35,7 @@ func (s *subTopicHandler) addTopic(topic string, sub *pubsub.Subscription) {
|
||||
s.digestMap[digest] += 1
|
||||
}
|
||||
|
||||
// topicExists checks if a topic is currently tracked.
|
||||
func (s *subTopicHandler) topicExists(topic string) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -64,6 +65,7 @@ func (s *subTopicHandler) removeTopic(topic string) {
|
||||
}
|
||||
}
|
||||
|
||||
// digestExists checks if a fork digest is currently tracked.
|
||||
func (s *subTopicHandler) digestExists(digest [4]byte) bool {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -72,6 +74,7 @@ func (s *subTopicHandler) digestExists(digest [4]byte) bool {
|
||||
return ok && count > 0
|
||||
}
|
||||
|
||||
// allTopics returns all currently tracked topics.
|
||||
func (s *subTopicHandler) allTopics() []string {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
@@ -83,6 +86,7 @@ func (s *subTopicHandler) allTopics() []string {
|
||||
return topics
|
||||
}
|
||||
|
||||
// subForTopic returns the subscription for a given topic.
|
||||
func (s *subTopicHandler) subForTopic(topic string) *pubsub.Subscription {
|
||||
s.RLock()
|
||||
defer s.RUnlock()
|
||||
|
||||
277
beacon-chain/sync/topic_families_dynamic_subnets.go
Normal file
@@ -0,0 +1,277 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/p2p/enr"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// AttestationTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*AttestationTopicFamily)(nil)
|
||||
|
||||
var (
|
||||
attestationMinMeshPeers = 8
|
||||
attestationMinFanoutPeers = 6
|
||||
syncCommitteeMinMeshPeers = 8
|
||||
syncCommitteeMinFanoutPeers = 6
|
||||
dataColumnMinMeshPeers = 6
|
||||
dataColumnMinFanoutPeers = 2
|
||||
)
|
||||
|
||||
type AttestationTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewAttestationTopicFamily creates a new AttestationTopicFamily.
|
||||
func NewAttestationTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AttestationTopicFamily {
|
||||
a := &AttestationTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateCommitteeIndexBeaconAttestation, s.committeeIndexBeaconAttestationSubscriber, a)
|
||||
a.baseTopicFamily = base
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *AttestationTopicFamily) Name() string {
|
||||
return "AttestationTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeForSlot subscribes to the topics for the given slot.
|
||||
func (a *AttestationTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
a.subscribeToTopics(a.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeForSlot unsubscribes from topics we no longer need for the slot.
|
||||
func (a *AttestationTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
a.pruneTopicsExcept(a.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeForSlot returns the topics to subscribe to for a given slot.
|
||||
func (a *AttestationTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(a.getSubnetsToJoin(slot), a)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for an attestation subnet.
|
||||
func (a *AttestationTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.AttestationSubnetTopic(a.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns persistent and aggregator subnets.
|
||||
func (a *AttestationTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return a.syncService.persistentAndAggregatorSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns subnets needed for attestation duties.
|
||||
func (a *AttestationTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return attesterSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (a *AttestationTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(a.syncService, a, node, p2p.AttestationSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (a *AttestationTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(a, slot, attestationMinMeshPeers, attestationMinFanoutPeers)
|
||||
}
|
||||
|
||||
// SyncCommitteeTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*SyncCommitteeTopicFamily)(nil)
|
||||
|
||||
type SyncCommitteeTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewSyncCommitteeTopicFamily creates a new SyncCommitteeTopicFamily.
|
||||
func NewSyncCommitteeTopicFamily(s *Service, nse params.NetworkScheduleEntry) *SyncCommitteeTopicFamily {
|
||||
sc := &SyncCommitteeTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateSyncCommitteeMessage, s.syncCommitteeMessageSubscriber, sc)
|
||||
sc.baseTopicFamily = base
|
||||
return sc
|
||||
}
|
||||
|
||||
func (s *SyncCommitteeTopicFamily) Name() string {
|
||||
return "SyncCommitteeTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeForSlot subscribes to the topics for the given slot.
|
||||
func (s *SyncCommitteeTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
s.subscribeToTopics(s.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeForSlot unsubscribes from topics we no longer need for the slot.
|
||||
func (s *SyncCommitteeTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
s.pruneTopicsExcept(s.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeForSlot returns the topics to subscribe to for a given slot.
|
||||
func (s *SyncCommitteeTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(s.getSubnetsToJoin(slot), s)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for a sync committee subnet.
|
||||
func (s *SyncCommitteeTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.SyncCommitteeSubnetTopic(s.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns active sync committee subnets.
|
||||
func (s *SyncCommitteeTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return s.syncService.activeSyncSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns nil as there are no separate peer requirements.
|
||||
func (s *SyncCommitteeTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (s *SyncCommitteeTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(s.syncService, s, node, p2p.SyncSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (s *SyncCommitteeTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(s, slot, syncCommitteeMinMeshPeers, syncCommitteeMinFanoutPeers)
|
||||
}
|
||||
|
||||
// DataColumnTopicFamily
|
||||
var _ DynamicShardedTopicFamily = (*DataColumnTopicFamily)(nil)
|
||||
|
||||
type DataColumnTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
// NewDataColumnTopicFamily creates a new DataColumnTopicFamily.
|
||||
func NewDataColumnTopicFamily(s *Service, nse params.NetworkScheduleEntry) *DataColumnTopicFamily {
|
||||
d := &DataColumnTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateDataColumn, s.dataColumnSubscriber, d)
|
||||
d.baseTopicFamily = base
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *DataColumnTopicFamily) Name() string {
|
||||
return "DataColumnTopicFamily"
|
||||
}
|
||||
|
||||
// SubscribeForSlot subscribes to the topics for the given slot.
|
||||
func (d *DataColumnTopicFamily) SubscribeForSlot(slot primitives.Slot) {
|
||||
d.subscribeToTopics(d.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// UnsubscribeForSlot unsubscribes from topics we no longer need for the slot.
|
||||
func (d *DataColumnTopicFamily) UnsubscribeForSlot(slot primitives.Slot) {
|
||||
d.pruneTopicsExcept(d.TopicsToSubscribeForSlot(slot))
|
||||
}
|
||||
|
||||
// TopicsToSubscribeForSlot returns the topics to subscribe to for a given slot.
|
||||
func (d *DataColumnTopicFamily) TopicsToSubscribeForSlot(slot primitives.Slot) []string {
|
||||
return topicsFromSubnets(d.getSubnetsToJoin(slot), d)
|
||||
}
|
||||
|
||||
// getFullTopicString builds the full topic string for a data column subnet.
|
||||
func (d *DataColumnTopicFamily) getFullTopicString(subnet uint64) string {
|
||||
return p2p.DataColumnSubnetTopic(d.nse.ForkDigest, subnet)
|
||||
}
|
||||
|
||||
// getSubnetsToJoin returns data column subnets.
|
||||
func (d *DataColumnTopicFamily) getSubnetsToJoin(slot primitives.Slot) map[uint64]bool {
|
||||
return d.syncService.dataColumnSubnetIndices(slot)
|
||||
}
|
||||
|
||||
// getSubnetsForBroadcast returns all data column subnets.
|
||||
func (d *DataColumnTopicFamily) getSubnetsForBroadcast(slot primitives.Slot) map[uint64]bool {
|
||||
return d.syncService.allDataColumnSubnets(slot)
|
||||
}
|
||||
|
||||
// ExtractTopicsForNode returns all topics for the given node that are relevant to this topic family.
|
||||
func (d *DataColumnTopicFamily) ExtractTopicsForNode(node *enode.Node) ([]string, error) {
|
||||
return getTopicsForNode(d.syncService, d, node, p2p.DataColumnSubnets)
|
||||
}
|
||||
|
||||
// TopicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
func (d *DataColumnTopicFamily) TopicsWithMinPeerCount(slot primitives.Slot) map[string]int {
|
||||
return topicsWithMinPeerCount(d, slot, dataColumnMinMeshPeers, dataColumnMinFanoutPeers)
|
||||
}
|
||||
|
||||
type nodeSubnetExtractor func(id enode.ID, n *enode.Node, r *enr.Record) (map[uint64]bool, error)
|
||||
|
||||
type dynamicSubnetFamily interface {
|
||||
getSubnetsToJoin(primitives.Slot) map[uint64]bool
|
||||
getSubnetsForBroadcast(primitives.Slot) map[uint64]bool
|
||||
getFullTopicString(subnet uint64) string
|
||||
}
|
||||
|
||||
func getTopicsForNode(
|
||||
s *Service,
|
||||
tf dynamicSubnetFamily,
|
||||
node *enode.Node,
|
||||
extractor nodeSubnetExtractor,
|
||||
) ([]string, error) {
|
||||
if node == nil {
|
||||
return nil, errors.New("enode is nil")
|
||||
}
|
||||
currentSlot := s.cfg.clock.CurrentSlot()
|
||||
neededSubnets := computeNeededSubnets(tf, currentSlot)
|
||||
|
||||
nodeSubnets, err := extractor(node.ID(), node, node.Record())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var topics []string
|
||||
for subnet := range neededSubnets {
|
||||
if nodeSubnets[subnet] {
|
||||
topics = append(topics, tf.getFullTopicString(subnet))
|
||||
}
|
||||
}
|
||||
return topics, nil
|
||||
}
|
||||
|
||||
func computeNeededSubnets(tf dynamicSubnetFamily, slot primitives.Slot) map[uint64]bool {
|
||||
subnetsToJoin := tf.getSubnetsToJoin(slot)
|
||||
subnetsRequiringPeers := tf.getSubnetsForBroadcast(slot)
|
||||
|
||||
neededSubnets := make(map[uint64]bool, len(subnetsToJoin)+len(subnetsRequiringPeers))
|
||||
for subnet := range subnetsToJoin {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
for subnet := range subnetsRequiringPeers {
|
||||
neededSubnets[subnet] = true
|
||||
}
|
||||
return neededSubnets
|
||||
}
|
||||
|
||||
func topicsFromSubnets(subnets map[uint64]bool, tf dynamicSubnetFamily) []string {
|
||||
topics := make([]string, 0, len(subnets))
|
||||
for s := range subnets {
|
||||
topics = append(topics, tf.getFullTopicString(s))
|
||||
}
|
||||
return topics
|
||||
}
|
||||
|
||||
// topicsWithMinPeerCount returns all topics (mesh and fanout) with their respective min peer counts.
|
||||
// If a subnet appears in both mesh and fanout, the mesh peer count is used.
|
||||
func topicsWithMinPeerCount(tf dynamicSubnetFamily, slot primitives.Slot, minMeshPeers int, minFanoutPeers int) map[string]int {
|
||||
meshSubnets := tf.getSubnetsToJoin(slot)
|
||||
fanoutSubnets := tf.getSubnetsForBroadcast(slot)
|
||||
|
||||
result := make(map[string]int, len(meshSubnets)+len(fanoutSubnets))
|
||||
|
||||
// Add mesh topics with mesh min peer count
|
||||
for subnet := range meshSubnets {
|
||||
topic := tf.getFullTopicString(subnet)
|
||||
result[topic] = minMeshPeers
|
||||
}
|
||||
|
||||
// Add fanout topics with fanout min peer count (only if not already in mesh)
|
||||
for subnet := range fanoutSubnets {
|
||||
topic := tf.getFullTopicString(subnet)
|
||||
if _, exists := result[topic]; !exists {
|
||||
result[topic] = minFanoutPeers
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
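A rough, illustrative sketch of how the dynamic families above might be combined to answer "which topics need peers at this slot, and how many?". The Service and NetworkScheduleEntry values are assumed to come from the node's normal setup, and the summing of overlapping requirements mirrors the behavior exercised in the subscription controller tests; the helper name below is hypothetical, not part of the change.
func minPeerTopicsForSlot(s *Service, nse params.NetworkScheduleEntry, slot primitives.Slot) map[string]int {
	// One instance of each dynamic family for this schedule entry.
	families := []DynamicShardedTopicFamily{
		NewAttestationTopicFamily(s, nse),
		NewSyncCommitteeTopicFamily(s, nse),
		NewDataColumnTopicFamily(s, nse),
	}
	merged := make(map[string]int)
	for _, f := range families {
		for topic, minPeers := range f.TopicsWithMinPeerCount(slot) {
			// Topics requested by more than one family accumulate their requirements.
			merged[topic] += minPeers
		}
	}
	return merged
}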
|
||||
137
beacon-chain/sync/topic_families_dynamic_subnets_test.go
Normal file
@@ -0,0 +1,137 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// mockDynamicSubnetFamily is a test implementation of dynamicSubnetFamily.
|
||||
type mockDynamicSubnetFamily struct {
|
||||
meshSubnets map[uint64]bool
|
||||
fanoutSubnets map[uint64]bool
|
||||
topicPrefix string
|
||||
}
|
||||
|
||||
func (m *mockDynamicSubnetFamily) getSubnetsToJoin(_ primitives.Slot) map[uint64]bool {
|
||||
return m.meshSubnets
|
||||
}
|
||||
|
||||
func (m *mockDynamicSubnetFamily) getSubnetsForBroadcast(_ primitives.Slot) map[uint64]bool {
|
||||
return m.fanoutSubnets
|
||||
}
|
||||
|
||||
func (m *mockDynamicSubnetFamily) getFullTopicString(subnet uint64) string {
|
||||
return fmt.Sprintf("%s/subnet/%d", m.topicPrefix, subnet)
|
||||
}
|
||||
|
||||
func TestTopicsWithMinPeerCount(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
meshSubnets map[uint64]bool
|
||||
fanoutSubnets map[uint64]bool
|
||||
minMeshPeers int
|
||||
minFanoutPeers int
|
||||
expected map[string]int
|
||||
}{
|
||||
{
|
||||
name: "empty subnets returns empty map",
|
||||
meshSubnets: nil,
|
||||
fanoutSubnets: nil,
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "empty maps returns empty map",
|
||||
meshSubnets: map[uint64]bool{},
|
||||
fanoutSubnets: map[uint64]bool{},
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{},
|
||||
},
|
||||
{
|
||||
name: "only mesh subnets",
|
||||
meshSubnets: map[uint64]bool{1: true, 2: true, 3: true},
|
||||
fanoutSubnets: nil,
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{
|
||||
"test/subnet/1": 8,
|
||||
"test/subnet/2": 8,
|
||||
"test/subnet/3": 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only fanout subnets",
|
||||
meshSubnets: nil,
|
||||
fanoutSubnets: map[uint64]bool{4: true, 5: true},
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{
|
||||
"test/subnet/4": 6,
|
||||
"test/subnet/5": 6,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mesh and fanout with no overlap",
|
||||
meshSubnets: map[uint64]bool{1: true, 2: true},
|
||||
fanoutSubnets: map[uint64]bool{3: true, 4: true},
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{
|
||||
"test/subnet/1": 8,
|
||||
"test/subnet/2": 8,
|
||||
"test/subnet/3": 6,
|
||||
"test/subnet/4": 6,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "fanout subset of mesh - all get mesh peer count",
|
||||
meshSubnets: map[uint64]bool{1: true, 2: true, 3: true, 4: true},
|
||||
fanoutSubnets: map[uint64]bool{2: true, 3: true},
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{
|
||||
"test/subnet/1": 8,
|
||||
"test/subnet/2": 8, // in both, mesh takes precedence
|
||||
"test/subnet/3": 8, // in both, mesh takes precedence
|
||||
"test/subnet/4": 8,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mesh subset of fanout - mesh subnets get mesh count, remaining get fanout",
|
||||
meshSubnets: map[uint64]bool{2: true, 3: true},
|
||||
fanoutSubnets: map[uint64]bool{1: true, 2: true, 3: true, 4: true},
|
||||
minMeshPeers: 8,
|
||||
minFanoutPeers: 6,
|
||||
expected: map[string]int{
|
||||
"test/subnet/1": 6, // fanout only
|
||||
"test/subnet/2": 8, // in both, mesh takes precedence
|
||||
"test/subnet/3": 8, // in both, mesh takes precedence
|
||||
"test/subnet/4": 6, // fanout only
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
mock := &mockDynamicSubnetFamily{
|
||||
meshSubnets: tt.meshSubnets,
|
||||
fanoutSubnets: tt.fanoutSubnets,
|
||||
topicPrefix: "test",
|
||||
}
|
||||
|
||||
result := topicsWithMinPeerCount(mock, 0, tt.minMeshPeers, tt.minFanoutPeers)
|
||||
|
||||
require.Equal(t, len(tt.expected), len(result), "result length mismatch")
|
||||
for topic, expectedCount := range tt.expected {
|
||||
actualCount, exists := result[topic]
|
||||
require.True(t, exists, "expected topic %s not found in result", topic)
|
||||
require.Equal(t, expectedCount, actualCount, "peer count mismatch for topic %s", topic)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
38
beacon-chain/sync/topic_families_static_subnets.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
)
|
||||
|
||||
var _ ShardedTopicFamily = (*BlobTopicFamily)(nil)
|
||||
|
||||
// BlobTopicFamily represents a static-subnet family instance for a specific blob subnet index.
|
||||
type BlobTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
subnetIndex uint64
|
||||
}
|
||||
|
||||
func NewBlobTopicFamily(s *Service, nse params.NetworkScheduleEntry, subnetIndex uint64) *BlobTopicFamily {
|
||||
b := &BlobTopicFamily{
|
||||
subnetIndex: subnetIndex,
|
||||
}
|
||||
base := newBaseTopicFamily(s, nse, s.validateBlob, s.blobSubscriber, b)
|
||||
b.baseTopicFamily = base
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *BlobTopicFamily) Name() string {
|
||||
return fmt.Sprintf("BlobTopicFamily-%d", b.subnetIndex)
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the static subnet topic. Slot is ignored for this topic family.
|
||||
func (b *BlobTopicFamily) Subscribe() {
|
||||
b.subscribeToTopics([]string{b.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (b *BlobTopicFamily) getFullTopicString() string {
|
||||
return p2p.BlobSubnetTopic(b.nse.ForkDigest, b.subnetIndex)
|
||||
}
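Since one BlobTopicFamily instance covers a single blob subnet index, wiring them up for a schedule entry could look like the hedged sketch below. The choice of BlobsidecarSubnetCount is purely an example (the Electra count would apply after that fork), and the helper name is hypothetical.
func subscribeBlobFamilies(s *Service, nse params.NetworkScheduleEntry) []*BlobTopicFamily {
	count := params.BeaconConfig().BlobsidecarSubnetCount
	families := make([]*BlobTopicFamily, 0, count)
	for idx := uint64(0); idx < count; idx++ {
		// One family per blob subnet index; each subscribes to its single static topic.
		f := NewBlobTopicFamily(s, nse, idx)
		f.Subscribe()
		families = append(families, f)
	}
	return families
}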
|
||||
247
beacon-chain/sync/topic_families_without_subnets.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
)
|
||||
|
||||
// Blocks
|
||||
var _ ShardedTopicFamily = (*BlockTopicFamily)(nil)
|
||||
|
||||
type BlockTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewBlockTopicFamily(s *Service, nse params.NetworkScheduleEntry) *BlockTopicFamily {
|
||||
b := &BlockTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateBeaconBlockPubSub, s.beaconBlockSubscriber, b)
|
||||
b.baseTopicFamily = base
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *BlockTopicFamily) Name() string {
|
||||
return "BlockTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic.
|
||||
func (b *BlockTopicFamily) Subscribe() {
|
||||
b.subscribeToTopics([]string{b.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (b *BlockTopicFamily) getFullTopicString() string {
|
||||
return p2p.BlockSubnetTopic(b.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Aggregate and Proof
|
||||
var _ ShardedTopicFamily = (*AggregateAndProofTopicFamily)(nil)
|
||||
|
||||
type AggregateAndProofTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewAggregateAndProofTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AggregateAndProofTopicFamily {
|
||||
a := &AggregateAndProofTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateAggregateAndProof, s.beaconAggregateProofSubscriber, a)
|
||||
a.baseTopicFamily = base
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *AggregateAndProofTopicFamily) Name() string {
|
||||
return "AggregateAndProofTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic.
|
||||
func (a *AggregateAndProofTopicFamily) Subscribe() {
|
||||
a.subscribeToTopics([]string{a.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (a *AggregateAndProofTopicFamily) getFullTopicString() string {
|
||||
return p2p.AggregateAndProofSubnetTopic(a.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Voluntary Exit
|
||||
var _ ShardedTopicFamily = (*VoluntaryExitTopicFamily)(nil)
|
||||
|
||||
type VoluntaryExitTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewVoluntaryExitTopicFamily(s *Service, nse params.NetworkScheduleEntry) *VoluntaryExitTopicFamily {
|
||||
v := &VoluntaryExitTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateVoluntaryExit, s.voluntaryExitSubscriber, v)
|
||||
v.baseTopicFamily = base
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *VoluntaryExitTopicFamily) Name() string {
|
||||
return "VoluntaryExitTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (v *VoluntaryExitTopicFamily) Subscribe() {
|
||||
v.subscribeToTopics([]string{v.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (v *VoluntaryExitTopicFamily) getFullTopicString() string {
|
||||
return p2p.VoluntaryExitSubnetTopic(v.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Proposer Slashing
|
||||
var _ ShardedTopicFamily = (*ProposerSlashingTopicFamily)(nil)
|
||||
|
||||
type ProposerSlashingTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewProposerSlashingTopicFamily(s *Service, nse params.NetworkScheduleEntry) *ProposerSlashingTopicFamily {
|
||||
p := &ProposerSlashingTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateProposerSlashing, s.proposerSlashingSubscriber, p)
|
||||
p.baseTopicFamily = base
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *ProposerSlashingTopicFamily) Name() string {
|
||||
return "ProposerSlashingTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (p *ProposerSlashingTopicFamily) Subscribe() {
|
||||
p.subscribeToTopics([]string{p.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (p *ProposerSlashingTopicFamily) getFullTopicString() string {
|
||||
return p2p.ProposerSlashingSubnetTopic(p.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Attester Slashing
|
||||
var _ ShardedTopicFamily = (*AttesterSlashingTopicFamily)(nil)
|
||||
|
||||
type AttesterSlashingTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewAttesterSlashingTopicFamily(s *Service, nse params.NetworkScheduleEntry) *AttesterSlashingTopicFamily {
|
||||
a := &AttesterSlashingTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateAttesterSlashing, s.attesterSlashingSubscriber, a)
|
||||
a.baseTopicFamily = base
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *AttesterSlashingTopicFamily) Name() string {
|
||||
return "AttesterSlashingTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (a *AttesterSlashingTopicFamily) Subscribe() {
|
||||
a.subscribeToTopics([]string{a.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (a *AttesterSlashingTopicFamily) getFullTopicString() string {
|
||||
return p2p.AttesterSlashingSubnetTopic(a.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Sync Contribution and Proof (Altair+)
|
||||
var _ ShardedTopicFamily = (*SyncContributionAndProofTopicFamily)(nil)
|
||||
|
||||
type SyncContributionAndProofTopicFamily struct{ *baseTopicFamily }
|
||||
|
||||
func NewSyncContributionAndProofTopicFamily(s *Service, nse params.NetworkScheduleEntry) *SyncContributionAndProofTopicFamily {
|
||||
sc := &SyncContributionAndProofTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateSyncContributionAndProof, s.syncContributionAndProofSubscriber, sc)
|
||||
sc.baseTopicFamily = base
|
||||
return sc
|
||||
}
|
||||
|
||||
func (sc *SyncContributionAndProofTopicFamily) Name() string {
|
||||
return "SyncContributionAndProofTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (sc *SyncContributionAndProofTopicFamily) Subscribe() {
|
||||
sc.subscribeToTopics([]string{sc.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (sc *SyncContributionAndProofTopicFamily) getFullTopicString() string {
|
||||
return p2p.SyncContributionAndProofSubnetTopic(sc.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Light Client Optimistic Update (Altair+)
|
||||
var _ ShardedTopicFamily = (*LightClientOptimisticUpdateTopicFamily)(nil)
|
||||
|
||||
type LightClientOptimisticUpdateTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewLightClientOptimisticUpdateTopicFamily(s *Service, nse params.NetworkScheduleEntry) *LightClientOptimisticUpdateTopicFamily {
|
||||
l := &LightClientOptimisticUpdateTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateLightClientOptimisticUpdate, noopHandler, l)
|
||||
l.baseTopicFamily = base
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *LightClientOptimisticUpdateTopicFamily) Name() string {
|
||||
return "LightClientOptimisticUpdateTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (l *LightClientOptimisticUpdateTopicFamily) Subscribe() {
|
||||
l.subscribeToTopics([]string{l.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (l *LightClientOptimisticUpdateTopicFamily) getFullTopicString() string {
|
||||
return p2p.LcOptimisticToTopic(l.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// Light Client Finality Update (Altair+)
|
||||
var _ ShardedTopicFamily = (*LightClientFinalityUpdateTopicFamily)(nil)
|
||||
|
||||
type LightClientFinalityUpdateTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewLightClientFinalityUpdateTopicFamily(s *Service, nse params.NetworkScheduleEntry) *LightClientFinalityUpdateTopicFamily {
|
||||
l := &LightClientFinalityUpdateTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateLightClientFinalityUpdate, noopHandler, l)
|
||||
l.baseTopicFamily = base
|
||||
return l
|
||||
}
|
||||
|
||||
func (l *LightClientFinalityUpdateTopicFamily) Name() string {
|
||||
return "LightClientFinalityUpdateTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (l *LightClientFinalityUpdateTopicFamily) Subscribe() {
|
||||
l.subscribeToTopics([]string{l.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (l *LightClientFinalityUpdateTopicFamily) getFullTopicString() string {
|
||||
return p2p.LcFinalityToTopic(l.nse.ForkDigest)
|
||||
}
|
||||
|
||||
// BLS to Execution Change (Capella+)
|
||||
var _ ShardedTopicFamily = (*BlsToExecutionChangeTopicFamily)(nil)
|
||||
|
||||
type BlsToExecutionChangeTopicFamily struct {
|
||||
*baseTopicFamily
|
||||
}
|
||||
|
||||
func NewBlsToExecutionChangeTopicFamily(s *Service, nse params.NetworkScheduleEntry) *BlsToExecutionChangeTopicFamily {
|
||||
b := &BlsToExecutionChangeTopicFamily{}
|
||||
base := newBaseTopicFamily(s, nse, s.validateBlsToExecutionChange, s.blsToExecutionChangeSubscriber, b)
|
||||
b.baseTopicFamily = base
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *BlsToExecutionChangeTopicFamily) Name() string {
|
||||
return "BlsToExecutionChangeTopicFamily"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to the topic. Slot is ignored for this topic family.
|
||||
func (b *BlsToExecutionChangeTopicFamily) Subscribe() {
|
||||
b.subscribeToTopics([]string{b.getFullTopicString()})
|
||||
}
|
||||
|
||||
func (b *BlsToExecutionChangeTopicFamily) getFullTopicString() string {
|
||||
return p2p.BlsToExecutionChangeSubnetTopic(b.nse.ForkDigest)
|
||||
}
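The families in this file have no subnet sharding, so a controller only needs to construct each one for the active schedule entry and subscribe once. A minimal sketch of that call pattern, with a hypothetical helper name and an example subset of families (the exact set registered per fork is decided by the controller, not here):
func subscribeStaticFamilies(s *Service, nse params.NetworkScheduleEntry) {
	// Each constructor wires the family's validator and handler; Subscribe joins its single topic.
	NewBlockTopicFamily(s, nse).Subscribe()
	NewAggregateAndProofTopicFamily(s, nse).Subscribe()
	NewVoluntaryExitTopicFamily(s, nse).Subscribe()
	NewProposerSlashingTopicFamily(s, nse).Subscribe()
	NewAttesterSlashingTopicFamily(s, nse).Subscribe()
}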
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
|
||||
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v7/config/params"
|
||||
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
|
||||
@@ -288,15 +289,27 @@ func (bv *ROBlobVerifier) SidecarKzgProofVerified() (err error) {
|
||||
// for later processing while proposers for the block's branch are calculated -- in such a case do not REJECT, instead IGNORE this message.
|
||||
func (bv *ROBlobVerifier) SidecarProposerExpected(ctx context.Context) (err error) {
|
||||
defer bv.recordResult(RequireSidecarProposerExpected, &err)
|
||||
pst, err := bv.parentState(ctx)
|
||||
e := slots.ToEpoch(bv.blob.Slot())
|
||||
if e > 0 {
|
||||
e = e - 1
|
||||
}
|
||||
r, err := bv.fc.TargetRootForEpoch(bv.blob.ParentRoot(), e)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("State replay to parent_root failed")
|
||||
return errSidecarUnexpectedProposer
|
||||
}
|
||||
idx, err := bv.pc.ComputeProposer(ctx, bv.blob.Slot(), pst)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("Error computing proposer index from parent state")
|
||||
return errSidecarUnexpectedProposer
|
||||
c := &forkchoicetypes.Checkpoint{Root: r, Epoch: e}
|
||||
idx, cached := bv.pc.Proposer(c, bv.blob.Slot())
|
||||
if !cached {
|
||||
pst, err := bv.parentState(ctx)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("State replay to parent_root failed")
|
||||
return errSidecarUnexpectedProposer
|
||||
}
|
||||
idx, err = bv.pc.ComputeProposer(ctx, bv.blob.ParentRoot(), bv.blob.Slot(), pst)
|
||||
if err != nil {
|
||||
log.WithError(err).WithFields(logging.BlobFields(bv.blob)).Debug("Error computing proposer index from parent state")
|
||||
return errSidecarUnexpectedProposer
|
||||
}
|
||||
}
|
||||
if idx != bv.blob.ProposerIndex() {
|
||||
log.WithError(errSidecarUnexpectedProposer).
|
||||
|
||||
@@ -452,17 +452,33 @@ func TestSidecarProposerExpected(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
_, blobs := util.GenerateTestDenebBlockWithSidecar(t, [32]byte{}, 1, 1)
|
||||
b := blobs[0]
|
||||
t.Run("state lookup failure", func(t *testing.T) {
|
||||
ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
|
||||
t.Run("cached, matches", func(t *testing.T) {
|
||||
ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex())}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
|
||||
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
|
||||
require.NoError(t, v.SidecarProposerExpected(ctx))
|
||||
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
|
||||
require.NoError(t, v.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
t.Run("cached, does not match", func(t *testing.T) {
|
||||
ini := Initializer{shared: &sharedResources{pc: &mockProposerCache{ProposerCB: pcReturnsIdx(b.ProposerIndex() + 1)}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
|
||||
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
|
||||
require.ErrorIs(t, v.SidecarProposerExpected(ctx), errSidecarUnexpectedProposer)
|
||||
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
t.Run("not cached, state lookup failure", func(t *testing.T) {
|
||||
ini := Initializer{shared: &sharedResources{sr: sbrNotFound(t, b.ParentRoot()), pc: &mockProposerCache{ProposerCB: pcReturnsNotFound()}, fc: &mockForkchoicer{TargetRootForEpochCB: fcReturnsTargetRoot([32]byte{})}}}
|
||||
v := ini.NewBlobVerifier(b, GossipBlobSidecarRequirements)
|
||||
require.ErrorIs(t, v.SidecarProposerExpected(ctx), errSidecarUnexpectedProposer)
|
||||
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
|
||||
t.Run("proposer matches", func(t *testing.T) {
|
||||
t.Run("not cached, proposer matches", func(t *testing.T) {
|
||||
pc := &mockProposerCache{
|
||||
ComputeProposerCB: func(_ context.Context, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
ProposerCB: pcReturnsNotFound(),
|
||||
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
require.Equal(t, b.ParentRoot(), root)
|
||||
require.Equal(t, b.Slot(), slot)
|
||||
return b.ProposerIndex(), nil
|
||||
},
|
||||
@@ -473,9 +489,11 @@ func TestSidecarProposerExpected(t *testing.T) {
|
||||
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
|
||||
require.NoError(t, v.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
t.Run("proposer does not match", func(t *testing.T) {
|
||||
t.Run("not cached, proposer does not match", func(t *testing.T) {
|
||||
pc := &mockProposerCache{
|
||||
ComputeProposerCB: func(_ context.Context, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
ProposerCB: pcReturnsNotFound(),
|
||||
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
require.Equal(t, b.ParentRoot(), root)
|
||||
require.Equal(t, b.Slot(), slot)
|
||||
return b.ProposerIndex() + 1, nil
|
||||
},
|
||||
@@ -486,9 +504,11 @@ func TestSidecarProposerExpected(t *testing.T) {
|
||||
require.Equal(t, true, v.results.executed(RequireSidecarProposerExpected))
|
||||
require.NotNil(t, v.results.result(RequireSidecarProposerExpected))
|
||||
})
|
||||
t.Run("ComputeProposer fails", func(t *testing.T) {
|
||||
t.Run("not cached, ComputeProposer fails", func(t *testing.T) {
|
||||
pc := &mockProposerCache{
|
||||
ComputeProposerCB: func(_ context.Context, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
ProposerCB: pcReturnsNotFound(),
|
||||
ComputeProposerCB: func(_ context.Context, root [32]byte, slot primitives.Slot, _ state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
require.Equal(t, b.ParentRoot(), root)
|
||||
require.Equal(t, b.Slot(), slot)
|
||||
return 0, errors.New("ComputeProposer failed")
|
||||
},
|
||||
@@ -825,11 +845,28 @@ func (v *validxStateOverride) ReadFromEveryValidator(f func(idx int, val state.R
|
||||
}
|
||||
|
||||
type mockProposerCache struct {
|
||||
ComputeProposerCB func(ctx context.Context, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error)
|
||||
ComputeProposerCB func(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error)
|
||||
ProposerCB func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool)
|
||||
}
|
||||
|
||||
func (p *mockProposerCache) ComputeProposer(ctx context.Context, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
return p.ComputeProposerCB(ctx, slot, pst)
|
||||
func (p *mockProposerCache) ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
|
||||
return p.ComputeProposerCB(ctx, root, slot, pst)
|
||||
}
|
||||
|
||||
func (p *mockProposerCache) Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
|
||||
return p.ProposerCB(c, slot)
|
||||
}
|
||||
|
||||
var _ proposerCache = &mockProposerCache{}
|
||||
|
||||
func pcReturnsIdx(idx primitives.ValidatorIndex) func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
|
||||
return func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
|
||||
return idx, true
|
||||
}
|
||||
}
|
||||
|
||||
func pcReturnsNotFound() func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
|
||||
return func(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
|
||||
return 0, false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
lruwrpr "github.com/OffchainLabs/prysm/v7/cache/lru"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -15,7 +16,6 @@ import (
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/time/slots"
lru "github.com/hashicorp/golang-lru"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -152,7 +152,8 @@ func (c *sigCache) SignatureVerified(sig signatureData) (bool, error) {
// and cache the result so that it can be reused when the same verification needs to be performed
// across multiple values.
type proposerCache interface {
ComputeProposer(ctx context.Context, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error)
ComputeProposer(ctx context.Context, root [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error)
Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool)
}

func newPropCache() *propCache {
@@ -162,20 +163,26 @@ func newPropCache() *propCache {
type propCache struct {
}

// ComputeProposer takes the state and computes the proposer index at the given slot
func (*propCache) ComputeProposer(ctx context.Context, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
// After Fulu, the lookahead only contains proposers for the current and next epoch.
stateEpoch := slots.ToEpoch(pst.Slot())
slotEpoch := slots.ToEpoch(slot)
if slotEpoch > stateEpoch+1 {
start, err := slots.EpochStart(slotEpoch - 1)
if err != nil {
return 0, err
}
pst, err = transition.ProcessSlots(ctx, pst, start)
if err != nil {
return 0, errors.Wrap(err, "failed to advance state to compute proposer")
}
// ComputeProposer takes the state for the given parent root and slot and computes the proposer index, updating the
// proposer index cache when successful.
func (*propCache) ComputeProposer(ctx context.Context, parent [32]byte, slot primitives.Slot, pst state.BeaconState) (primitives.ValidatorIndex, error) {
pst, err := transition.ProcessSlotsUsingNextSlotCache(ctx, pst, parent[:], slot)
if err != nil {
return 0, err
}
return helpers.BeaconProposerIndexAtSlot(ctx, pst, slot)
idx, err := helpers.BeaconProposerIndex(ctx, pst)
if err != nil {
return 0, err
}
return idx, nil
}

// Proposer returns the validator index if it is found in the cache, along with a boolean indicating
// whether the value was present, similar to accessing an lru or go map.
func (*propCache) Proposer(c *forkchoicetypes.Checkpoint, slot primitives.Slot) (primitives.ValidatorIndex, bool) {
id, err := helpers.ProposerIndexAtSlotFromCheckpoint(c, slot)
if err != nil {
return 0, false
}
return id, true
}

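A short usage sketch of the interface above (hypothetical helper, assuming the imports already present in this file): consult the cheap checkpoint lookup first and only replay the parent state on a miss, which is the pattern the data column verifier below follows.

// proposerForSlot is a hypothetical wrapper illustrating the intended read-through use.
func proposerForSlot(ctx context.Context, pc proposerCache, cp *forkchoicetypes.Checkpoint, parent [32]byte, slot primitives.Slot, parentState state.BeaconState) (primitives.ValidatorIndex, error) {
    // Fast path: the proposer index was already computed for this checkpoint and slot.
    if idx, ok := pc.Proposer(cp, slot); ok {
        return idx, nil
    }
    // Slow path: advance the parent state to the target slot and compute the index.
    return pc.ComputeProposer(ctx, parent, slot, parentState)
}
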
@@ -4,6 +4,7 @@ import (
"testing"

"github.com/OffchainLabs/prysm/v7/beacon-chain/core/signing"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
@@ -106,3 +107,25 @@ func (m *mockValidatorAtIndexer) ValidatorAtIndex(idx primitives.ValidatorIndex)
}

var _ validatorAtIndexer = &mockValidatorAtIndexer{}

func TestProposerCache(t *testing.T) {
ctx := t.Context()
// 3 validators because that was the first number that produced a non-zero proposer index by default
st, _ := util.DeterministicGenesisStateDeneb(t, 3)

pc := newPropCache()
_, cached := pc.Proposer(&forkchoicetypes.Checkpoint{}, 1)
// should not be cached yet
require.Equal(t, false, cached)

// If this test breaks due to changes in the deterministic state gen, just replace '2' with whatever the right index is.
expectedIdx := 2
idx, err := pc.ComputeProposer(ctx, [32]byte{}, 1, st)
require.NoError(t, err)
require.Equal(t, primitives.ValidatorIndex(expectedIdx), idx)

idx, cached = pc.Proposer(&forkchoicetypes.Checkpoint{}, 1)
// TODO: update this test when we integrate a proposer id cache
require.Equal(t, false, cached)
require.Equal(t, primitives.ValidatorIndex(0), idx)
}

@@ -11,6 +11,7 @@ import (
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v7/beacon-chain/core/transition"
forkchoicetypes "github.com/OffchainLabs/prysm/v7/beacon-chain/forkchoice/types"
"github.com/OffchainLabs/prysm/v7/beacon-chain/state"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/config/params"
@@ -483,6 +484,38 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e

defer dv.recordResult(RequireSidecarProposerExpected, &err)

type slotParentRoot struct {
slot primitives.Slot
parentRoot [fieldparams.RootLength]byte
}

targetRootBySlotParentRoot := make(map[slotParentRoot][fieldparams.RootLength]byte)

var targetRootFromCache = func(slot primitives.Slot, parentRoot [fieldparams.RootLength]byte) ([fieldparams.RootLength]byte, error) {
// Use cached values if available.
slotParentRoot := slotParentRoot{slot: slot, parentRoot: parentRoot}
if root, ok := targetRootBySlotParentRoot[slotParentRoot]; ok {
return root, nil
}

// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(slot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Compute the target root for the epoch.
targetRoot, err := dv.fc.TargetRootForEpoch(parentRoot, dataColumnEpoch)
if err != nil {
return [fieldparams.RootLength]byte{}, columnErrBuilder(errors.Wrap(err, "target root from epoch"))
}

// Store the target root in the cache.
targetRootBySlotParentRoot[slotParentRoot] = targetRoot

return targetRoot, nil
}

for _, dataColumn := range dv.dataColumns {
// Extract the slot of the data column.
dataColumnSlot := dataColumn.Slot()
@@ -490,33 +523,56 @@ func (dv *RODataColumnsVerifier) SidecarProposerExpected(ctx context.Context) (e
// Extract the root of the parent block corresponding to the data column.
parentRoot := dataColumn.ParentRoot()

// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}

idx, err := helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}

return idx, nil
})
// Compute the target root for the data column.
targetRoot, err := targetRootFromCache(dataColumnSlot, parentRoot)
if err != nil {
return err
return columnErrBuilder(errors.Wrap(err, "target root"))
}

idx, ok := idxAny.(primitives.ValidatorIndex)
if !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
// Compute the epoch of the data column slot.
dataColumnEpoch := slots.ToEpoch(dataColumnSlot)
if dataColumnEpoch > 0 {
dataColumnEpoch = dataColumnEpoch - 1
}

// Create a checkpoint for the target root.
checkpoint := &forkchoicetypes.Checkpoint{Root: targetRoot, Epoch: dataColumnEpoch}

// Try to extract the proposer index from the data column in the cache.
idx, cached := dv.pc.Proposer(checkpoint, dataColumnSlot)

if !cached {
parentRoot := dataColumn.ParentRoot()
// Ensure the expensive index computation is only performed once for
// concurrent requests for the same signature data.
idxAny, err, _ := dv.sg.Do(concatRootSlot(parentRoot, dataColumnSlot), func() (any, error) {
verifyingState, err := dv.getVerifyingState(ctx, dataColumn)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "verifying state"))
}

idx, err = helpers.BeaconProposerIndexAtSlot(ctx, verifyingState, dataColumnSlot)
if err != nil {
return nil, columnErrBuilder(errors.Wrap(err, "compute proposer"))
}

return idx, nil
})
if err != nil {
return err
}

var ok bool
if idx, ok = idxAny.(primitives.ValidatorIndex); !ok {
return columnErrBuilder(errors.New("type assertion to ValidatorIndex failed"))
}
}

if idx != dataColumn.ProposerIndex() {
return columnErrBuilder(errSidecarUnexpectedProposer)
}
}

return nil
}

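For reference, the dv.sg.Do call above collapses concurrent lookups for the same (parent root, slot) key into a single computation. A standalone sketch of that pattern, assuming golang.org/x/sync/singleflight is the group type behind dv.sg:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"

    "golang.org/x/sync/singleflight"
)

func main() {
    var sg singleflight.Group
    var wg sync.WaitGroup
    var calls int32

    // Ten goroutines request the same key; the expensive function runs once
    // while in flight and every caller receives the shared result.
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            v, _, _ := sg.Do("parentRoot|slot", func() (any, error) {
                atomic.AddInt32(&calls, 1) // stands in for the state replay + proposer computation
                return 42, nil
            })
            _ = v
        }()
    }
    wg.Wait()
    fmt.Println("expensive computations:", atomic.LoadInt32(&calls))
}
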
@@ -799,20 +799,35 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
columns := GenerateTestDataColumns(t, parentRoot, columnSlot, blobCount)
firstColumn := columns[0]
ctx := t.Context()

testCases := []struct {
name string
stateByRooter StateByRooter
headStateProvider *mockHeadStateProvider
columns []blocks.RODataColumn
error string
name string
stateByRooter StateByRooter
proposerCache proposerCache
columns []blocks.RODataColumn
error string
}{
{
name: "state lookup failure",
name: "Cached, matches",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex()),
},
columns: columns,
},
{
name: "Cached, does not match",
stateByRooter: nil,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsIdx(firstColumn.ProposerIndex() + 1),
},
columns: columns,
error: errSidecarUnexpectedProposer.Error(),
},
{
name: "Not cached, state lookup failure",
stateByRooter: sbrNotFound(t, firstColumn.ParentRoot()),
headStateProvider: &mockHeadStateProvider{
headRoot: []byte{0xff}, // Different from parentRoot so it won't use head
headSlot: 1000,
proposerCache: &mockProposerCache{
ProposerCB: pcReturnsNotFound(),
},
columns: columns,
error: "verifying state",
@@ -824,7 +839,8 @@ func TestDataColumnsSidecarProposerExpected(t *testing.T) {
initializer := Initializer{
shared: &sharedResources{
sr: tc.stateByRooter,
hsp: tc.headStateProvider,
pc: tc.proposerCache,
hsp: &mockHeadStateProvider{},
fc: &mockForkchoicer{
TargetRootForEpochCB: fcReturnsTargetRoot([fieldparams.RootLength]byte{}),
},

@@ -1,3 +0,0 @@
## Fixed

- Fix missing return after version header check in SubmitAttesterSlashingsV2.
@@ -1,2 +0,0 @@
### Ignored
- Reverts AutoNatV2 change introduced in https://github.com/OffchainLabs/prysm/pull/16100 as the libp2p upgrade fails inter-op testing.
3
changelog/aarsh_libp2p_autonatv2_per_address.md
Normal file
3
changelog/aarsh_libp2p_autonatv2_per_address.md
Normal file
@@ -0,0 +1,3 @@
### Added

- Add support for detecting and logging per address reachability via libp2p AutoNAT v2.
3
changelog/aarshkshah1992-gossipsub-control-pane.md
Normal file
3
changelog/aarshkshah1992-gossipsub-control-pane.md
Normal file
@@ -0,0 +1,3 @@
### Added

- A Gossipsub control plane with topic abstractions, a peer crawler and a peer controller.
@@ -1,3 +0,0 @@
### Fixed

- Fix deadlock in data column gossip KZG batch verification when a caller times out preventing result delivery.
@@ -1,3 +0,0 @@
### Fixed

- fixed replay state issue in rest api caused by attester and sync committee duties endpoints
@@ -1,3 +0,0 @@
### Changed

- Removed proposer id cache.
@@ -1,3 +0,0 @@
### Fixed

- Do not error when committee has been computed correctly but updating the cache failed.
@@ -1,3 +0,0 @@
### Added

- Static analyzer that ensures each `httputil.HandleError` call is followed by a `return` statement.
@@ -1,3 +0,0 @@
### Ignored

- Use `WriteStateFetchError` in API handlers whenever possible.
@@ -131,6 +131,7 @@ var appFlags = []cli.Flag{
debug.MutexProfileFractionFlag,
cmd.LogFileName,
cmd.EnableUPnPFlag,
cmd.EnableAutoNATFlag,
cmd.ConfigFileFlag,
cmd.ChainConfigFileFlag,
cmd.GrpcMaxCallRecvMsgSizeFlag,

@@ -82,6 +82,7 @@ var appHelpFlagGroups = []flagGroup{
Flags: []cli.Flag{
cmd.BootstrapNode,
cmd.EnableUPnPFlag,
cmd.EnableAutoNATFlag,
cmd.NoDiscovery,
cmd.P2PAllowList,
cmd.P2PDenyList,

@@ -224,6 +224,11 @@ var (
Name: "enable-upnp",
Usage: "Enable the service (Beacon chain or Validator) to use UPnP when possible.",
}
// EnableAutoNATFlag enables AutoNAT v2 service for per-address reachability detection.
EnableAutoNATFlag = &cli.BoolFlag{
Name: "enable-autonat",
Usage: "Enable AutoNAT v2 service for per-address reachability detection. Helps diagnose connectivity issues behind NAT/firewalls.",
}
// ConfigFileFlag specifies the filepath to load flag values.
ConfigFileFlag = &cli.StringFlag{
Name: "config-file",

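A minimal sketch of how a boolean flag defined this way is read at runtime, assuming the urfave/cli v2 API used by these definitions; the app wiring below is illustrative only, not the beacon node's actual setup:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

func main() {
    app := &cli.App{
        Flags: []cli.Flag{
            &cli.BoolFlag{
                Name:  "enable-autonat",
                Usage: "Enable AutoNAT v2 service for per-address reachability detection.",
            },
        },
        Action: func(ctx *cli.Context) error {
            // The flag value is read back by name wherever the node configures its p2p service.
            if ctx.Bool("enable-autonat") {
                fmt.Println("AutoNAT v2 reachability probing enabled")
            }
            return nil
        },
    }

    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}
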
18
deps.bzl
18
deps.bzl
@@ -1974,8 +1974,8 @@ def prysm_deps():
],
build_file_proto_mode = "disable_global",
importpath = "github.com/libp2p/go-libp2p",
sum = "h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE=",
version = "v0.39.1",
sum = "h1:A8foZk+ZEhZTv0Jb++7xUFlrFhBDv4j2Vh/uq4YX+KE=",
version = "v0.42.0",
)
go_repository(
name = "com_github_libp2p_go_libp2p_asn_util",
@@ -2038,6 +2038,12 @@ def prysm_deps():
sum = "h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=",
version = "v4.0.2",
)
go_repository(
name = "com_github_libp2p_go_yamux_v5",
importpath = "github.com/libp2p/go-yamux/v5",
sum = "h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=",
version = "v5.0.1",
)
go_repository(
name = "com_github_libp2p_zeroconf_v2",
importpath = "github.com/libp2p/zeroconf/v2",
@@ -2323,8 +2329,8 @@ def prysm_deps():
go_repository(
name = "com_github_multiformats_go_multiaddr",
importpath = "github.com/multiformats/go-multiaddr",
sum = "h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=",
version = "v0.14.0",
sum = "h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=",
version = "v0.16.0",
)
go_repository(
name = "com_github_multiformats_go_multiaddr_dns",
@@ -2915,8 +2921,8 @@ def prysm_deps():
"gazelle:exclude tools.go",
],
importpath = "github.com/quic-go/quic-go",
sum = "h1:x09Agz4ATTMEP3qb5P0MRxNZfd6O9wAyK3qwwqQZVQc=",
version = "v0.49.1-0.20250925085836-275c172fec2b",
sum = "h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA=",
version = "v0.52.0",
)
go_repository(
name = "com_github_quic_go_webtransport_go",

84
go.mod
84
go.mod
@@ -35,30 +35,28 @@ require (
|
||||
github.com/herumi/bls-eth-go-binary v1.31.0
|
||||
github.com/holiman/uint256 v1.3.2
|
||||
github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279
|
||||
github.com/ipfs/go-log/v2 v2.5.1
|
||||
github.com/ipfs/go-log/v2 v2.6.0
|
||||
github.com/jedib0t/go-pretty/v6 v6.5.4
|
||||
github.com/joonix/log v0.0.0-20200409080653-9c1d2ceb5f1d
|
||||
github.com/json-iterator/go v1.1.12
|
||||
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213
|
||||
github.com/kisielk/errcheck v1.8.0
|
||||
github.com/kr/pretty v0.3.1
|
||||
github.com/libp2p/go-libp2p v0.39.1
|
||||
github.com/libp2p/go-libp2p-mplex v0.9.0
|
||||
github.com/libp2p/go-libp2p v0.42.0
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2
|
||||
github.com/libp2p/go-mplex v0.7.0
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible
|
||||
github.com/manifoldco/promptui v0.7.0
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b
|
||||
github.com/minio/highwayhash v1.0.2
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
|
||||
github.com/multiformats/go-multiaddr v0.14.0
|
||||
github.com/multiformats/go-multiaddr v0.16.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/paulbellamy/ratecounter v0.2.0
|
||||
github.com/pborman/uuid v1.2.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/prometheus/client_model v0.6.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/prometheus/client_model v0.6.2
|
||||
github.com/prometheus/prom2json v1.3.0
|
||||
github.com/prysmaticlabs/fastssz v0.0.0-20251103153600-259302269bfc
|
||||
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
|
||||
@@ -86,15 +84,15 @@ require (
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0
|
||||
go.opentelemetry.io/otel/sdk v1.34.0
|
||||
go.opentelemetry.io/otel/trace v1.35.0
|
||||
go.uber.org/automaxprocs v1.5.2
|
||||
go.uber.org/automaxprocs v1.6.0
|
||||
go.uber.org/mock v0.5.2
|
||||
golang.org/x/crypto v0.44.0
|
||||
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c
|
||||
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
|
||||
golang.org/x/sync v0.18.0
|
||||
golang.org/x/tools v0.39.0
|
||||
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
|
||||
google.golang.org/grpc v1.71.0
|
||||
google.golang.org/protobuf v1.36.5
|
||||
google.golang.org/protobuf v1.36.6
|
||||
gopkg.in/d4l3k/messagediff.v1 v1.2.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
@@ -122,19 +120,15 @@ require (
|
||||
github.com/cockroachdb/redact v1.1.5 // indirect
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
|
||||
github.com/consensys/bavard v0.1.22 // indirect
|
||||
github.com/containerd/cgroups v1.1.0 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
|
||||
github.com/deckarep/golang-set/v2 v2.6.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/deepmap/oapi-codegen v1.8.2 // indirect
|
||||
github.com/dlclark/regexp2 v1.7.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect
|
||||
github.com/elastic/gosigar v0.14.3 // indirect
|
||||
github.com/ethereum/c-kzg-4844 v1.0.0 // indirect
|
||||
github.com/ethereum/go-verkle v0.2.2 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
@@ -147,10 +141,9 @@ require (
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
|
||||
github.com/godbus/dbus/v5 v5.1.0 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
|
||||
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
github.com/graph-gophers/graphql-go v1.3.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect
|
||||
@@ -167,26 +160,26 @@ require (
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
|
||||
github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
|
||||
github.com/koron/go-ssdp v0.0.5 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/koron/go-ssdp v0.0.6 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.3 // indirect
|
||||
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 // indirect
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
|
||||
github.com/libp2p/go-msgio v0.3.0 // indirect
|
||||
github.com/libp2p/go-nat v0.2.0 // indirect
|
||||
github.com/libp2p/go-netroute v0.4.0 // indirect
|
||||
github.com/libp2p/go-reuseport v0.4.0 // indirect
|
||||
github.com/libp2p/go-yamux/v4 v4.0.2 // indirect
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
|
||||
github.com/lunixbochs/vtclean v1.0.0 // indirect
|
||||
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/miekg/dns v1.1.63 // indirect
|
||||
github.com/miekg/dns v1.1.66 // indirect
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
|
||||
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
|
||||
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
|
||||
@@ -201,46 +194,41 @@ require (
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
|
||||
github.com/multiformats/go-multibase v0.2.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.0 // indirect
|
||||
github.com/multiformats/go-multicodec v0.9.1 // indirect
|
||||
github.com/multiformats/go-multihash v0.2.3 // indirect
|
||||
github.com/multiformats/go-multistream v0.6.0 // indirect
|
||||
github.com/multiformats/go-multistream v0.6.1 // indirect
|
||||
github.com/multiformats/go-varint v0.0.7 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.2.0 // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.12 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.4 // indirect
|
||||
github.com/pion/ice/v2 v2.3.37 // indirect
|
||||
github.com/pion/ice/v4 v4.0.6 // indirect
|
||||
github.com/pion/interceptor v0.1.37 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.6 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/interceptor v0.1.40 // indirect
|
||||
github.com/pion/logging v0.2.3 // indirect
|
||||
github.com/pion/mdns v0.0.12 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/rtcp v1.2.15 // indirect
|
||||
github.com/pion/rtp v1.8.11 // indirect
|
||||
github.com/pion/sctp v1.8.35 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.10 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.4 // indirect
|
||||
github.com/pion/rtp v1.8.19 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/sdp/v3 v3.0.13 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.6 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/stun/v2 v2.0.0 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.10 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v2 v2.1.6 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/pion/webrtc/v4 v4.0.8 // indirect
|
||||
github.com/pion/turn/v4 v4.0.2 // indirect
|
||||
github.com/pion/webrtc/v4 v4.1.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/common v0.64.0 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/quic-go/qpack v0.5.1 // indirect
|
||||
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b // indirect
|
||||
github.com/quic-go/quic-go v0.52.0 // indirect
|
||||
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 // indirect
|
||||
github.com/raulk/go-watchdog v1.3.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
@@ -258,23 +246,23 @@ require (
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
go.uber.org/dig v1.18.0 // indirect
|
||||
go.uber.org/fx v1.23.0 // indirect
|
||||
go.uber.org/dig v1.19.0 // indirect
|
||||
go.uber.org/fx v1.24.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/net v0.47.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/telemetry v0.0.0-20251111182119-bc8e575c7b54 // indirect
|
||||
golang.org/x/term v0.37.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated // indirect
|
||||
gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||
lukechampine.com/blake3 v1.3.0 // indirect
|
||||
lukechampine.com/blake3 v1.4.1 // indirect
|
||||
rsc.io/tmplfunc v0.0.3 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
|
||||
206
go.sum
206
go.sum
@@ -91,8 +91,6 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
|
||||
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
|
||||
github.com/bazelbuild/rules_go v0.23.2 h1:Wxu7JjqnF78cKZbsBsARLSXx/jlGaSLCnUV3mTlyHvM=
|
||||
github.com/bazelbuild/rules_go v0.23.2/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
|
||||
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
@@ -130,7 +128,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
@@ -156,20 +153,13 @@ github.com/consensys/bavard v0.1.22 h1:Uw2CGvbXSZWhqK59X0VG/zOjpTFuOMcPLStrp1ihI
|
||||
github.com/consensys/bavard v0.1.22/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
|
||||
github.com/consensys/gnark-crypto v0.14.0 h1:DDBdl4HaBtdQsq/wfMwJvZNE80sHidrK3Nfrefatm0E=
|
||||
github.com/consensys/gnark-crypto v0.14.0/go.mod h1:CU4UijNPsHawiVGNxe9co07FkzCeWHHrb1li/n1XoU0=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
|
||||
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg=
|
||||
@@ -188,10 +178,10 @@ github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR
|
||||
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
|
||||
github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM=
|
||||
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU=
|
||||
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
|
||||
@@ -203,9 +193,6 @@ github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUn
|
||||
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo=
|
||||
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 h1:qwcF+vdFrvPSEUDSX5RVoRccG8a5DhOdWdQ4zN62zzo=
|
||||
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
|
||||
@@ -220,9 +207,6 @@ github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo=
|
||||
github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
|
||||
github.com/emicklei/dot v0.11.0 h1:Ase39UD9T9fRBOb5ptgpixrxfx8abVzNWZi2+lr53PI=
|
||||
github.com/emicklei/dot v0.11.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
|
||||
@@ -321,17 +305,12 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
|
||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
|
||||
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
@@ -415,14 +394,13 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
|
||||
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg=
|
||||
github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
|
||||
github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
@@ -515,8 +493,8 @@ github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 h1:vilfsD
|
||||
github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
|
||||
github.com/ipfs/go-cid v0.5.0 h1:goEKKhaGm0ul11IHA7I6p1GmKz8kEYniqFopaB5Otwg=
|
||||
github.com/ipfs/go-cid v0.5.0/go.mod h1:0L7vmeNXpQpUS9vt+yEARkJ8rOg43DF3iPgn4GIN0mk=
|
||||
github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY=
|
||||
github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI=
|
||||
github.com/ipfs/go-log/v2 v2.6.0 h1:2Nu1KKQQ2ayonKp4MPo6pXCjqw1ULc9iohRqWV5EYqg=
|
||||
github.com/ipfs/go-log/v2 v2.6.0/go.mod h1:p+Efr3qaY5YXpx9TX7MoLCSEZX5boSWj9wh86P5HJa8=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
|
||||
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
|
||||
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
|
||||
@@ -542,24 +520,23 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
|
||||
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg=
|
||||
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/errcheck v1.8.0 h1:ZX/URYa7ilESY19ik/vBmCn6zdGQLxACwjAcWbHlYlg=
|
||||
github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
|
||||
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/koron/go-ssdp v0.0.5 h1:E1iSMxIs4WqxTbIBLtmNBeOOC+1sCIXQeqTWVnpmwhk=
|
||||
github.com/koron/go-ssdp v0.0.5/go.mod h1:Qm59B7hpKpDqfyRNWRNr00jGwLdXjDyZh6y7rH6VS0w=
|
||||
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
|
||||
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -585,28 +562,22 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6
|
||||
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0 h1:EIZzjmeOE6c8Dav0sNv35vhZxATIXWZg6j/C08XmmDw=
|
||||
github.com/libp2p/go-flow-metrics v0.2.0/go.mod h1:st3qqfu8+pMfh+9Mzqb2GTiwrAGjIPszEjZmtksN8Jc=
|
||||
github.com/libp2p/go-libp2p v0.39.1 h1:1Ur6rPCf3GR+g8jkrnaQaM0ha2IGespsnNlCqJLLALE=
|
||||
github.com/libp2p/go-libp2p v0.39.1/go.mod h1:3zicI8Lp7Isun+Afo/JOACUbbJqqR2owK6RQWFsVAbI=
|
||||
github.com/libp2p/go-libp2p v0.42.0 h1:A8foZk+ZEhZTv0Jb++7xUFlrFhBDv4j2Vh/uq4YX+KE=
|
||||
github.com/libp2p/go-libp2p v0.42.0/go.mod h1:4NGcjbD9OIvFiSRb0XueCO19zJ4kSPK5vkyyOUYmMro=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94=
|
||||
github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8=
|
||||
github.com/libp2p/go-libp2p-mplex v0.9.0 h1:R58pDRAmuBXkYugbSSXR9wrTX3+1pFM1xP2bLuodIq8=
|
||||
github.com/libp2p/go-libp2p-mplex v0.9.0/go.mod h1:ro1i4kuwiFT+uMPbIDIFkcLs1KRbNp0QwnUXM+P64Og=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2 h1:nT5lFHPQOFJcp9CW8hpKtvbpQNdl2udJuzLQWbgRum8=
|
||||
github.com/libp2p/go-libp2p-pubsub v0.14.2/go.mod h1:MKPU5vMI8RRFyTP0HfdsF9cLmL1nHAeJm44AxJGJx44=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
|
||||
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
|
||||
github.com/libp2p/go-mplex v0.7.0 h1:BDhFZdlk5tbr0oyFq/xv/NPGfjbnrsDam1EvutpBDbY=
|
||||
github.com/libp2p/go-mplex v0.7.0/go.mod h1:rW8ThnRcYWft/Jb2jeORBmPd6xuG3dGxWN/W168L9EU=
|
||||
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
|
||||
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
|
||||
github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
|
||||
github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
|
||||
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
|
||||
github.com/libp2p/go-netroute v0.4.0/go.mod h1:Nkd5ShYgSMS5MUKy/MU2T57xFoOKvvLR92Lic48LEyA=
|
||||
github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s=
|
||||
github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.2 h1:nrLh89LN/LEiqcFiqdKDRHjGstN300C1269K/EX0CPU=
|
||||
github.com/libp2p/go-yamux/v4 v4.0.2/go.mod h1:C808cCRgOs1iBwY4S71T5oxgMxgLmqUw56qh4AeBW2o=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
|
||||
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
|
||||
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
|
||||
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
|
||||
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
|
||||
@@ -639,7 +610,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
@@ -655,8 +625,8 @@ github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1f
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
|
||||
github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
|
||||
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
|
||||
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
|
||||
@@ -705,21 +675,21 @@ github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYg
|
||||
github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0=
|
||||
github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4=
|
||||
github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo=
|
||||
github.com/multiformats/go-multiaddr v0.14.0 h1:bfrHrJhrRuh/NXH5mCnemjpbGjzRw/b+tJFOD41g2tU=
|
||||
github.com/multiformats/go-multiaddr v0.14.0/go.mod h1:6EkVAxtznq2yC3QT5CM1UTAwG0GTP3EWAIcjHuzQ+r4=
|
||||
github.com/multiformats/go-multiaddr v0.16.0 h1:oGWEVKioVQcdIOBlYM8BH1rZDWOGJSqr9/BKl6zQ4qc=
|
||||
github.com/multiformats/go-multiaddr v0.16.0/go.mod h1:JSVUmXDjsVFiW7RjIFMP7+Ev+h1DTbiJgVeTV/tcmP0=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1 h1:whi/uCLbDS3mSEUMb1MsoT4uzUeZB0N32yzufqS0i5M=
|
||||
github.com/multiformats/go-multiaddr-dns v0.4.1/go.mod h1:7hfthtB4E4pQwirrz+J0CcDUfbWzTqEzVyYKKIKpgkc=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=
|
||||
github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo=
|
||||
github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g=
|
||||
github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk=
|
||||
github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg=
|
||||
github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k=
|
||||
github.com/multiformats/go-multicodec v0.9.1 h1:x/Fuxr7ZuR4jJV4Os5g444F7xC4XmyUaT/FWtE+9Zjo=
|
||||
github.com/multiformats/go-multicodec v0.9.1/go.mod h1:LLWNMtyV5ithSBUo3vFIMaeDy+h3EbkMTek1m+Fybbo=
|
||||
github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew=
|
||||
github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U=
|
||||
github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM=
|
||||
github.com/multiformats/go-multistream v0.6.0 h1:ZaHKbsL404720283o4c/IHQXiS6gb8qAN5EIJ4PN5EA=
|
||||
github.com/multiformats/go-multistream v0.6.0/go.mod h1:MOyoG5otO24cHIg8kf9QW2/NozURlkP/rvi2FQJyCPg=
|
||||
github.com/multiformats/go-multistream v0.6.1 h1:4aoX5v6T+yWmc2raBHsTvzmFhOI8WVOer28DeBBEYdQ=
|
||||
github.com/multiformats/go-multistream v0.6.1/go.mod h1:ksQf6kqHAb6zIsyw7Zm+gAuVo57Qbq84E27YlYqavqw=
|
||||
github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8=
|
||||
github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
@@ -735,9 +705,8 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
|
||||
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
|
||||
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
|
||||
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
@@ -750,20 +719,17 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
|
||||
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
|
||||
github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
|
||||
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
|
||||
github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
|
||||
github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
|
||||
github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
|
||||
github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
|
||||
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
|
||||
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
@@ -805,33 +771,29 @@ github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oL
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk=
|
||||
github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
|
||||
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
|
||||
github.com/pion/ice/v2 v2.3.37 h1:ObIdaNDu1rCo7hObhs34YSBcO7fjslJMZV0ux+uZWh0=
|
||||
github.com/pion/ice/v2 v2.3.37/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ=
|
||||
github.com/pion/ice/v4 v4.0.6 h1:jmM9HwI9lfetQV/39uD0nY4y++XZNPhvzIPCb8EwxUM=
|
||||
github.com/pion/ice/v4 v4.0.6/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
|
||||
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
|
||||
github.com/pion/dtls/v3 v3.0.6 h1:7Hkd8WhAJNbRgq9RgdNh1aaWlZlGpYTzdqjy9x9sK2E=
|
||||
github.com/pion/dtls/v3 v3.0.6/go.mod h1:iJxNQ3Uhn1NZWOMWlLxEEHAN5yX7GyPvvKw04v9bzYU=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/logging v0.2.3 h1:gHuf0zpoh1GW67Nr6Gj4cv5Z9ZscU7g/EaoC/Ke/igI=
|
||||
github.com/pion/logging v0.2.3/go.mod h1:z8YfknkquMe1csOrxK5kc+5/ZPAzMxbKLX5aXpbpC90=
|
||||
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
|
||||
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.11 h1:17xjnY5WO5hgO6SD3/NTIUPvSFw/PbLsIJyz1r1yNIk=
|
||||
github.com/pion/rtp v1.8.11/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
|
||||
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
|
||||
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
|
||||
github.com/pion/sdp/v3 v3.0.10 h1:6MChLE/1xYB+CjumMw+gZ9ufp2DPApuVSnDT8t5MIgA=
|
||||
github.com/pion/sdp/v3 v3.0.10/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/rtp v1.8.19 h1:jhdO/3XhL/aKm/wARFVmvTfq0lC/CvN1xwYKmduly3c=
github.com/pion/rtp v1.8.19/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.13 h1:uN3SS2b+QDZnWXgdr69SM8KB4EbcnPnPf2Laxhty/l4=
github.com/pion/sdp/v3 v3.0.13/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.6 h1:E2gyj1f5X10sB/qILUGIkL4C2CqK269Xq167PbGCc/4=
github.com/pion/srtp/v3 v3.0.6/go.mod h1:BxvziG3v/armJHAaJ87euvkhHqWe9I7iiOy50K2QkhY=
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
github.com/pion/stun/v2 v2.0.0 h1:A5+wXKLAypxQri59+tmQKVs7+l6mMM+3d+eER9ifRU0=
@@ -845,13 +807,10 @@ github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc=
github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.8 h1:T1ZmnT9qxIJIt4d8XoiMOBrTClGHDDXNg9e/fh018Qc=
github.com/pion/webrtc/v4 v4.0.8/go.mod h1:HHBeUVBAC+j4ZFnYhovEFStF02Arb1EyD4G7e7HBTJw=
github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -874,16 +833,16 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
@@ -891,8 +850,8 @@ github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt2
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -901,8 +860,8 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prysmaticlabs/fastssz v0.0.0-20251103153600-259302269bfc h1:ASmh3y4ALne2OoabF5pPL8OcIpBko8gFMg5018MxkBI=
@@ -916,14 +875,12 @@ github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294 h
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294/go.mod h1:ZVEbRdnMkGhp/pu35zq4SXxtvUwWK0J1MATtekZpH2Y=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b h1:x09Agz4ATTMEP3qb5P0MRxNZfd6O9wAyK3qwwqQZVQc=
github.com/quic-go/quic-go v0.49.1-0.20250925085836-275c172fec2b/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
github.com/quic-go/quic-go v0.52.0 h1:/SlHrCRElyaU6MaEPKqKr9z83sBg2v4FLLvWM+Z47pA=
github.com/quic-go/quic-go v0.52.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66 h1:4WFk6u3sOT6pLa1kQ50ZVdm8BQFgJNA117cepZxtLIg=
github.com/quic-go/webtransport-go v0.8.1-0.20241018022711-4ac2c9250e66/go.mod h1:Vp72IJajgeOL6ddqrAhmp7IM9zbTcgkQxD/YdxrVwMw=
github.com/r3labs/sse/v2 v2.10.0 h1:hFEkLLFY4LDifoHdiCN/LlGBAdVJYsANaLqNYa1l/v0=
github.com/r3labs/sse/v2 v2.10.0/go.mod h1:Igau6Whc+F17QUgML1fYe1VPZzTV6EMCnYktEmkNJ7I=
github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk=
github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -976,7 +933,6 @@ github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5k
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -1044,7 +1000,6 @@ github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10 h1:CQh
github.com/umbracle/gohashtree v0.0.2-alpha.0.20230207094856-5b775a815c10/go.mod h1:x/Pa0FF5Te9kdrlZKJK82YmAkvL8+f989USgz6Jiw7M=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w=
github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ=
github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
@@ -1122,27 +1077,23 @@ go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw=
go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg=
go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
@@ -1184,8 +1135,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c h1:KL/ZBHXgKGVmuZBZ01Lt57yE5ws8ZPSkkihmEyq7FXc=
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -1286,8 +1237,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1305,7 +1256,6 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1342,7 +1292,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200219091948-cb0a6d8edb6c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1377,7 +1326,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1389,7 +1337,6 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
@@ -1430,13 +1377,12 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1492,7 +1438,6 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
@@ -1621,8 +1566,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y=
@@ -1662,7 +1607,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
@@ -1688,8 +1632,8 @@ k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7F
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE=
lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

@@ -1,13 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["analyzer.go"],
    importpath = "github.com/OffchainLabs/prysm/v7/tools/analyzers/httperror",
    visibility = ["//visibility:public"],
    deps = [
        "@org_golang_x_tools//go/analysis:go_default_library",
        "@org_golang_x_tools//go/analysis/passes/inspect:go_default_library",
        "@org_golang_x_tools//go/ast/inspector:go_default_library",
    ],
)
Some files were not shown because too many files have changed in this diff.