Revert "Replace a Few IntFlags with Uint64Flags (#9959)" (#10163)

* Revert "Replace a Few IntFlags with Uint64Flags (#9959)"

This reverts commit 790bf03123.

* fix

* fix
Author: Nishant Das
Date: 2022-02-01 16:51:17 +08:00
Committed by: GitHub
Parent: eef2122a9e
Commit: e83c9d5862
25 changed files with 58 additions and 57 deletions
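
At its core, this revert switches several urfave/cli flag definitions from Uint64Flag back to IntFlag, so values are read with ctx.Int (or ctx.Uint for the max-peers flag) and widened to uint64 only at the call sites that need it. The following is a minimal, hypothetical sketch of that pattern using one of the reverted flags; it is not the Prysm wiring itself.

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// blockBatchLimit mirrors the reverted definition: an IntFlag rather than a Uint64Flag.
var blockBatchLimit = &cli.IntFlag{
	Name:  "block-batch-limit",
	Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
	Value: 64,
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{blockBatchLimit},
		Action: func(ctx *cli.Context) error {
			// Read the flag as an int...
			batch := ctx.Int(blockBatchLimit.Name)
			// ...and widen to uint64 only where the arithmetic requires it,
			// as the revert does with uint64(flags.Get().BlockBatchLimit).
			fmt.Println("batch size:", uint64(batch))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```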

@@ -489,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
 MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
 TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
 UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
-MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name),
+MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
 AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
 DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
 EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),

@@ -23,7 +23,7 @@ type Config struct {
 MetaDataDir string
 TCPPort uint
 UDPPort uint
-MaxPeers uint64
+MaxPeers uint
 AllowListCIDR string
 DenyListCIDR []string
 StateNotifier statefeed.Notifier

@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
 ScorerParams: &scorers.Config{},
 }),
 host: mockp2p.NewTestP2P(t).BHost,
-cfg: &Config{MaxPeers: uint64(limit)},
+cfg: &Config{MaxPeers: uint(limit)},
 }
 var err error
 s.addrFilter, err = configureFilter(&Config{})

@@ -77,7 +77,7 @@ type PeerManager interface {
 ENR() *enr.Record
 DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
 RefreshENR()
-FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
+FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
 AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
 }

@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
 if scorer.config.StalePeerRefreshInterval == 0 {
 scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
 }
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 scorer.maxScore = 1.0
 if batchSize > 0 {
 totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
 if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
 return s.maxScore
 }
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 if batchSize > 0 {
 processedBatches := float64(peerData.ProcessedBlocks / batchSize)
 score += processedBatches * s.config.ProcessedBatchWeight
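
The scorer hunks above reintroduce an explicit uint64 conversion because, with BlockBatchLimit an int flag again, it can no longer be divided directly against uint64 fields such as ProcessedBlocksCap. A small standalone sketch of that constraint (the variable names and values here are illustrative, not the scorer's actual fields):

```go
package main

import "fmt"

func main() {
	// Illustrative values only; in the scorer, ProcessedBlocksCap is a uint64
	// config field and BlockBatchLimit now comes back from the flags as an int.
	var processedBlocksCap uint64 = 20 * 64
	blockBatchLimit := 64

	// processedBlocksCap / blockBatchLimit would not compile (mismatched types),
	// hence batchSize := uint64(flags.Get().BlockBatchLimit) in the diff.
	batchSize := uint64(blockBatchLimit)
	totalBatches := float64(processedBlocksCap / batchSize)
	fmt.Println(totalBatches) // 20
}
```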

@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 tests := []struct {
 name string
 update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
 },
 })
 scorer := peerStatuses.Scorers().BlockProviderScorer()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 r := rand.NewDeterministicGenerator()
 reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
 }
 func TestScorers_BlockProvider_Sorted(t *testing.T) {
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 tests := []struct {
 name string
 update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
 func TestScorers_BlockProvider_MaxScore(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 tests := []struct {
 name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
 func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
 tests := []struct {

@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 t.Run("default config", func(t *testing.T) {
 peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
 ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
 defer cancel()
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
 scores := make(map[string]float64, len(pids))

@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
 // BestNonFinalized returns the highest known epoch, higher than ours,
 // and is shared by at least minPeers.
-func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
+func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
 connected := p.Connected()
 epochVotes := make(map[types.Epoch]uint64)
 pidEpoch := make(map[peer.ID]types.Epoch, len(connected))

@@ -33,11 +33,12 @@ const syncLockerVal = 100
 // subscribed to a particular subnet. Then we try to connect
 // with those peers. This method will block until the required amount of
 // peers are found, the method only exits in the event of context timeouts.
-func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
+func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
+	index uint64, threshold int) (bool, error) {
 ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
 defer span.End()
-span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
+span.AddAttributes(trace.Int64Attribute("index", int64(index)))
 if s.dv5Listener == nil {
 // return if discovery isn't set
@@ -48,14 +49,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
 iterator := s.dv5Listener.RandomNodes()
 switch {
 case strings.Contains(topic, GossipAttestationMessage):
-iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
+iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
 case strings.Contains(topic, GossipSyncCommitteeMessage):
-iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
+iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
 default:
 return false, errors.New("no subnet exists for provided topic")
 }
-currNum := uint64(len(s.pubsub.ListPeers(topic)))
+currNum := len(s.pubsub.ListPeers(topic))
 wg := new(sync.WaitGroup)
 for {
 if err := ctx.Err(); err != nil {
@@ -80,7 +81,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
 }
 // Wait for all dials to be completed.
 wg.Wait()
-currNum = uint64(len(s.pubsub.ListPeers(topic)))
+currNum = len(s.pubsub.ListPeers(topic))
 }
 return true, nil
 }
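
With the reverted signature, FindPeersWithSubnet takes the threshold as a plain int, which lets callers pass len() results and int flag values without conversion. Below is a minimal, self-contained sketch of a hypothetical caller against an interface of that shape; subnetFinder, fakeFinder, and ensureSubnetPeers are illustrative names, not Prysm APIs.

```go
package main

import (
	"context"
	"fmt"
)

// subnetFinder mirrors only the relevant method of the PeerManager interface
// after the revert: the threshold parameter is an int again.
type subnetFinder interface {
	FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
}

// fakeFinder is a trivial stand-in so the example runs on its own.
type fakeFinder struct{}

func (fakeFinder) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, threshold int) (bool, error) {
	return threshold <= 3, nil
}

// ensureSubnetPeers is a hypothetical helper: a peer count from len() or an
// int flag can be passed straight through as the threshold, no uint64 casts.
func ensureSubnetPeers(ctx context.Context, f subnetFinder, topic string, subnet uint64, want int) error {
	ok, err := f.FindPeersWithSubnet(ctx, topic, subnet, want)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("could not find %d peers for subnet %d", want, subnet)
	}
	return nil
}

func main() {
	err := ensureSubnetPeers(context.Background(), fakeFinder{}, "/eth2/attnets/5", 5, 3)
	fmt.Println("err:", err)
}
```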

@@ -61,7 +61,7 @@ func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 }
 // FindPeersWithSubnet mocks the p2p func.
-func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
 return false, nil
 }

@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 func (_ MockPeerManager) RefreshENR() {}
 // FindPeersWithSubnet .
-func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
 return true, nil
 }

@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
 }
 // FindPeersWithSubnet mocks the p2p func.
-func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
 return false, nil
 }

@@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID)
 // waitForMinimumPeers spins and waits up until enough peers are available.
 func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
-required := uint64(params.BeaconConfig().MaxPeersToSync)
+required := params.BeaconConfig().MaxPeersToSync
 if flags.Get().MinimumSyncPeers < required {
 required = flags.Get().MinimumSyncPeers
 }
@@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
 headEpoch := slots.ToEpoch(f.chain.HeadSlot())
 _, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
 }
-if uint64(len(peers)) >= required {
+if len(peers) >= required {
 return peers, nil
 }
 log.WithFields(logrus.Fields{
@@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
 // trimPeers limits peer list, returning only specified percentage of peers.
 // Takes system constraints into account (min/max peers to sync).
 func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
-required := uint64(params.BeaconConfig().MaxPeersToSync)
+required := params.BeaconConfig().MaxPeersToSync
 if flags.Get().MinimumSyncPeers < required {
 required = flags.Get().MinimumSyncPeers
 }
 // Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
 limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
 // Limit cannot be less that minimum peers required by sync mechanism.
-limit = mathutil.Max(limit, required)
+limit = mathutil.Max(limit, uint64(required))
 // Limit cannot be higher than number of peers available (safe-guard).
 limit = mathutil.Min(limit, uint64(len(peers)))
 return peers[:limit]
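
In trimPeers above, required becomes an int again while the limit arithmetic stays in uint64, so the conversion moves into the mathutil.Max call. A self-contained sketch of the same clamping logic, using plain comparisons and []string in place of []peer.ID (trimToPercentage is an illustrative name):

```go
package main

import (
	"fmt"
	"math"
)

// trimToPercentage mimics trimPeers: keep a percentage of the peers, but never
// fewer than required and never more than are available.
func trimToPercentage(peers []string, peersPercentage float64, required int) []string {
	limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
	// Limit cannot be less than the minimum required by the sync mechanism;
	// required is an int after the revert, hence the uint64 conversion here.
	if limit < uint64(required) {
		limit = uint64(required)
	}
	// Limit cannot be higher than the number of peers available.
	if limit > uint64(len(peers)) {
		limit = uint64(len(peers))
	}
	return peers[:limit]
}

func main() {
	peers := []string{"a", "b", "c", "d", "e"}
	fmt.Println(trimToPercentage(peers, 0.2, 3)) // [a b c]
}
```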

@@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
 capacityWeight float64
 }
-batchSize := flags.Get().BlockBatchLimit
+batchSize := uint64(flags.Get().BlockBatchLimit)
 tests := []struct {
 name string
 args args

@@ -371,7 +371,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 }
 func TestBlocksFetcher_scheduleRequest(t *testing.T) {
-blockBatchLimit := flags.Get().BlockBatchLimit
+blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
 t.Run("context cancellation", func(t *testing.T) {
 ctx, cancel := context.WithCancel(context.Background())
 fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
@@ -425,7 +425,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 })
 cancel()
-response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
+response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
 assert.ErrorContains(t, "context canceled", response.err)
 })
@@ -440,7 +440,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
 defer reqCancel()
 go func() {
-response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */)
+response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
 select {
 case <-ctx.Done():
 case fetcher.fetchResponses <- response:
@@ -458,7 +458,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
 blocks = resp.blocks
 }
 }
-if uint64(len(blocks)) != blockBatchLimit {
+if uint64(len(blocks)) != uint64(blockBatchLimit) {
 t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
 }
@@ -509,11 +509,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
 req := &ethpb.BeaconBlocksByRangeRequest{
 StartSlot: 1,
 Step: 1,
-Count: blockBatchLimit,
+Count: uint64(blockBatchLimit),
 }
 blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
 assert.NoError(t, err)
-assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
+assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
 // Test context cancellation.
 ctx, cancel = context.WithCancel(context.Background())

@@ -200,7 +200,7 @@ func TestBlocksFetcher_findFork(t *testing.T) {
 peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
 }
-blockBatchLimit := flags.Get().BlockBatchLimit * 2
+blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
 pidInd := 0
 for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
 req := &ethpb.BeaconBlocksByRangeRequest{

@@ -163,13 +163,13 @@ func (s *Service) Resync() error {
 }
 func (s *Service) waitForMinimumPeers() {
-required := uint64(params.BeaconConfig().MaxPeersToSync)
+required := params.BeaconConfig().MaxPeersToSync
 if flags.Get().MinimumSyncPeers < required {
 required = flags.Get().MinimumSyncPeers
 }
 for {
 _, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch)
-if uint64(len(peers)) >= required {
+if len(peers) >= required {
 break
 }
 log.WithFields(logrus.Fields{

@@ -27,7 +27,7 @@ import (
 )
 func TestService_Constants(t *testing.T) {
-if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > uint64(1000) {
+if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 {
 t.Fatal("rpc rejects requests over 1000 range slots")
 }
 }

@@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
 // The initial count for the first batch to be returned back.
 count := m.Count
-allowedBlocksPerSecond := flags.Get().BlockBatchLimit
+allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
 if count > allowedBlocksPerSecond {
 count = allowedBlocksPerSecond
 }
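
The by-range handler caps the requested count at the batch limit; since the flag is an int again while the request's Count field is a uint64, the limit is widened before comparison. A small sketch of just that clamp (clampCount is an illustrative helper, not part of the handler):

```go
package main

import "fmt"

// clampCount widens the int batch limit to uint64 and caps the requested count,
// mirroring the pattern in beaconBlocksByRangeRPCHandler above.
func clampCount(requested uint64, blockBatchLimit int) uint64 {
	allowed := uint64(blockBatchLimit)
	if requested > allowed {
		return allowed
	}
	return requested
}

func main() {
	fmt.Println(clampCount(200, 64)) // 64
	fmt.Println(clampCount(32, 64))  // 32
}
```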

@@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
 req := &ethpb.BeaconBlocksByRangeRequest{
 StartSlot: 100,
 Step: 1,
-Count: flags.Get().BlockBatchLimit,
+Count: uint64(flags.Get().BlockBatchLimit),
 }
 saveBlocks(req)
-for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ {
+for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
 assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
 }

@@ -640,7 +640,7 @@ func (s *Service) unSubscribeFromTopic(topic string) {
 // find if we have peers who are subscribed to the same subnet
 func (s *Service) validPeersExist(subnetTopic string) bool {
 numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
-return uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet
+return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
 }
 func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
@@ -681,7 +681,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
 for _, sub := range wantedSubs {
 subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix()
 peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic)
-if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet {
+if len(peers) > flags.Get().MinimumPeersPerSubnet {
 // In the event we have more than the minimum, we can
 // mark the remaining as viable for pruning.
 peers = peers[:flags.Get().MinimumPeersPerSubnet]

@@ -490,7 +490,7 @@ func TestFilterSubnetPeers(t *testing.T) {
 // Try with only peers from subnet 20.
 wantedPeers = []peer.ID{p2.BHost.ID()}
 // Connect an excess amount of peers in the particular subnet.
-for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ {
+for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ {
 nPeer := createPeer(t, subnet20)
 p.Connect(nPeer)
 wantedPeers = append(wantedPeers, nPeer.BHost.ID())

@@ -88,7 +88,7 @@ var (
 }
 // MinSyncPeers specifies the required number of successful peer handshakes in order
 // to start syncing with external peers.
-MinSyncPeers = &cli.Uint64Flag{
+MinSyncPeers = &cli.IntFlag{
 Name: "min-sync-peers",
 Usage: "The required number of valid peers to connect with before syncing.",
 Value: 3,
@@ -123,13 +123,13 @@ var (
 Usage: "Does not run the discoveryV5 dht.",
 }
 // BlockBatchLimit specifies the requested block batch size.
-BlockBatchLimit = &cli.Uint64Flag{
+BlockBatchLimit = &cli.IntFlag{
 Name: "block-batch-limit",
 Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
 Value: 64,
 }
 // BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase.
-BlockBatchLimitBurstFactor = &cli.Uint64Flag{
+BlockBatchLimitBurstFactor = &cli.IntFlag{
 Name: "block-batch-limit-burst-factor",
 Usage: "The factor by which block batch limit may increase on burst.",
 Value: 10,

@@ -12,10 +12,10 @@ type GlobalFlags struct {
 DisableSync bool
 DisableDiscv5 bool
 SubscribeToAllSubnets bool
-MinimumSyncPeers uint64
-MinimumPeersPerSubnet uint64
-BlockBatchLimit uint64
-BlockBatchLimitBurstFactor uint64
+MinimumSyncPeers int
+MinimumPeersPerSubnet int
+BlockBatchLimit int
+BlockBatchLimitBurstFactor int
 }
 var globalConfig *GlobalFlags
@@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
 cfg.SubscribeToAllSubnets = true
 }
 cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
-cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name)
-cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name)
-cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name)
+cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
+cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
+cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
 configureMinimumPeers(ctx, cfg)
 Init(cfg)
 }
 func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
-cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name)
-maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name)
+cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
+maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
 if cfg.MinimumSyncPeers > maxPeers {
 log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
 cfg.MinimumSyncPeers = maxPeers
@@ -150,7 +150,7 @@ var (
 Value: "",
 }
 // P2PMaxPeers defines a flag to specify the max number of peers in libp2p.
-P2PMaxPeers = &cli.Uint64Flag{
+P2PMaxPeers = &cli.IntFlag{
 Name: "p2p-max-peers",
 Usage: "The max number of p2p peers to maintain.",
 Value: 45,