diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go
index d0f0a5ce31..c292a00a2d 100644
--- a/beacon-chain/node/node.go
+++ b/beacon-chain/node/node.go
@@ -489,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
         MetaDataDir:   cliCtx.String(cmd.P2PMetadata.Name),
         TCPPort:       cliCtx.Uint(cmd.P2PTCPPort.Name),
         UDPPort:       cliCtx.Uint(cmd.P2PUDPPort.Name),
-        MaxPeers:      cliCtx.Uint64(cmd.P2PMaxPeers.Name),
+        MaxPeers:      cliCtx.Uint(cmd.P2PMaxPeers.Name),
         AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
         DenyListCIDR:  slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
         EnableUPnP:    cliCtx.Bool(cmd.EnableUPnPFlag.Name),
diff --git a/beacon-chain/p2p/config.go b/beacon-chain/p2p/config.go
index 8dc9824405..6aa38576fd 100644
--- a/beacon-chain/p2p/config.go
+++ b/beacon-chain/p2p/config.go
@@ -23,7 +23,7 @@ type Config struct {
     MetaDataDir   string
     TCPPort       uint
     UDPPort       uint
-    MaxPeers      uint64
+    MaxPeers      uint
     AllowListCIDR string
     DenyListCIDR  []string
     StateNotifier statefeed.Notifier
diff --git a/beacon-chain/p2p/connection_gater_test.go b/beacon-chain/p2p/connection_gater_test.go
index d5f566f932..6ab641251b 100644
--- a/beacon-chain/p2p/connection_gater_test.go
+++ b/beacon-chain/p2p/connection_gater_test.go
@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
             ScorerParams: &scorers.Config{},
         }),
         host: mockp2p.NewTestP2P(t).BHost,
-        cfg:  &Config{MaxPeers: uint64(limit)},
+        cfg:  &Config{MaxPeers: uint(limit)},
     }
     var err error
     s.addrFilter, err = configureFilter(&Config{})
diff --git a/beacon-chain/p2p/interfaces.go b/beacon-chain/p2p/interfaces.go
index d1bf920918..4dfa062a89 100644
--- a/beacon-chain/p2p/interfaces.go
+++ b/beacon-chain/p2p/interfaces.go
@@ -77,7 +77,7 @@ type PeerManager interface {
     ENR() *enr.Record
     DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
     RefreshENR()
-    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
+    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
     AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
 }
diff --git a/beacon-chain/p2p/peers/scorers/block_providers.go b/beacon-chain/p2p/peers/scorers/block_providers.go
index 33ba675bf7..329bbe5746 100644
--- a/beacon-chain/p2p/peers/scorers/block_providers.go
+++ b/beacon-chain/p2p/peers/scorers/block_providers.go
@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
     if scorer.config.StalePeerRefreshInterval == 0 {
         scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
     }
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     scorer.maxScore = 1.0
     if batchSize > 0 {
         totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
     if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
         return s.maxScore
     }
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     if batchSize > 0 {
         processedBatches := float64(peerData.ProcessedBlocks / batchSize)
         score += processedBatches * s.config.ProcessedBatchWeight
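The hunks above all follow one pattern: peer counts and thresholds are plain machine-sized integers (`uint`/`int`), read with the matching `cli.Context` accessor, so the conversion happens exactly once at the CLI boundary instead of carrying `uint64` through the P2P config. A minimal sketch of that pattern (not Prysm code; the flag name is a stand-in for `cmd.P2PMaxPeers`):

```go
// Sketch: parse a peer-count flag as a machine-sized integer once, at the
// CLI boundary, and keep that type in the config struct from then on.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// config mirrors the shape of p2p.Config after this change: peer counts
// are plain uint, not uint64.
type config struct {
	MaxPeers uint
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// Hypothetical flag standing in for cmd.P2PMaxPeers.
			&cli.UintFlag{Name: "p2p-max-peers", Value: 45},
		},
		Action: func(c *cli.Context) error {
			// The accessor type matches the flag type, so no cast is needed.
			cfg := &config{MaxPeers: c.Uint("p2p-max-peers")}
			fmt.Printf("max peers: %d\n", cfg.MaxPeers)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```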
diff --git a/beacon-chain/p2p/peers/scorers/block_providers_test.go b/beacon-chain/p2p/peers/scorers/block_providers_test.go
index 16427e045e..27ef73ffd7 100644
--- a/beacon-chain/p2p/peers/scorers/block_providers_test.go
+++ b/beacon-chain/p2p/peers/scorers/block_providers_test.go
@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     tests := []struct {
         name   string
         update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
         },
     })
     scorer := peerStatuses.Scorers().BlockProviderScorer()
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     r := rand.NewDeterministicGenerator()
 
     reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
 }
 
 func TestScorers_BlockProvider_Sorted(t *testing.T) {
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     tests := []struct {
         name   string
         update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
 func TestScorers_BlockProvider_MaxScore(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
 
     tests := []struct {
         name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
 func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
 
     tests := []struct {
diff --git a/beacon-chain/p2p/peers/scorers/service_test.go b/beacon-chain/p2p/peers/scorers/service_test.go
index d18fc46bff..a55ef31de4 100644
--- a/beacon-chain/p2p/peers/scorers/service_test.go
+++ b/beacon-chain/p2p/peers/scorers/service_test.go
@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
 
     t.Run("default config", func(t *testing.T) {
         peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
     defer cancel()
 
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
 
     peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
         scores := make(map[string]float64, len(pids))
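The scorer and its tests all pick up the same detail: `BlockBatchLimit` is now an `int` flag while the peer-data counters (`ProcessedBlocks`, `ProcessedBlocksCap`) remain `uint64`, so the batch size is widened once per function before the integer division. A self-contained sketch of that arithmetic, with illustrative names and weights:

```go
// Sketch of the block-provider scoring arithmetic the tests above exercise.
// Names and the weight value are illustrative, not taken from Prysm config.
package main

import "fmt"

func blockProviderScore(processedBlocks, processedBlocksCap uint64, blockBatchLimit int, batchWeight float64) float64 {
	batchSize := uint64(blockBatchLimit) // the int flag is widened exactly once
	if batchSize == 0 {
		return 0
	}
	// Integer division: only fully completed batches count toward the score.
	processedBatches := float64(processedBlocks / batchSize)
	score := processedBatches * batchWeight
	// Cap the score at the maximum number of countable batches.
	if capScore := float64(processedBlocksCap/batchSize) * batchWeight; score > capScore {
		score = capScore
	}
	return score
}

func main() {
	// 128 processed blocks at a batch size of 64 is two full batches.
	fmt.Println(blockProviderScore(128, 1280, 64, 0.05)) // 0.1
}
```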
diff --git a/beacon-chain/p2p/peers/status.go b/beacon-chain/p2p/peers/status.go
index da80ef630a..391d0ecf48 100644
--- a/beacon-chain/p2p/peers/status.go
+++ b/beacon-chain/p2p/peers/status.go
@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
 
 // BestNonFinalized returns the highest known epoch, higher than ours,
 // and is shared by at least minPeers.
-func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
+func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
     connected := p.Connected()
     epochVotes := make(map[types.Epoch]uint64)
     pidEpoch := make(map[peer.ID]types.Epoch, len(connected))
diff --git a/beacon-chain/p2p/subnets.go b/beacon-chain/p2p/subnets.go
index 511b15431f..8c12e55f92 100644
--- a/beacon-chain/p2p/subnets.go
+++ b/beacon-chain/p2p/subnets.go
@@ -33,11 +33,12 @@ const syncLockerVal = 100
 // subscribed to a particular subnet. Then we try to connect
 // with those peers. This method will block until the required amount of
 // peers are found, the method only exits in the event of context timeouts.
-func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
+func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
+    index uint64, threshold int) (bool, error) {
     ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
     defer span.End()
 
-    span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
+    span.AddAttributes(trace.Int64Attribute("index", int64(index)))
 
     if s.dv5Listener == nil {
         // return if discovery isn't set
@@ -48,14 +49,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
     iterator := s.dv5Listener.RandomNodes()
     switch {
     case strings.Contains(topic, GossipAttestationMessage):
-        iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
+        iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
     case strings.Contains(topic, GossipSyncCommitteeMessage):
-        iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
+        iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
     default:
         return false, errors.New("no subnet exists for provided topic")
     }
 
-    currNum := uint64(len(s.pubsub.ListPeers(topic)))
+    currNum := len(s.pubsub.ListPeers(topic))
     wg := new(sync.WaitGroup)
     for {
         if err := ctx.Err(); err != nil {
@@ -80,7 +81,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
         }
         // Wait for all dials to be completed.
         wg.Wait()
-        currNum = uint64(len(s.pubsub.ListPeers(topic)))
+        currNum = len(s.pubsub.ListPeers(topic))
     }
     return true, nil
 }
diff --git a/beacon-chain/p2p/testing/fuzz_p2p.go b/beacon-chain/p2p/testing/fuzz_p2p.go
index 693896f891..11b9d60606 100644
--- a/beacon-chain/p2p/testing/fuzz_p2p.go
+++ b/beacon-chain/p2p/testing/fuzz_p2p.go
@@ -61,7 +61,7 @@ func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 }
 
 // FindPeersWithSubnet mocks the p2p func.
-func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
     return false, nil
 }
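The `FindPeersWithSubnet` change is the core of this cleanup: `len()` already returns an `int`, so an `int` threshold removes the `uint64(len(...))` round-trips in the discovery loop. A minimal sketch of the loop shape after the change (`listPeers` and `dialMore` are hypothetical stand-ins for `pubsub.ListPeers` and the discovery/dial step):

```go
// Sketch: wait until at least `threshold` peers are on a topic, dialing
// more peers between checks. The only early exit is context expiry.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func findPeers(ctx context.Context, listPeers func() []string, dialMore func(), threshold int) (bool, error) {
	currNum := len(listPeers()) // int, compares to the int threshold with no casts
	for currNum < threshold {
		if err := ctx.Err(); err != nil {
			return false, errors.New("unable to find requisite number of peers: " + err.Error())
		}
		dialMore()
		currNum = len(listPeers())
	}
	return true, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	var peers []string
	ok, err := findPeers(ctx,
		func() []string { return peers },
		func() { peers = append(peers, fmt.Sprintf("peer-%d", len(peers))) },
		3)
	fmt.Println(ok, err) // true <nil>
}
```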
diff --git a/beacon-chain/p2p/testing/mock_peermanager.go b/beacon-chain/p2p/testing/mock_peermanager.go
index 82e94232ae..3b3e32824d 100644
--- a/beacon-chain/p2p/testing/mock_peermanager.go
+++ b/beacon-chain/p2p/testing/mock_peermanager.go
@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 func (_ MockPeerManager) RefreshENR() {}
 
 // FindPeersWithSubnet .
-func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
     return true, nil
 }
diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go
index e785f19b9f..4951665f1c 100644
--- a/beacon-chain/p2p/testing/p2p.go
+++ b/beacon-chain/p2p/testing/p2p.go
@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
 }
 
 // FindPeersWithSubnet mocks the p2p func.
-func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
     return false, nil
 }
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go b/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go
index 09393c9439..d9dd1d2ab7 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go
@@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID)
 
 // waitForMinimumPeers spins and waits up until enough peers are available.
 func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
-    required := uint64(params.BeaconConfig().MaxPeersToSync)
+    required := params.BeaconConfig().MaxPeersToSync
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
@@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
             headEpoch := slots.ToEpoch(f.chain.HeadSlot())
             _, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
         }
-        if uint64(len(peers)) >= required {
+        if len(peers) >= required {
             return peers, nil
         }
         log.WithFields(logrus.Fields{
@@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
 // trimPeers limits peer list, returning only specified percentage of peers.
 // Takes system constraints into account (min/max peers to sync).
 func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
-    required := uint64(params.BeaconConfig().MaxPeersToSync)
+    required := params.BeaconConfig().MaxPeersToSync
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
     // Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
     limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
     // Limit cannot be less that minimum peers required by sync mechanism.
-    limit = mathutil.Max(limit, required)
+    limit = mathutil.Max(limit, uint64(required))
     // Limit cannot be higher than number of peers available (safe-guard).
     limit = mathutil.Min(limit, uint64(len(peers)))
     return peers[:limit]
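Every mock implementing `p2p.PeerManager` had to change in lockstep with the interface. A compile-time assertion (not part of this diff) turns that kind of drift into a single build error instead of scattered test failures; the types below are toy stand-ins for `FakeP2P`/`MockPeerManager`/`TestP2P`:

```go
// Sketch: assert interface satisfaction with a blank identifier so a
// signature mismatch between mock and interface fails at compile time.
package main

import "context"

type peerFinder interface {
	FindPeersWithSubnet(ctx context.Context, topic string, index uint64, threshold int) (bool, error)
}

type fakeP2P struct{}

func (*fakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
	return false, nil
}

// If fakeP2P's signature drifts from the interface, this line stops compiling.
var _ peerFinder = (*fakeP2P)(nil)

func main() {}
```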
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go
index 17542739fe..1e1697707b 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go
@@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
         capacityWeight float64
     }
 
-    batchSize := flags.Get().BlockBatchLimit
+    batchSize := uint64(flags.Get().BlockBatchLimit)
     tests := []struct {
         name string
         args args
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
index 786bc61fc9..1b1fbe3bf7 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go
@@ -371,7 +371,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 }
 
 func TestBlocksFetcher_scheduleRequest(t *testing.T) {
-    blockBatchLimit := flags.Get().BlockBatchLimit
+    blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
     t.Run("context cancellation", func(t *testing.T) {
         ctx, cancel := context.WithCancel(context.Background())
         fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
@@ -425,7 +425,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
         })
 
         cancel()
-        response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
+        response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
         assert.ErrorContains(t, "context canceled", response.err)
     })
 
@@ -440,7 +440,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
         requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
         defer reqCancel()
         go func() {
-            response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */)
+            response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
             select {
             case <-ctx.Done():
             case fetcher.fetchResponses <- response:
@@ -458,7 +458,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
                 blocks = resp.blocks
             }
         }
-        if uint64(len(blocks)) != blockBatchLimit {
+        if uint64(len(blocks)) != uint64(blockBatchLimit) {
             t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
         }
 
@@ -509,11 +509,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
     req := &ethpb.BeaconBlocksByRangeRequest{
         StartSlot: 1,
         Step:      1,
-        Count:     blockBatchLimit,
+        Count:     uint64(blockBatchLimit),
     }
     blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
     assert.NoError(t, err)
-    assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
+    assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
 
     // Test context cancellation.
     ctx, cancel = context.WithCancel(context.Background())
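`trimPeers` above clamps a percentage-derived limit between the sync minimum and the number of available peers. A self-contained sketch of that clamp (`minRequired` stands in for the `MinimumSyncPeers`/`MaxPeersToSync` logic):

```go
// Sketch: the percentage-derived limit is raised to the sync minimum and
// lowered to the slice length before slicing the peer list.
package main

import (
	"fmt"
	"math"
)

func trim(peers []string, percentage float64, minRequired int) []string {
	limit := int(math.Round(float64(len(peers)) * percentage))
	if limit < minRequired { // cannot go below the sync minimum
		limit = minRequired
	}
	if limit > len(peers) { // cannot exceed available peers (safe-guard)
		limit = len(peers)
	}
	return peers[:limit]
}

func main() {
	peers := []string{"a", "b", "c", "d", "e"}
	fmt.Println(trim(peers, 0.2, 3)) // [a b c] — 20% of 5 is 1, raised to 3
}
```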
diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go
index 280727597c..41655a45b4 100644
--- a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go
+++ b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go
@@ -200,7 +200,7 @@ func TestBlocksFetcher_findFork(t *testing.T) {
         peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
     }
 
-    blockBatchLimit := flags.Get().BlockBatchLimit * 2
+    blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
     pidInd := 0
     for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
         req := &ethpb.BeaconBlocksByRangeRequest{
diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go
index a417848e43..1f6d6407d6 100644
--- a/beacon-chain/sync/initial-sync/service.go
+++ b/beacon-chain/sync/initial-sync/service.go
@@ -163,13 +163,13 @@ func (s *Service) Resync() error {
 }
 
 func (s *Service) waitForMinimumPeers() {
-    required := uint64(params.BeaconConfig().MaxPeersToSync)
+    required := params.BeaconConfig().MaxPeersToSync
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
     for {
         _, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch)
-        if uint64(len(peers)) >= required {
+        if len(peers) >= required {
             break
         }
         log.WithFields(logrus.Fields{
diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go
index 39c7593aed..60617a52ec 100644
--- a/beacon-chain/sync/initial-sync/service_test.go
+++ b/beacon-chain/sync/initial-sync/service_test.go
@@ -27,7 +27,7 @@ import (
 )
 
 func TestService_Constants(t *testing.T) {
-    if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > uint64(1000) {
+    if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 {
         t.Fatal("rpc rejects requests over 1000 range slots")
     }
 }
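`TestService_Constants` encodes a cross-component invariant: the worst-case range request (peers to sync × batch size) must stay within the server's 1000-slot rejection threshold, and with both operands now `int` the check needs no `uint64` widening. A sketch with hypothetical values (the real ones come from `params.BeaconConfig()` and `flags.Get()`):

```go
// Sketch of the invariant checked by TestService_Constants, with
// stand-in values rather than the actual Prysm configuration.
package main

import "fmt"

const maxRangeSlots = 1000 // server-side rejection threshold

func main() {
	maxPeersToSync := 15 // hypothetical stand-in for params.BeaconConfig().MaxPeersToSync
	blockBatchLimit := 64
	// Plain int arithmetic: both operands share a type after the flag change.
	if maxPeersToSync*blockBatchLimit > maxRangeSlots {
		fmt.Println("invariant violated: requests would exceed", maxRangeSlots, "slots")
		return
	}
	fmt.Println("ok:", maxPeersToSync*blockBatchLimit, "<=", maxRangeSlots)
}
```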
diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range.go b/beacon-chain/sync/rpc_beacon_blocks_by_range.go
index f7ee288274..830d9e5828 100644
--- a/beacon-chain/sync/rpc_beacon_blocks_by_range.go
+++ b/beacon-chain/sync/rpc_beacon_blocks_by_range.go
@@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
     // The initial count for the first batch to be returned back.
     count := m.Count
-    allowedBlocksPerSecond := flags.Get().BlockBatchLimit
+    allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
     if count > allowedBlocksPerSecond {
         count = allowedBlocksPerSecond
     }
diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go
index e6394a6b1b..12761a0799 100644
--- a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go
+++ b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go
@@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
     req := &ethpb.BeaconBlocksByRangeRequest{
         StartSlot: 100,
         Step:      1,
-        Count:     flags.Get().BlockBatchLimit,
+        Count:     uint64(flags.Get().BlockBatchLimit),
     }
     saveBlocks(req)
 
-    for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ {
+    for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
         assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
     }
 
diff --git a/beacon-chain/sync/subscriber.go b/beacon-chain/sync/subscriber.go
index d235059840..2f544e4049 100644
--- a/beacon-chain/sync/subscriber.go
+++ b/beacon-chain/sync/subscriber.go
@@ -640,7 +640,7 @@ func (s *Service) unSubscribeFromTopic(topic string) {
 // find if we have peers who are subscribed to the same subnet
 func (s *Service) validPeersExist(subnetTopic string) bool {
     numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
-    return uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet
+    return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
 }
 
 func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
@@ -681,7 +681,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
     for _, sub := range wantedSubs {
         subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix()
         peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic)
-        if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet {
+        if len(peers) > flags.Get().MinimumPeersPerSubnet {
             // In the event we have more than the minimum, we can
             // mark the remaining as viable for pruning.
             peers = peers[:flags.Get().MinimumPeersPerSubnet]
diff --git a/beacon-chain/sync/subscriber_test.go b/beacon-chain/sync/subscriber_test.go
index e733e33730..6e48211658 100644
--- a/beacon-chain/sync/subscriber_test.go
+++ b/beacon-chain/sync/subscriber_test.go
@@ -490,7 +490,7 @@ func TestFilterSubnetPeers(t *testing.T) {
     // Try with only peers from subnet 20.
     wantedPeers = []peer.ID{p2.BHost.ID()}
     // Connect an excess amount of peers in the particular subnet.
-    for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ {
+    for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ {
         nPeer := createPeer(t, subnet20)
         p.Connect(nPeer)
         wantedPeers = append(wantedPeers, nPeer.BHost.ID())
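One place the `uint64` cannot disappear entirely is the by-range RPC handler: the request's `Count` arrives from the wire as `uint64`, so the `int` flag is widened once before the comparison. A sketch of that clamp:

```go
// Sketch of the clamp in beaconBlocksByRangeRPCHandler: the wire request's
// Count is a uint64, so the int batch-limit flag is widened at the boundary.
package main

import "fmt"

func clampCount(requested uint64, blockBatchLimit int) uint64 {
	allowed := uint64(blockBatchLimit) // widened exactly once
	if requested > allowed {
		return allowed
	}
	return requested
}

func main() {
	fmt.Println(clampCount(500, 64)) // 64: oversized requests are bounded
	fmt.Println(clampCount(10, 64))  // 10: small requests pass through
}
```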
diff --git a/cmd/beacon-chain/flags/base.go b/cmd/beacon-chain/flags/base.go
index 8f754b7acd..74ce4f0a82 100644
--- a/cmd/beacon-chain/flags/base.go
+++ b/cmd/beacon-chain/flags/base.go
@@ -88,7 +88,7 @@ var (
     }
     // MinSyncPeers specifies the required number of successful peer handshakes in order
     // to start syncing with external peers.
-    MinSyncPeers = &cli.Uint64Flag{
+    MinSyncPeers = &cli.IntFlag{
         Name:  "min-sync-peers",
         Usage: "The required number of valid peers to connect with before syncing.",
         Value: 3,
@@ -123,13 +123,13 @@ var (
         Usage: "Does not run the discoveryV5 dht.",
     }
     // BlockBatchLimit specifies the requested block batch size.
-    BlockBatchLimit = &cli.Uint64Flag{
+    BlockBatchLimit = &cli.IntFlag{
         Name:  "block-batch-limit",
         Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
         Value: 64,
     }
     // BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase.
-    BlockBatchLimitBurstFactor = &cli.Uint64Flag{
+    BlockBatchLimitBurstFactor = &cli.IntFlag{
         Name:  "block-batch-limit-burst-factor",
         Usage: "The factor by which block batch limit may increase on burst.",
         Value: 10,
diff --git a/cmd/beacon-chain/flags/config.go b/cmd/beacon-chain/flags/config.go
index 74230e9533..fd35ab416e 100644
--- a/cmd/beacon-chain/flags/config.go
+++ b/cmd/beacon-chain/flags/config.go
@@ -12,10 +12,10 @@ type GlobalFlags struct {
     DisableSync                bool
     DisableDiscv5              bool
     SubscribeToAllSubnets      bool
-    MinimumSyncPeers           uint64
-    MinimumPeersPerSubnet      uint64
-    BlockBatchLimit            uint64
-    BlockBatchLimitBurstFactor uint64
+    MinimumSyncPeers           int
+    MinimumPeersPerSubnet      int
+    BlockBatchLimit            int
+    BlockBatchLimitBurstFactor int
 }
 
 var globalConfig *GlobalFlags
@@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
         cfg.SubscribeToAllSubnets = true
     }
     cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
-    cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name)
-    cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name)
-    cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name)
+    cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
+    cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
+    cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
 
     configureMinimumPeers(ctx, cfg)
 
     Init(cfg)
 }
 
 func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
-    cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name)
-    maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name)
+    cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
+    maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
     if cfg.MinimumSyncPeers > maxPeers {
         log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
         cfg.MinimumSyncPeers = maxPeers
diff --git a/cmd/flags.go b/cmd/flags.go
index eb408b5ba9..1d8e34819e 100644
--- a/cmd/flags.go
+++ b/cmd/flags.go
@@ -150,7 +150,7 @@ var (
         Value: "",
     }
     // P2PMaxPeers defines a flag to specify the max number of peers in libp2p.
-    P2PMaxPeers = &cli.Uint64Flag{
+    P2PMaxPeers = &cli.IntFlag{
         Name:  "p2p-max-peers",
         Usage: "The max number of p2p peers to maintain.",
         Value: 45,
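One behavioral caveat of moving from `cli.Uint64Flag` to `cli.IntFlag`: a negative value now parses successfully. The diff's `configureMinimumPeers` clamps `MinimumSyncPeers` down to `p2p-max-peers` but does not reject negatives. A defensive variant (not part of the diff) that also floors at zero:

```go
// Sketch of the min-sync-peers clamp with an added non-negativity check;
// only the max-peers clamp and its log line mirror the diff's behavior.
package main

import (
	"fmt"
	"log"
)

func minimumSyncPeers(minSyncPeers, maxPeers int) int {
	if minSyncPeers < 0 { // possible with IntFlag, impossible with Uint64Flag
		log.Printf("min-sync-peers %d is negative, using 0", minSyncPeers)
		minSyncPeers = 0
	}
	if minSyncPeers > maxPeers {
		log.Printf("Changing Minimum Sync Peers to %d", maxPeers)
		minSyncPeers = maxPeers
	}
	return minSyncPeers
}

func main() {
	fmt.Println(minimumSyncPeers(3, 45))  // 3
	fmt.Println(minimumSyncPeers(60, 45)) // 45, clamped to max peers
	fmt.Println(minimumSyncPeers(-1, 45)) // 0, negatives floored
}
```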