Mirror of https://github.com/OffchainLabs/prysm.git
Replace a Few IntFlags with Uint64Flags (#9959)
* use uints instead of ints
* fix method
* fix
* fix
* builds
* deepsource
* deep source
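The commit switches several urfave/cli integer flags (such as `p2p-max-peers` and `block-batch-limit`) from `cli.IntFlag` to `cli.Uint64Flag` and threads `uint64` through their consumers in the p2p, sync, and flags packages, so the scattered `int`↔`uint64` casts can be dropped. A minimal sketch of the flag pattern, assuming `github.com/urfave/cli/v2`; the flag name below is a placeholder, not one of Prysm's:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

// maxPeersFlag is a hypothetical stand-in for flags such as p2p-max-peers
// or block-batch-limit that this commit converts to Uint64Flag.
var maxPeersFlag = &cli.Uint64Flag{
	Name:  "max-peers",
	Usage: "The max number of peers to maintain.",
	Value: 45,
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{maxPeersFlag},
		Action: func(ctx *cli.Context) error {
			// Uint64 returns a uint64, so no int -> uint64 conversion is
			// needed downstream of the flag read.
			maxPeers := ctx.Uint64(maxPeersFlag.Name)
			fmt.Println("max peers:", maxPeers)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```

A side effect worth noting: unsigned flags reject negative command-line values at parse time instead of letting them flow through as negative ints.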
@@ -489,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
         MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
         TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
         UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
-        MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
+        MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name),
         AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
         DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
         EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),
@@ -23,7 +23,7 @@ type Config struct {
     MetaDataDir string
     TCPPort uint
     UDPPort uint
-    MaxPeers uint
+    MaxPeers uint64
     AllowListCIDR string
     DenyListCIDR []string
     StateNotifier statefeed.Notifier
@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
             ScorerParams: &scorers.Config{},
         }),
        host: mockp2p.NewTestP2P(t).BHost,
-       cfg: &Config{MaxPeers: uint(limit)},
+       cfg: &Config{MaxPeers: uint64(limit)},
    }
    var err error
    s.addrFilter, err = configureFilter(&Config{})
@@ -77,7 +77,7 @@ type PeerManager interface {
     ENR() *enr.Record
     DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
     RefreshENR()
-    FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
+    FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error)
     AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
 }
 
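Because `FindPeersWithSubnet` is part of the `PeerManager` interface, changing `threshold` from `int` to `uint64` here also forces the mock implementations further down in this commit (`FakeP2P`, `MockPeerManager`, `TestP2P`) to change, or they stop satisfying the interface. A stripped-down illustration with simplified stand-in names, not the actual Prysm types:

```go
package main

import "context"

// peerFinder mimics the shape of the changed interface method.
type peerFinder interface {
	FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error)
}

type fakeFinder struct{}

// If this method still took `threshold int`, fakeFinder would no longer
// satisfy peerFinder and the compile-time check below would fail.
func (fakeFinder) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
	return false, nil
}

var _ peerFinder = fakeFinder{} // compile-time interface check

func main() {}
```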
@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
     if scorer.config.StalePeerRefreshInterval == 0 {
         scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
     }
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     scorer.maxScore = 1.0
     if batchSize > 0 {
         totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
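With `flags.Get().BlockBatchLimit` now a `uint64`, the `uint64(...)` wrapper is redundant, and the division by `ProcessedBlocksCap` still type-checks, assuming `ProcessedBlocksCap` is also `uint64` as the expression above implies. A small standalone illustration of the underlying rule (Go forbids arithmetic between mismatched integer types), using made-up values:

```go
package main

import "fmt"

func main() {
	var processedBlocksCap uint64 = 20 * 64
	var batchLimitInt int = 64

	// totalBatches := processedBlocksCap / batchLimitInt // compile error: mismatched types uint64 and int
	totalBatches := processedBlocksCap / uint64(batchLimitInt) // old style: cast at every use

	var batchLimitU64 uint64 = 64
	totalBatchesNoCast := processedBlocksCap / batchLimitU64 // new style: types already agree

	fmt.Println(totalBatches, totalBatchesNoCast)
}
```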
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
     if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
         return s.maxScore
     }
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     if batchSize > 0 {
         processedBatches := float64(peerData.ProcessedBlocks / batchSize)
         score += processedBatches * s.config.ProcessedBatchWeight
@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     tests := []struct {
         name string
         update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
         },
     })
     scorer := peerStatuses.Scorers().BlockProviderScorer()
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     r := rand.NewDeterministicGenerator()
 
     reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
 }
 
 func TestScorers_BlockProvider_Sorted(t *testing.T) {
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     tests := []struct {
         name string
         update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
 func TestScorers_BlockProvider_MaxScore(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
 
     tests := []struct {
         name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
 func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
 
     tests := []struct {
@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()
 
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
 
     t.Run("default config", func(t *testing.T) {
         peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
     defer cancel()
 
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
 
     peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
         scores := make(map[string]float64, len(pids))
@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
 
 // BestNonFinalized returns the highest known epoch, higher than ours,
 // and is shared by at least minPeers.
-func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
+func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
     connected := p.Connected()
     epochVotes := make(map[types.Epoch]uint64)
     pidEpoch := make(map[peer.ID]types.Epoch, len(connected))
@@ -33,12 +33,11 @@ const syncLockerVal = 100
 // subscribed to a particular subnet. Then we try to connect
 // with those peers. This method will block until the required amount of
 // peers are found, the method only exits in the event of context timeouts.
-func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
-    index uint64, threshold int) (bool, error) {
+func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
     ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
     defer span.End()
 
-    span.AddAttributes(trace.Int64Attribute("index", int64(index)))
+    span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
 
     if s.dv5Listener == nil {
         // return if discovery isn't set
@@ -49,14 +48,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
     iterator := s.dv5Listener.RandomNodes()
     switch {
     case strings.Contains(topic, GossipAttestationMessage):
-        iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
+        iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
     case strings.Contains(topic, GossipSyncCommitteeMessage):
-        iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
+        iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
     default:
         return false, errors.New("no subnet exists for provided topic")
     }
 
-    currNum := len(s.pubsub.ListPeers(topic))
+    currNum := uint64(len(s.pubsub.ListPeers(topic)))
     wg := new(sync.WaitGroup)
     for {
         if err := ctx.Err(); err != nil {
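Since `threshold` is now `uint64` while `len` always returns an `int`, the peer count has to be converted before it can be compared against the threshold, which is why `currNum` is wrapped in `uint64(...)` here and in the next hunk. A self-contained sketch of the pattern:

```go
package main

import "fmt"

// enoughPeers reports whether the peer count meets a uint64 threshold.
func enoughPeers(peers []string, threshold uint64) bool {
	// `len(peers) >= threshold` would not compile: int vs uint64.
	return uint64(len(peers)) >= threshold
}

func main() {
	fmt.Println(enoughPeers([]string{"a", "b", "c"}, 2)) // true
	fmt.Println(enoughPeers([]string{"a"}, 2))           // false
}
```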
@@ -81,7 +80,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
         }
         // Wait for all dials to be completed.
         wg.Wait()
-        currNum = len(s.pubsub.ListPeers(topic))
+        currNum = uint64(len(s.pubsub.ListPeers(topic)))
     }
     return true, nil
 }
@@ -61,7 +61,7 @@ func (p *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 }
 
 // FindPeersWithSubnet mocks the p2p func.
-func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
+func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
     return false, nil
 }
 
@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
 func (m MockPeerManager) RefreshENR() {}
 
 // FindPeersWithSubnet .
-func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
+func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
     return true, nil
 }
 
@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
 }
 
 // FindPeersWithSubnet mocks the p2p func.
-func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
+func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
     return false, nil
 }
 
@@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID)
 
 // waitForMinimumPeers spins and waits up until enough peers are available.
 func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
-    required := params.BeaconConfig().MaxPeersToSync
+    required := uint64(params.BeaconConfig().MaxPeersToSync)
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
@@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
             headEpoch := slots.ToEpoch(f.chain.HeadSlot())
             _, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
         }
-        if len(peers) >= required {
+        if uint64(len(peers)) >= required {
             return peers, nil
         }
         log.WithFields(logrus.Fields{
@@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
 // trimPeers limits peer list, returning only specified percentage of peers.
 // Takes system constraints into account (min/max peers to sync).
 func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
-    required := params.BeaconConfig().MaxPeersToSync
+    required := uint64(params.BeaconConfig().MaxPeersToSync)
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
     // Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
     limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
     // Limit cannot be less that minimum peers required by sync mechanism.
-    limit = mathutil.Max(limit, uint64(required))
+    limit = mathutil.Max(limit, required)
     // Limit cannot be higher than number of peers available (safe-guard).
     limit = mathutil.Min(limit, uint64(len(peers)))
     return peers[:limit]
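In `trimPeers`, `required` is already `uint64`, so only the cast inside the `Max` call goes away; the `len`-derived bounds still need converting. A hedged sketch of the clamping logic, with local helpers standing in for Prysm's mathutil package; only the types and the clamp order mirror the diff above:

```go
package main

import (
	"fmt"
	"math"
)

func maxU64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// trimCount returns how many peers to keep: a percentage of the list,
// clamped below by `required` and above by the number of available peers.
func trimCount(numPeers int, peersPercentage float64, required uint64) uint64 {
	limit := uint64(math.Round(float64(numPeers) * peersPercentage))
	limit = maxU64(limit, required)         // no uint64(required) cast needed anymore
	limit = minU64(limit, uint64(numPeers)) // len-derived counts still need a cast
	return limit
}

func main() {
	fmt.Println(trimCount(10, 0.2, 3)) // 3: percentage gives 2, clamped up to required
	fmt.Println(trimCount(10, 0.9, 3)) // 9
}
```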
@@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
         capacityWeight float64
     }
 
-    batchSize := uint64(flags.Get().BlockBatchLimit)
+    batchSize := flags.Get().BlockBatchLimit
     tests := []struct {
         name string
         args args
@@ -372,7 +372,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
 }
 
 func TestBlocksFetcher_scheduleRequest(t *testing.T) {
-    blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
+    blockBatchLimit := flags.Get().BlockBatchLimit
     t.Run("context cancellation", func(t *testing.T) {
         ctx, cancel := context.WithCancel(context.Background())
         fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
@@ -426,7 +426,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
     })
 
         cancel()
-        response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
+        response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
         assert.ErrorContains(t, "context canceled", response.err)
     })
 
@@ -441,7 +441,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
         requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
         defer reqCancel()
         go func() {
-            response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
+            response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */)
             select {
             case <-ctx.Done():
             case fetcher.fetchResponses <- response:
@@ -459,7 +459,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
                 blocks = resp.blocks
             }
         }
-        if uint64(len(blocks)) != uint64(blockBatchLimit) {
+        if uint64(len(blocks)) != blockBatchLimit {
             t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
         }
 
@@ -510,11 +510,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
     req := &p2ppb.BeaconBlocksByRangeRequest{
         StartSlot: 1,
         Step: 1,
-        Count: uint64(blockBatchLimit),
+        Count: blockBatchLimit,
     }
     blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
     assert.NoError(t, err)
-    assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
+    assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
 
     // Test context cancellation.
     ctx, cancel = context.WithCancel(context.Background())
@@ -201,7 +201,7 @@ func TestBlocksFetcher_findFork(t *testing.T) {
         peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
     }
 
-    blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
+    blockBatchLimit := flags.Get().BlockBatchLimit * 2
     pidInd := 0
     for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
         req := &p2ppb.BeaconBlocksByRangeRequest{
@@ -163,13 +163,13 @@ func (s *Service) Resync() error {
 }
 
 func (s *Service) waitForMinimumPeers() {
-    required := params.BeaconConfig().MaxPeersToSync
+    required := uint64(params.BeaconConfig().MaxPeersToSync)
     if flags.Get().MinimumSyncPeers < required {
         required = flags.Get().MinimumSyncPeers
     }
     for {
         _, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch)
-        if len(peers) >= required {
+        if uint64(len(peers)) >= required {
             break
         }
         log.WithFields(logrus.Fields{
@@ -27,7 +27,7 @@ import (
 )
 
 func TestService_Constants(t *testing.T) {
-    if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 {
+    if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > uint64(1000) {
         t.Fatal("rpc rejects requests over 1000 range slots")
     }
 }
@@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
 
     // The initial count for the first batch to be returned back.
     count := m.Count
-    allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
+    allowedBlocksPerSecond := flags.Get().BlockBatchLimit
     if count > allowedBlocksPerSecond {
         count = allowedBlocksPerSecond
     }
@@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
     req := &pb.BeaconBlocksByRangeRequest{
         StartSlot: 100,
         Step: 1,
-        Count: uint64(flags.Get().BlockBatchLimit),
+        Count: flags.Get().BlockBatchLimit,
     }
     saveBlocks(req)
 
-    for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
+    for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ {
         assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
     }
 
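The loop counter in this test gains an explicit `uint64` type because `BlockBatchLimitBurstFactor` is now `uint64`, and Go will not compare an `int` counter against it. A minimal illustration, not the actual test:

```go
package main

import "fmt"

func main() {
	var burstFactor uint64 = 10

	// for i := 0; i < burstFactor; i++ {} // compile error: mismatched types int and uint64
	for i := uint64(0); i < burstFactor; i++ {
		_ = i // loop body elided
	}
	fmt.Println("looped", burstFactor, "times")
}
```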
@@ -641,7 +641,7 @@ func (s *Service) unSubscribeFromTopic(topic string) {
 // find if we have peers who are subscribed to the same subnet
 func (s *Service) validPeersExist(subnetTopic string) bool {
     numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
-    return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
+    return uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet
 }
 
 func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
@@ -682,7 +682,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
     for _, sub := range wantedSubs {
         subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix()
         peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic)
-        if len(peers) > flags.Get().MinimumPeersPerSubnet {
+        if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet {
             // In the event we have more than the minimum, we can
             // mark the remaining as viable for pruning.
             peers = peers[:flags.Get().MinimumPeersPerSubnet]
@@ -489,7 +489,7 @@ func TestFilterSubnetPeers(t *testing.T) {
     // Try with only peers from subnet 20.
     wantedPeers = []peer.ID{p2.BHost.ID()}
     // Connect an excess amount of peers in the particular subnet.
-    for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ {
+    for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ {
         nPeer := createPeer(t, subnet20)
         p.Connect(nPeer)
         wantedPeers = append(wantedPeers, nPeer.BHost.ID())
@@ -88,7 +88,7 @@ var (
     }
     // MinSyncPeers specifies the required number of successful peer handshakes in order
     // to start syncing with external peers.
-    MinSyncPeers = &cli.IntFlag{
+    MinSyncPeers = &cli.Uint64Flag{
         Name: "min-sync-peers",
         Usage: "The required number of valid peers to connect with before syncing.",
         Value: 3,
@@ -123,13 +123,13 @@ var (
         Usage: "Does not run the discoveryV5 dht.",
     }
     // BlockBatchLimit specifies the requested block batch size.
-    BlockBatchLimit = &cli.IntFlag{
+    BlockBatchLimit = &cli.Uint64Flag{
         Name: "block-batch-limit",
         Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
         Value: 64,
     }
     // BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase.
-    BlockBatchLimitBurstFactor = &cli.IntFlag{
+    BlockBatchLimitBurstFactor = &cli.Uint64Flag{
         Name: "block-batch-limit-burst-factor",
         Usage: "The factor by which block batch limit may increase on burst.",
         Value: 10,
@@ -12,10 +12,10 @@ type GlobalFlags struct {
     DisableSync bool
     DisableDiscv5 bool
     SubscribeToAllSubnets bool
-    MinimumSyncPeers int
-    MinimumPeersPerSubnet int
-    BlockBatchLimit int
-    BlockBatchLimitBurstFactor int
+    MinimumSyncPeers uint64
+    MinimumPeersPerSubnet uint64
+    BlockBatchLimit uint64
+    BlockBatchLimitBurstFactor uint64
 }
 
 var globalConfig *GlobalFlags
@@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
         cfg.SubscribeToAllSubnets = true
     }
     cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
-    cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
-    cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
-    cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
+    cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name)
+    cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name)
+    cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name)
     configureMinimumPeers(ctx, cfg)
 
     Init(cfg)
 }
 
 func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
-    cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
-    maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
+    cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name)
+    maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name)
     if cfg.MinimumSyncPeers > maxPeers {
         log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
         cfg.MinimumSyncPeers = maxPeers
@@ -150,7 +150,7 @@ var (
         Value: "",
     }
     // P2PMaxPeers defines a flag to specify the max number of peers in libp2p.
-    P2PMaxPeers = &cli.IntFlag{
+    P2PMaxPeers = &cli.Uint64Flag{
         Name: "p2p-max-peers",
         Usage: "The max number of p2p peers to maintain.",
         Value: 45,