Revert "Replace a Few IntFlags with Uint64Flags (#9959)" (#10163)

* Revert "Replace a Few IntFlags with Uint64Flags (#9959)"

This reverts commit 790bf03123.

* fix

* fix
Nishant Das authored on 2022-02-01 16:51:17 +08:00, committed by GitHub
parent eef2122a9e
commit e83c9d5862
25 changed files with 58 additions and 57 deletions
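
For readers unfamiliar with the flag types involved, here is a minimal standalone sketch (not taken from this repository) of how urfave/cli's IntFlag and Uint64Flag differ when read back; the second flag name is hypothetical, used only to show both types side by side. The revert below switches the listed flags back to the int-typed form, which is why uint64(...) conversions reappear at call sites that feed uint64 APIs.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			// Post-revert style: int-typed flag, read back with ctx.Int.
			&cli.IntFlag{Name: "block-batch-limit", Value: 64},
			// Pre-revert style: uint64-typed flag, read back with ctx.Uint64.
			// The flag name here is made up for this illustration.
			&cli.Uint64Flag{Name: "block-batch-limit-u64", Value: 64},
		},
		Action: func(ctx *cli.Context) error {
			batchInt := ctx.Int("block-batch-limit")        // int
			batchU64 := ctx.Uint64("block-batch-limit-u64") // uint64
			// Mixing the two types requires an explicit conversion, which is
			// why the revert reintroduces uint64(...) casts wherever an
			// int flag value feeds a uint64 API.
			fmt.Println(uint64(batchInt) == batchU64)
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}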

View File

@@ -489,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error {
MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name),
TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name),
UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name),
- MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name),
+ MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name),
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name),
DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)),
EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name),

View File

@@ -23,7 +23,7 @@ type Config struct {
MetaDataDir string
TCPPort uint
UDPPort uint
- MaxPeers uint64
+ MaxPeers uint
AllowListCIDR string
DenyListCIDR []string
StateNotifier statefeed.Notifier

View File

@@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) {
ScorerParams: &scorers.Config{},
}),
host: mockp2p.NewTestP2P(t).BHost,
- cfg: &Config{MaxPeers: uint64(limit)},
+ cfg: &Config{MaxPeers: uint(limit)},
}
var err error
s.addrFilter, err = configureFilter(&Config{})

View File

@@ -77,7 +77,7 @@ type PeerManager interface {
ENR() *enr.Record
DiscoveryAddresses() ([]multiaddr.Multiaddr, error)
RefreshENR()
- FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error)
+ FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error)
}

View File

@@ -85,7 +85,7 @@ func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerCo
if scorer.config.StalePeerRefreshInterval == 0 {
scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval
}
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
scorer.maxScore = 1.0
if batchSize > 0 {
totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize)
@@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 {
if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval {
return s.maxScore
}
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
if batchSize > 0 {
processedBatches := float64(peerData.ProcessedBlocks / batchSize)
score += processedBatches * s.config.ProcessedBatchWeight

View File

@@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
tests := []struct {
name string
update func(scorer *scorers.BlockProviderScorer)
@@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
r := rand.NewDeterministicGenerator()
reverse := func(pids []peer.ID) []peer.ID {
@@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) {
}
func TestScorers_BlockProvider_Sorted(t *testing.T) {
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
tests := []struct {
name string
update func(s *scorers.BlockProviderScorer)
@@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) {
func TestScorers_BlockProvider_MaxScore(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
tests := []struct {
name string
@@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) {
func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]"
tests := []struct {

View File

@@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
t.Run("default config", func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
@@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 {
scores := make(map[string]float64, len(pids))

View File

@@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ
// BestNonFinalized returns the highest known epoch, higher than ours,
// and is shared by at least minPeers.
- func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
+ func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) {
connected := p.Connected()
epochVotes := make(map[types.Epoch]uint64)
pidEpoch := make(map[peer.ID]types.Epoch, len(connected))

View File

@@ -33,11 +33,12 @@ const syncLockerVal = 100
// subscribed to a particular subnet. Then we try to connect
// with those peers. This method will block until the required amount of
// peers are found, the method only exits in the event of context timeouts.
- func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) {
+ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string,
+ index uint64, threshold int) (bool, error) {
ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet")
defer span.End()
- span.AddAttributes(trace.Int64Attribute("index", int64(subIndex)))
+ span.AddAttributes(trace.Int64Attribute("index", int64(index)))
if s.dv5Listener == nil {
// return if discovery isn't set
@@ -48,14 +49,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
iterator := s.dv5Listener.RandomNodes()
switch {
case strings.Contains(topic, GossipAttestationMessage):
- iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex))
+ iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index))
case strings.Contains(topic, GossipSyncCommitteeMessage):
- iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex))
+ iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index))
default:
return false, errors.New("no subnet exists for provided topic")
}
- currNum := uint64(len(s.pubsub.ListPeers(topic)))
+ currNum := len(s.pubsub.ListPeers(topic))
wg := new(sync.WaitGroup)
for {
if err := ctx.Err(); err != nil {
@@ -80,7 +81,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subInde
}
// Wait for all dials to be completed.
wg.Wait()
- currNum = uint64(len(s.pubsub.ListPeers(topic)))
+ currNum = len(s.pubsub.ListPeers(topic))
}
return true, nil
}
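
The FindPeersWithSubnet change above is mirrored at its call sites. Below is a minimal sketch, under the assumption that callers simply pass an int threshold straight through; the interface and helper are illustrative, not the repository's code.

package subnetsketch

import (
	"context"
	"errors"
)

// subnetPeerFinder mirrors the reverted method shape: the subnet index stays
// uint64 while the peer threshold goes back to a plain int.
type subnetPeerFinder interface {
	FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error)
}

// ensureSubnetPeers is a hypothetical caller: with an int threshold it can use
// int-typed flag values (such as a minimum-peers setting) without a uint64 conversion.
func ensureSubnetPeers(ctx context.Context, f subnetPeerFinder, topic string, subIndex uint64, minPeers int) error {
	ok, err := f.FindPeersWithSubnet(ctx, topic, subIndex, minPeers)
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("peer search ended before reaching the requested threshold")
	}
	return nil
}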

View File

@@ -61,7 +61,7 @@ func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
}
// FindPeersWithSubnet mocks the p2p func.
- func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+ func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
return false, nil
}

View File

@@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) {
func (_ MockPeerManager) RefreshENR() {}
// FindPeersWithSubnet .
- func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+ func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
return true, nil
}

View File

@@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status {
}
// FindPeersWithSubnet mocks the p2p func.
- func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) {
+ func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) {
return false, nil
}

View File

@@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID)
// waitForMinimumPeers spins and waits up until enough peers are available.
func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) {
- required := uint64(params.BeaconConfig().MaxPeersToSync)
+ required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
@@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err
headEpoch := slots.ToEpoch(f.chain.HeadSlot())
_, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch)
}
- if uint64(len(peers)) >= required {
+ if len(peers) >= required {
return peers, nil
}
log.WithFields(logrus.Fields{
@@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP
// trimPeers limits peer list, returning only specified percentage of peers.
// Takes system constraints into account (min/max peers to sync).
func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID {
- required := uint64(params.BeaconConfig().MaxPeersToSync)
+ required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
// Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected.
limit := uint64(math.Round(float64(len(peers)) * peersPercentage))
// Limit cannot be less that minimum peers required by sync mechanism.
- limit = mathutil.Max(limit, required)
+ limit = mathutil.Max(limit, uint64(required))
// Limit cannot be higher than number of peers available (safe-guard).
limit = mathutil.Min(limit, uint64(len(peers)))
return peers[:limit]
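
After the revert, MaxPeersToSync and MinimumSyncPeers are ints while the limit arithmetic above still runs on uint64, hence the single uint64(required) conversion before clamping. Here is a self-contained sketch of that shape; the max/min helpers stand in for the repository's mathutil package and only their shape is assumed.

package trimsketch

import "math"

// maxU64 and minU64 stand in for uint64 helpers like the mathutil calls above.
func maxU64(a, b uint64) uint64 {
	if a > b {
		return a
	}
	return b
}

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// trimCount mirrors the post-revert arithmetic: required is int-typed (it comes
// from int flags/params), so it is converted once before the uint64 clamping.
func trimCount(numPeers int, peersPercentage float64, required int) uint64 {
	limit := uint64(math.Round(float64(numPeers) * peersPercentage))
	limit = maxU64(limit, uint64(required)) // never below the sync minimum
	limit = minU64(limit, uint64(numPeers)) // never above the peers available
	return limit
}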

View File

@@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) {
capacityWeight float64
}
- batchSize := flags.Get().BlockBatchLimit
+ batchSize := uint64(flags.Get().BlockBatchLimit)
tests := []struct {
name string
args args

View File

@@ -371,7 +371,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) {
}
func TestBlocksFetcher_scheduleRequest(t *testing.T) {
- blockBatchLimit := flags.Get().BlockBatchLimit
+ blockBatchLimit := uint64(flags.Get().BlockBatchLimit)
t.Run("context cancellation", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{})
@@ -425,7 +425,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
})
cancel()
- response := fetcher.handleRequest(ctx, 1, blockBatchLimit)
+ response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit))
assert.ErrorContains(t, "context canceled", response.err)
})
@@ -440,7 +440,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
requestCtx, reqCancel := context.WithTimeout(context.Background(), 2*time.Second)
defer reqCancel()
go func() {
- response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */)
+ response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */)
select {
case <-ctx.Done():
case fetcher.fetchResponses <- response:
@@ -458,7 +458,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) {
blocks = resp.blocks
}
}
- if uint64(len(blocks)) != blockBatchLimit {
+ if uint64(len(blocks)) != uint64(blockBatchLimit) {
t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks))
}
@@ -509,11 +509,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) {
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 1,
Step: 1,
- Count: blockBatchLimit,
+ Count: uint64(blockBatchLimit),
}
blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0])
assert.NoError(t, err)
- assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned")
+ assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned")
// Test context cancellation.
ctx, cancel = context.WithCancel(context.Background())

View File

@@ -200,7 +200,7 @@ func TestBlocksFetcher_findFork(t *testing.T) {
peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
}
- blockBatchLimit := flags.Get().BlockBatchLimit * 2
+ blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
pidInd := 0
for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
req := &ethpb.BeaconBlocksByRangeRequest{

View File

@@ -163,13 +163,13 @@ func (s *Service) Resync() error {
}
func (s *Service) waitForMinimumPeers() {
- required := uint64(params.BeaconConfig().MaxPeersToSync)
+ required := params.BeaconConfig().MaxPeersToSync
if flags.Get().MinimumSyncPeers < required {
required = flags.Get().MinimumSyncPeers
}
for {
_, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch)
- if uint64(len(peers)) >= required {
+ if len(peers) >= required {
break
}
log.WithFields(logrus.Fields{

View File

@@ -27,7 +27,7 @@ import (
)
func TestService_Constants(t *testing.T) {
- if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > uint64(1000) {
+ if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 {
t.Fatal("rpc rejects requests over 1000 range slots")
}
}

View File

@@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa
// The initial count for the first batch to be returned back.
count := m.Count
- allowedBlocksPerSecond := flags.Get().BlockBatchLimit
+ allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit)
if count > allowedBlocksPerSecond {
count = allowedBlocksPerSecond
}

View File

@@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) {
req := &ethpb.BeaconBlocksByRangeRequest{
StartSlot: 100,
Step: 1,
- Count: flags.Get().BlockBatchLimit,
+ Count: uint64(flags.Get().BlockBatchLimit),
}
saveBlocks(req)
- for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ {
+ for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ {
assert.NoError(t, sendRequest(p1, p2, r, req, true, false))
}

View File

@@ -640,7 +640,7 @@ func (s *Service) unSubscribeFromTopic(topic string) {
// find if we have peers who are subscribed to the same subnet
func (s *Service) validPeersExist(subnetTopic string) bool {
numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix())
- return uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet
+ return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet
}
func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 {
@@ -681,7 +681,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID {
for _, sub := range wantedSubs {
subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix()
peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic)
- if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet {
+ if len(peers) > flags.Get().MinimumPeersPerSubnet {
// In the event we have more than the minimum, we can
// mark the remaining as viable for pruning.
peers = peers[:flags.Get().MinimumPeersPerSubnet]

View File

@@ -490,7 +490,7 @@ func TestFilterSubnetPeers(t *testing.T) {
// Try with only peers from subnet 20.
wantedPeers = []peer.ID{p2.BHost.ID()}
// Connect an excess amount of peers in the particular subnet.
- for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ {
+ for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ {
nPeer := createPeer(t, subnet20)
p.Connect(nPeer)
wantedPeers = append(wantedPeers, nPeer.BHost.ID())

View File

@@ -88,7 +88,7 @@ var (
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
- MinSyncPeers = &cli.Uint64Flag{
+ MinSyncPeers = &cli.IntFlag{
Name: "min-sync-peers",
Usage: "The required number of valid peers to connect with before syncing.",
Value: 3,
@@ -123,13 +123,13 @@ var (
Usage: "Does not run the discoveryV5 dht.",
}
// BlockBatchLimit specifies the requested block batch size.
- BlockBatchLimit = &cli.Uint64Flag{
+ BlockBatchLimit = &cli.IntFlag{
Name: "block-batch-limit",
Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.",
Value: 64,
}
// BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase.
- BlockBatchLimitBurstFactor = &cli.Uint64Flag{
+ BlockBatchLimitBurstFactor = &cli.IntFlag{
Name: "block-batch-limit-burst-factor",
Usage: "The factor by which block batch limit may increase on burst.",
Value: 10,

View File

@@ -12,10 +12,10 @@ type GlobalFlags struct {
DisableSync bool
DisableDiscv5 bool
SubscribeToAllSubnets bool
- MinimumSyncPeers uint64
- MinimumPeersPerSubnet uint64
- BlockBatchLimit uint64
- BlockBatchLimitBurstFactor uint64
+ MinimumSyncPeers int
+ MinimumPeersPerSubnet int
+ BlockBatchLimit int
+ BlockBatchLimitBurstFactor int
}
var globalConfig *GlobalFlags
@@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
cfg.SubscribeToAllSubnets = true
}
cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
- cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name)
- cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name)
- cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name)
+ cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
+ cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)
+ cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name)
configureMinimumPeers(ctx, cfg)
Init(cfg)
}
func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) {
- cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name)
- maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name)
+ cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name)
+ maxPeers := ctx.Int(cmd.P2PMaxPeers.Name)
if cfg.MinimumSyncPeers > maxPeers {
log.Warnf("Changing Minimum Sync Peers to %d", maxPeers)
cfg.MinimumSyncPeers = maxPeers

View File

@@ -150,7 +150,7 @@ var (
Value: "",
}
// P2PMaxPeers defines a flag to specify the max number of peers in libp2p.
- P2PMaxPeers = &cli.Uint64Flag{
+ P2PMaxPeers = &cli.IntFlag{
Name: "p2p-max-peers",
Usage: "The max number of p2p peers to maintain.",
Value: 45,