Update variable names that are the same as imports (#8220)

* Fix names that are the same as import names

* Address review feedback
terence tsao
2021-01-07 08:42:03 -08:00
committed by GitHub
parent d20065218c
commit 9ff825a570
13 changed files with 58 additions and 59 deletions
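
Every rename in this commit addresses the same Go pitfall: a local variable that reuses the name of an imported package shadows that import for the rest of its scope, so the package's identifiers can no longer be referenced there. A minimal self-contained sketch of the problem (the identifiers below are illustrative, not taken from Prysm):

package main

import (
    "fmt"
    "strings"
)

func joinAddresses() string {
    parts := []string{"addr-a", "addr-b"}
    joined := strings.Join(parts, ",") // the strings package is reachable here

    strings := []string{"shadow"} // legal, but shadows the "strings" import
    _ = strings
    // strings.Join(parts, ",") // would no longer compile in this scope:
    // "strings.Join undefined (type []string has no field or method Join)"
    return joined
}

func main() {
    fmt.Println(joinAddresses())
}

An alternative fix is to alias the import in the affected file; this commit opts for renaming the locals instead.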

View File

@@ -404,17 +404,17 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
         beaconDB: beaconDB,
         stateGen: stategen.New(beaconDB),
     }
-    b := testutil.NewBeaconBlock()
-    b.Block.Slot = 1
-    r, err := b.HashTreeRoot()
+    blk := testutil.NewBeaconBlock()
+    blk.Block.Slot = 1
+    r, err := blk.HashTreeRoot()
     require.NoError(t, err)
     newState := testutil.NewBeaconState()
     require.NoError(t, s.stateGen.SaveState(ctx, r, newState))
-    require.NoError(t, s.saveHeadNoDB(ctx, b, r, newState))
+    require.NoError(t, s.saveHeadNoDB(ctx, blk, r, newState))
     newB, err := s.beaconDB.HeadBlock(ctx)
     require.NoError(t, err)
-    if reflect.DeepEqual(newB, b) {
+    if reflect.DeepEqual(newB, blk) {
         t.Error("head block should not be equal")
     }
 }

View File

@@ -172,11 +172,11 @@ func (s *Service) createListener(
         dv5Cfg.Bootnodes = append(dv5Cfg.Bootnodes, bootNode)
     }
-    network, err := discover.ListenV5(conn, localNode, dv5Cfg)
+    listener, err := discover.ListenV5(conn, localNode, dv5Cfg)
     if err != nil {
         return nil, errors.Wrap(err, "could not listen to discV5")
     }
-    return network, nil
+    return listener, nil
 }
 
 func (s *Service) createLocalNode(

View File

@@ -42,8 +42,8 @@ func (s *Service) selfAddresses() string {
     if s.dv5Listener != nil {
         addresses = append(addresses, s.dv5Listener.Self().String())
     }
-    for _, ma := range s.host.Addrs() {
-        addresses = append(addresses, ma.String()+"/p2p/"+s.host.ID().Pretty())
+    for _, addr := range s.host.Addrs() {
+        addresses = append(addresses, addr.String()+"/p2p/"+s.host.ID().Pretty())
     }
     return strings.Join(addresses, ",")
 }

View File

@@ -229,17 +229,17 @@ func TestPeerSubscribedToSubnet(t *testing.T) {
     for i := 0; i < numPeers; i++ {
         addPeer(t, p, peers.PeerDisconnected)
     }
-    peers := p.SubscribedToSubnet(2)
-    assert.Equal(t, 1, len(peers), "Unexpected num of peers")
-    assert.Equal(t, expectedPeer, peers[0])
+    ps := p.SubscribedToSubnet(2)
+    assert.Equal(t, 1, len(ps), "Unexpected num of peers")
+    assert.Equal(t, expectedPeer, ps[0])
-    peers = p.SubscribedToSubnet(8)
-    assert.Equal(t, 1, len(peers), "Unexpected num of peers")
-    assert.Equal(t, expectedPeer, peers[0])
+    ps = p.SubscribedToSubnet(8)
+    assert.Equal(t, 1, len(ps), "Unexpected num of peers")
+    assert.Equal(t, expectedPeer, ps[0])
-    peers = p.SubscribedToSubnet(9)
-    assert.Equal(t, 1, len(peers), "Unexpected num of peers")
-    assert.Equal(t, expectedPeer, peers[0])
+    ps = p.SubscribedToSubnet(9)
+    assert.Equal(t, 1, len(ps), "Unexpected num of peers")
+    assert.Equal(t, expectedPeer, ps[0])
 }
 
 func TestPeerImplicitAdd(t *testing.T) {
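
This hunk shows the clash most directly: a few lines above the renamed code, the same test calls addPeer(t, p, peers.PeerDisconnected), using the peers package, while the old code then declared a local peers slice that cut off access to that package for the rest of the function. A sketch of the same shape with a hypothetical stand-in package (only the peers.PeerDisconnected reference comes from the diff; the package body and values are invented):

// peers/peers.go — hypothetical stand-in for the real peers package.
package peers

type PeerConnectionState int

const PeerDisconnected PeerConnectionState = 3

// main.go
package main

import "example.com/shadow/peers"

func main() {
    state := peers.PeerDisconnected // package reference: fine here
    peers := []string{"peer-a"}     // local slice now shadows the import
    _, _ = state, peers
    // _ = peers.PeerDisconnected // would not compile: peers is a []string here
}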

View File

@@ -225,8 +225,7 @@ func TestListenForNewNodes(t *testing.T) {
         })
     }
     time.Sleep(4 * time.Second)
-    peers := s.host.Network().Peers()
-    assert.Equal(t, 5, len(peers), "Not all peers added to peerstore")
+    assert.Equal(t, 5, len(s.host.Network().Peers()), "Not all peers added to peerstore")
     require.NoError(t, s.Stop())
     exitRoutine <- true
 }

View File

@@ -283,11 +283,11 @@ func (p *TestP2P) AddDisconnectionHandler(f func(ctx context.Context, id peer.ID
 // Send a message to a specific peer.
 func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid peer.ID) (network.Stream, error) {
-    protocol := topic
-    if protocol == "" {
+    t := topic
+    if t == "" {
         return nil, fmt.Errorf("protocol doesnt exist for proto message: %v", msg)
     }
-    stream, err := p.BHost.NewStream(ctx, pid, core.ProtocolID(protocol+p.Encoding().ProtocolSuffix()))
+    stream, err := p.BHost.NewStream(ctx, pid, core.ProtocolID(t+p.Encoding().ProtocolSuffix()))
     if err != nil {
         return nil, err
     }

View File

@@ -17,16 +17,16 @@ func ensurePeerConnections(ctx context.Context, h host.Host, peers ...string) {
         if p == "" {
             continue
         }
-        peer, err := MakePeer(p)
+        peerInfo, err := MakePeer(p)
         if err != nil {
             log.Errorf("Could not make peer: %v", err)
             continue
         }
-        c := h.Network().ConnsToPeer(peer.ID)
+        c := h.Network().ConnsToPeer(peerInfo.ID)
         if len(c) == 0 {
-            if err := connectWithTimeout(ctx, h, peer); err != nil {
-                log.WithField("peer", peer.ID).WithField("addrs", peer.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
+            if err := connectWithTimeout(ctx, h, peerInfo); err != nil {
+                log.WithField("peer", peerInfo.ID).WithField("addrs", peerInfo.Addrs).WithError(err).Errorf("Failed to reconnect to peer")
                 continue
             }
         }
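
The peerInfo rename follows the same logic: judging by the peerInfo.ID and peerInfo.Addrs accesses, MakePeer presumably returns a *peer.AddrInfo from libp2p's peer package, so a variable named peer shadowed that import inside the loop. A hedged sketch of the distinction (the function and its signature are illustrative, not Prysm's code):

package main

import (
    "fmt"

    "github.com/libp2p/go-libp2p-core/peer"
)

// describe assumes a *peer.AddrInfo input, as the diff's field accesses suggest.
func describe(info *peer.AddrInfo) string {
    peerInfo := info             // does not shadow the "peer" package
    var id peer.ID = peerInfo.ID // the package's types are still reachable

    peer := info // this form shadows the import for the rest of the scope
    _ = peer
    // var other peer.ID // would not compile here: peer is now a variable
    return fmt.Sprintf("%s -> %v", id, peerInfo.Addrs)
}

func main() {
    fmt.Println(describe(&peer.AddrInfo{}))
}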

View File

@@ -621,7 +621,7 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
     requestRange := (endBlock - startBlock) + 1
     elems := make([]gethRPC.BatchElem, 0, requestRange)
     headers := make([]*gethTypes.Header, 0, requestRange)
-    errors := make([]error, 0, requestRange)
+    errs := make([]error, 0, requestRange)
     if requestRange == 0 {
         return headers, nil
     }
@@ -635,13 +635,13 @@ func (s *Service) batchRequestHeaders(startBlock, endBlock uint64) ([]*gethTypes
             Error: err,
         })
         headers = append(headers, header)
-        errors = append(errors, err)
+        errs = append(errs, err)
     }
     ioErr := s.rpcClient.BatchCall(elems)
     if ioErr != nil {
         return nil, ioErr
     }
-    for _, e := range errors {
+    for _, e := range errs {
         if e != nil {
             return nil, e
         }
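
This is the classic instance of the pitfall: a slice named errors shadowed the errors package, so nothing below its declaration could call errors.New (or, presumably, pkg/errors helpers such as the errors.Wrap seen in the createListener hunk above). A small sketch of the fixed shape using only the standard library:

package main

import (
    "errors"
    "fmt"
)

// batch mirrors the renamed code: with the slice called errs, the errors
// package stays callable anywhere in the function.
func batch(n int) error {
    errs := make([]error, 0, n)
    for i := 0; i < n; i++ {
        errs = append(errs, nil) // collect per-request errors, as in the diff
    }
    for _, e := range errs {
        if e != nil {
            return errors.New("batch call failed: " + e.Error())
        }
    }
    return nil
}

func main() {
    fmt.Println(batch(3))
}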

View File

@@ -188,10 +188,10 @@ func (bs *Server) StreamBlocks(req *ethpb.StreamBlocksRequest, stream ethpb.Beac
     for {
         select {
-        case event := <-blocksChannel:
+        case blockEvent := <-blocksChannel:
             if req.VerifiedOnly {
-                if event.Type == statefeed.BlockProcessed {
-                    data, ok := event.Data.(*statefeed.BlockProcessedData)
+                if blockEvent.Type == statefeed.BlockProcessed {
+                    data, ok := blockEvent.Data.(*statefeed.BlockProcessedData)
                     if !ok || data == nil {
                         continue
                     }
@@ -200,8 +200,8 @@ func (bs *Server) StreamBlocks(req *ethpb.StreamBlocksRequest, stream ethpb.Beac
                 }
             }
             } else {
-                if event.Type == blockfeed.ReceivedBlock {
-                    data, ok := event.Data.(*blockfeed.ReceivedBlockData)
+                if blockEvent.Type == blockfeed.ReceivedBlock {
+                    data, ok := blockEvent.Data.(*blockfeed.ReceivedBlockData)
                     if !ok {
                         // Got bad data over the stream.
                         continue
@@ -242,8 +242,8 @@ func (bs *Server) StreamChainHead(_ *ptypes.Empty, stream ethpb.BeaconChain_Stre
     defer stateSub.Unsubscribe()
     for {
         select {
-        case event := <-stateChannel:
-            if event.Type == statefeed.BlockProcessed {
+        case stateEvent := <-stateChannel:
+            if stateEvent.Type == statefeed.BlockProcessed {
                 res, err := bs.chainHeadRetrieval(stream.Context())
                 if err != nil {
                     return status.Errorf(codes.Internal, "Could not retrieve chain head: %v", err)

View File

@@ -72,7 +72,7 @@ func (bs *Server) ListValidatorBalances(
         return nil, status.Errorf(codes.Internal, "Could not get state")
     }
-    validators := requestedState.Validators()
+    vals := requestedState.Validators()
     balances := requestedState.Balances()
     balancesCount := len(balances)
     for _, pubKey := range req.PublicKeys {
@@ -97,7 +97,7 @@ func (bs *Server) ListValidatorBalances(
                 index, len(balances))
         }
-        val := validators[index]
+        val := vals[index]
         st := validatorStatus(val, requestedEpoch)
         res = append(res, &ethpb.ValidatorBalances_Balance{
             PublicKey: pubKey,
@@ -115,10 +115,10 @@ func (bs *Server) ListValidatorBalances(
         }
         if !filtered[index] {
-            val := validators[index]
+            val := vals[index]
             st := validatorStatus(val, requestedEpoch)
             res = append(res, &ethpb.ValidatorBalances_Balance{
-                PublicKey: validators[index].PublicKey,
+                PublicKey: vals[index].PublicKey,
                 Index: index,
                 Balance: balances[index],
                 Status: st.String(),
@@ -155,7 +155,7 @@ func (bs *Server) ListValidatorBalances(
         // Return everything.
         for i := start; i < end; i++ {
             pubkey := requestedState.PubkeyAtIndex(uint64(i))
-            val := validators[i]
+            val := vals[i]
             st := validatorStatus(val, requestedEpoch)
             res = append(res, &ethpb.ValidatorBalances_Balance{
                 PublicKey: pubkey[:],

View File

@@ -155,8 +155,8 @@ func (is *infostream) handleConnection() error {
     // Send responses at the end of every epoch.
     for {
         select {
-        case event := <-is.stateChannel:
-            if event.Type == statefeed.BlockProcessed {
+        case stateEvent := <-is.stateChannel:
+            if stateEvent.Type == statefeed.BlockProcessed {
                 is.handleBlockProcessed()
             }
         case <-is.stateSub.Err():

View File

@@ -342,9 +342,9 @@ func TestService_markSynced(t *testing.T) {
     wg.Add(1)
     go func() {
         select {
-        case event := <-stateChannel:
-            if event.Type == statefeed.Synced {
-                data, ok := event.Data.(*statefeed.SyncedData)
+        case stateEvent := <-stateChannel:
+            if stateEvent.Type == statefeed.Synced {
+                data, ok := stateEvent.Data.(*statefeed.SyncedData)
                 require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData")
                 receivedGenesisTime = data.StartTime
             }

View File

@@ -30,10 +30,10 @@ import (
 )
 
 func TestSubscribe_ReceivesValidMessage(t *testing.T) {
-    p2p := p2ptest.NewTestP2P(t)
+    p2pService := p2ptest.NewTestP2P(t)
     r := Service{
         ctx: context.Background(),
-        p2p: p2p,
+        p2p: p2pService,
         initialSync: &mockSync.Sync{IsSyncing: false},
         chain: &mockChain.ChainService{
             ValidatorsRoot: [32]byte{'A'},
@@ -42,7 +42,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
         chainStarted: abool.New(),
     }
     var err error
-    p2p.Digest, err = r.forkDigest()
+    p2pService.Digest, err = r.forkDigest()
     require.NoError(t, err)
     topic := "/eth2/%x/voluntary_exit"
     var wg sync.WaitGroup
@@ -59,7 +59,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
     })
     r.markForChainStart()
-    p2p.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, 96)})
+    p2pService.ReceivePubSub(topic, &pb.SignedVoluntaryExit{Exit: &pb.VoluntaryExit{Epoch: 55}, Signature: make([]byte, 96)})
     if testutil.WaitTimeout(&wg, time.Second) {
         t.Fatal("Did not receive PubSub in 1 second")
@@ -67,7 +67,7 @@ func TestSubscribe_ReceivesValidMessage(t *testing.T) {
 }
 
 func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
-    p2p := p2ptest.NewTestP2P(t)
+    p2pService := p2ptest.NewTestP2P(t)
     ctx := context.Background()
     d := db.SetupDB(t)
     chainService := &mockChain.ChainService{
@@ -78,7 +78,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
     require.NoError(t, err)
     r := Service{
         ctx: ctx,
-        p2p: p2p,
+        p2p: p2pService,
         initialSync: &mockSync.Sync{IsSyncing: false},
         slashingPool: slashings.NewPool(),
         chain: chainService,
@@ -107,9 +107,9 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
     require.NoError(t, err, "Error generating attester slashing")
     err = r.db.SaveState(ctx, beaconState, bytesutil.ToBytes32(attesterSlashing.Attestation_1.Data.BeaconBlockRoot))
     require.NoError(t, err)
-    p2p.Digest, err = r.forkDigest()
+    p2pService.Digest, err = r.forkDigest()
     require.NoError(t, err)
-    p2p.ReceivePubSub(topic, attesterSlashing)
+    p2pService.ReceivePubSub(topic, attesterSlashing)
     if testutil.WaitTimeout(&wg, time.Second) {
         t.Fatal("Did not receive PubSub in 1 second")
@@ -119,7 +119,7 @@ func TestSubscribe_ReceivesAttesterSlashing(t *testing.T) {
 }
 
 func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
-    p2p := p2ptest.NewTestP2P(t)
+    p2pService := p2ptest.NewTestP2P(t)
     ctx := context.Background()
     chainService := &mockChain.ChainService{
         ValidatorsRoot: [32]byte{'A'},
@@ -130,7 +130,7 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
     require.NoError(t, err)
     r := Service{
         ctx: ctx,
-        p2p: p2p,
+        p2p: p2pService,
         initialSync: &mockSync.Sync{IsSyncing: false},
         slashingPool: slashings.NewPool(),
         chain: chainService,
@@ -157,9 +157,9 @@ func TestSubscribe_ReceivesProposerSlashing(t *testing.T) {
         1, /* validator index */
     )
     require.NoError(t, err, "Error generating proposer slashing")
-    p2p.Digest, err = r.forkDigest()
+    p2pService.Digest, err = r.forkDigest()
     require.NoError(t, err)
-    p2p.ReceivePubSub(topic, proposerSlashing)
+    p2pService.ReceivePubSub(topic, proposerSlashing)
     if testutil.WaitTimeout(&wg, time.Second) {
         t.Fatal("Did not receive PubSub in 1 second")