Mirror of https://github.com/OffchainLabs/prysm.git, synced 2026-01-09 13:28:01 -05:00
* Log when downscoring a peer.
* `validateSequenceNumber`: Downscore the peer inside the function; clarify and add logs.
* `AddConnectionHandler`: Move the bulk of the code to the outer scope (no functional change).
* `disconnectBadPeer`: Improve log.
* `sendRPCStatusRequest`: Improve log.
* `findPeersWithSubnets`: Add preventive peer filtering (as done in `s.findPeers`).
* `Stop`: Use one `defer` for the whole function. Reminder: `defer`s run in LIFO order.
* `Stop`: Send a goodbye message to all connected peers when stopping the service. Before this commit, stopping the service did not send any goodbye message, so a peer still believed we were alive and kept trying to communicate with us. Because we were offline, we could not respond; the peer therefore downscored us and eventually banned us, and when we restarted, it refused our connection request. Sending a goodbye message when stopping the service ensures the peer stops expecting anything from us, so everything works correctly after a restart. (A sketch of this idea follows the list.)
* `ConnectedF` and `DisconnectedF`: Work around a very probable libp2p bug by preventing outbound connections to very recently disconnected peers.
* Address James' review comments (several rounds).
* `AddDisconnectionHandler`: Handle multiple close calls to `DisconnectedF` for the same peer.
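The goodbye-on-stop change can be pictured with a minimal sketch. The names below (`broadcastGoodbye`, `sendGoodbye`) are illustrative and not Prysm's actual API; only the iteration over connected libp2p peers uses real go-libp2p calls.

package sync

import (
	"context"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
)

// broadcastGoodbye notifies every connected peer that we are shutting down,
// so they stop expecting responses and do not downscore, then ban, a node
// that has simply gone offline. sendGoodbye is a hypothetical stand-in for
// the service's goodbye RPC.
func broadcastGoodbye(ctx context.Context, h host.Host, sendGoodbye func(context.Context, peer.ID) error) {
	for _, pid := range h.Network().Peers() {
		// Best effort: a failed goodbye must not block shutdown.
		_ = sendGoodbye(ctx, pid)
	}
}

Calling something like this at the start of `Stop`, before the host is torn down, is enough for peers to treat the disconnect as graceful.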
112 lines · 3.9 KiB · Go
package sync

import (
	"sync"
	"testing"
	"time"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
	mockp2p "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
	p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
	"github.com/OffchainLabs/prysm/v6/testing/assert"
	"github.com/OffchainLabs/prysm/v6/testing/require"
	"github.com/OffchainLabs/prysm/v6/testing/util"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/protocol"
)

func TestNewRateLimiter(t *testing.T) {
	rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
	assert.Equal(t, len(rlimiter.limiterMap), 19, "correct number of topics not registered")
}

func TestNewRateLimiter_FreeCorrectly(t *testing.T) {
	rlimiter := newRateLimiter(mockp2p.NewTestP2P(t))
	rlimiter.free()
	assert.Equal(t, len(rlimiter.limiterMap), 0, "rate limiter not freed correctly")
}

func TestRateLimiter_ExceedCapacity(t *testing.T) {
	p1 := mockp2p.NewTestP2P(t)
	p2 := mockp2p.NewTestP2P(t)
	p1.Connect(p2)
	rlimiter := newRateLimiter(p1)

	// BlockByRange
	topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()

	wg := sync.WaitGroup{}
	p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
		defer wg.Done()
		code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
		require.NoError(t, err, "could not read incoming stream")
		assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
		assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
	})
	wg.Add(1)
	stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic))
	require.NoError(t, err, "could not create stream")

	err = rlimiter.validateRequest(stream, 64)
	require.NoError(t, err, "could not validate incoming request")

	// Attempt to create an error, rate limit and lead to disconnect
	err = rlimiter.validateRequest(stream, 1000)
	require.NotNil(t, err, "could not get error from leaky bucket")

	require.NoError(t, stream.Close(), "could not close stream")

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}

func TestRateLimiter_ExceedRawCapacity(t *testing.T) {
	p1 := mockp2p.NewTestP2P(t)
	p2 := mockp2p.NewTestP2P(t)
	p1.Connect(p2)
	p1.Peers().Add(nil, p2.PeerID(), p2.BHost.Addrs()[0], network.DirOutbound)

	rlimiter := newRateLimiter(p1)

	// BlockByRange
	topic := p2p.RPCBlocksByRangeTopicV1 + p1.Encoding().ProtocolSuffix()

	wg := sync.WaitGroup{}
	p2.BHost.SetStreamHandler(protocol.ID(topic), func(stream network.Stream) {
		defer wg.Done()
		code, errMsg, err := readStatusCodeNoDeadline(stream, p2.Encoding())
		require.NoError(t, err, "could not read incoming stream")
		assert.Equal(t, responseCodeInvalidRequest, code, "not equal response codes")
		assert.Equal(t, p2ptypes.ErrRateLimited.Error(), errMsg, "not equal errors")
	})
	wg.Add(1)
	stream, err := p1.BHost.NewStream(t.Context(), p2.PeerID(), protocol.ID(topic))
	require.NoError(t, err, "could not create stream")

	for i := 0; i < 2*defaultBurstLimit; i++ {
		err = rlimiter.validateRawRpcRequest(stream, 1)
		rlimiter.addRawStream(stream)
		require.NoError(t, err, "could not validate incoming request")
	}
	// Triggers rate limit error on burst.
	assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))

	// Make Peer bad.
	for i := 0; i < defaultBurstLimit; i++ {
		assert.ErrorContains(t, p2ptypes.ErrRateLimited.Error(), rlimiter.validateRawRpcRequest(stream, 1))
	}
	assert.NotNil(t, p1.Peers().IsBad(p2.PeerID()), "peer is not marked as a bad peer")
	require.NoError(t, stream.Close(), "could not close stream")

	if util.WaitTimeout(&wg, 1*time.Second) {
		t.Fatal("Did not receive stream within 1 sec")
	}
}

func Test_limiter_retrieveCollector_requiresLock(t *testing.T) {
	l := limiter{}
	_, err := l.retrieveCollector("")
	require.ErrorContains(t, "caller must hold read/write lock", err)
}