Compare commits

...

3 Commits

Author SHA1 Message Date
Tanguy
2a7326fc97 m 2022-01-17 16:39:37 +01:00
Tanguy
432904edce Some work on raises 2022-01-12 16:53:28 +01:00
Tanguy
7eb3225d19 Remove false async raises 2022-01-06 12:16:15 +01:00
15 changed files with 128 additions and 142 deletions

View File

@@ -1,7 +1,7 @@
asynctest;https://github.com/markspanbroek/asynctest@#3882ed64ed3159578f796bc5ae0c6b13837fe798
bearssl;https://github.com/status-im/nim-bearssl@#ba80e2a0d7ae8aab666cee013e38ff8d33a3e5e7
chronicles;https://github.com/status-im/nim-chronicles@#2a2681b60289aaf7895b7056f22616081eb1a882
chronos;https://github.com/status-im/nim-chronos@#7dc58d42b6905a7fd7531875fa76060f8f744e4e
chronos;https://github.com/status-im/nim-chronos@#f8a55aeb4e356a67f0f7311a90f7b6467c505860
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fbb76f8af8a33ab818184a7d4406d9fee20993be
faststreams;https://github.com/status-im/nim-faststreams@#c653d05f277dca0f374732c5b9b80f2368faea33
httputils;https://github.com/status-im/nim-http-utils@#507bfb7dcb6244d76ce2567df7bf3756cbe88775
@@ -13,5 +13,5 @@ serialization;https://github.com/status-im/nim-serialization@#11a8aa64d27d4fa92e
stew;https://github.com/status-im/nim-stew@#2f9c61f485e1de6d7e163294008276c455d39da2
testutils;https://github.com/status-im/nim-testutils@#aa6e5216f4b4ab5aa971cdcdd70e1ec1203cedf2
unittest2;https://github.com/status-im/nim-unittest2@#4e2893eacb916c7678fdc4935ff7420f13bf3a9c
websock;https://github.com/status-im/nim-websock@#c2aae352f7fad7a8d333327c37e966969d3ee542
websock;https://github.com/status-im/nim-websock@#b31ff99b9d5200eea175520c820a3c9f91093bbf
zlib;https://github.com/status-im/nim-zlib@#d4e716d071eba1b5e0ffdf7949d983959e2b95dd

View File

@@ -30,8 +30,7 @@ const
type
TooManyConnectionsError* = object of LPError
ConnProvider* = proc(): Future[Connection]
{.gcsafe, closure, raises: [Defect].}
ConnProvider* = proc(): Future[Connection] {.gcsafe, closure, raises: [], async.}
ConnEventKind* {.pure.} = enum
Connected, # A connection was made and securely upgraded - there may be
@@ -401,32 +400,24 @@ proc storeConn*(c: ConnManager, conn: Connection)
proc trackConn(c: ConnManager,
provider: ConnProvider,
sema: AsyncSemaphore):
Future[Connection] {.async.} =
Future[Connection] {.async, raises: [].} =
var conn: Connection
try:
conn = await provider()
conn = await provider()
if isNil(conn):
return
if isNil(conn):
return
trace "Got connection", conn
trace "Got connection", conn
proc semaphoreMonitor() {.async.} =
try:
await conn.join()
except CatchableError as exc:
trace "Exception in semaphore monitor, ignoring", exc = exc.msg
proc semaphoreMonitor() {.async, raises: [].} =
try:
await conn.join()
except CatchableError as exc:
trace "Exception in semaphore monitor, ignoring", exc = exc.msg
sema.release()
asyncSpawn semaphoreMonitor()
except CatchableError as exc:
trace "Exception tracking connection", exc = exc.msg
if not isNil(conn):
await conn.close()
raise exc
sema.release()
asyncSpawn semaphoreMonitor()
return conn
proc trackIncomingConn*(c: ConnManager,
@@ -437,23 +428,18 @@ proc trackIncomingConn*(c: ConnManager,
##
var conn: Connection
try:
trace "Tracking incoming connection"
await c.inSema.acquire()
conn = await c.trackConn(provider, c.inSema)
if isNil(conn):
trace "Couldn't acquire connection, releasing semaphore slot", dir = $Direction.In
c.inSema.release()
return conn
except CatchableError as exc:
trace "Exception tracking connection", exc = exc.msg
trace "Tracking incoming connection"
await c.inSema.acquire()
conn = await c.trackConn(provider, c.inSema)
if isNil(conn):
trace "Couldn't acquire connection, releasing semaphore slot", dir = $Direction.In
c.inSema.release()
raise exc
return conn
proc trackOutgoingConn*(c: ConnManager,
provider: ConnProvider):
Future[Connection] {.async.} =
Future[Connection] {.async, raises: [TooManyConnectionsError].} =
## try acquiring a connection if all slots
## are already taken, raise TooManyConnectionsError
## exception
@@ -468,17 +454,12 @@ proc trackOutgoingConn*(c: ConnManager,
raise newTooManyConnectionsError()
var conn: Connection
try:
conn = await c.trackConn(provider, c.outSema)
if isNil(conn):
trace "Couldn't acquire connection, releasing semaphore slot", dir = $Direction.Out
c.outSema.release()
return conn
except CatchableError as exc:
trace "Exception tracking connection", exc = exc.msg
conn = await c.trackConn(provider, c.outSema)
if isNil(conn):
trace "Couldn't acquire connection, releasing semaphore slot", dir = $Direction.Out
c.outSema.release()
raise exc
return conn
proc storeMuxer*(c: ConnManager,
muxer: Muxer,

View File

@@ -160,7 +160,7 @@ type
template orError*(exp: untyped, err: untyped): untyped =
(exp.mapErr do (_: auto) -> auto: err)
proc newRng*(): ref BrHmacDrbgContext =
proc newRng*(): ref BrHmacDrbgContext {.raises: [].} =
# You should only create one instance of the RNG per application / library
# Ref is used so that it can be shared between components
# TODO consider moving to bearssl

View File

@@ -19,6 +19,7 @@ import dial,
multistream,
connmanager,
stream/connection,
upgrademngrs/upgrade,
transports/transport,
nameresolving/nameresolver,
errors
@@ -48,7 +49,8 @@ proc dialAndUpgrade(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
Future[Connection] {.async, raises: [DialFailedError, UpgradeFailedError, TooManyConnectionsError].} =
debug "Dialing peer", peerId
for address in addrs: # for each address
@@ -56,34 +58,26 @@ proc dialAndUpgrade(
hostname = address.getHostname()
resolvedAddresses =
if isNil(self.nameResolver): @[address]
else: await self.nameResolver.resolveMAddress(address)
else:
try: await self.nameResolver.resolveMAddress(address)
except MaError: raise newException(DialFailedError, "Failed to resolve name")
for a in resolvedAddresses: # for each resolved address
for transport in self.transports: # for each transport
if transport.handles(a): # check if it can dial it
trace "Dialing address", address = $a, peerId, hostname
let dialed = try:
libp2p_total_dial_attempts.inc()
# await a connection slot when the total
# connection count is equal to `maxConns`
#
# Need to copy to avoid "cannot be captured" errors in Nim-1.4.x.
let
transportCopy = transport
addressCopy = a
libp2p_total_dial_attempts.inc()
# await a connection slot when the total
# connection count is equal to `maxConns`
#
# Need to copy to avoid "cannot be captured" errors in Nim-1.4.x.
let
transportCopy = transport
addressCopy = a
dialed =
await self.connManager.trackOutgoingConn(
() => transportCopy.dial(hostname, addressCopy)
)
except TooManyConnectionsError as exc:
trace "Connection limit reached!"
raise exc
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
except CatchableError as exc:
debug "Dialing failed", msg = exc.msg, peerId
libp2p_failed_dials.inc()
continue # Try the next address
# make sure to assign the peer to the connection
dialed.peerId = peerId
@@ -94,15 +88,15 @@ proc dialAndUpgrade(
libp2p_successful_dials.inc()
let conn = try:
let conn =
try:
await transport.upgradeOutgoing(dialed)
except CatchableError as exc:
except UpgradeFailedError as exc:
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", msg = exc.msg, peerId
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
libp2p_failed_upgrades_outgoing.inc()
raise exc
doAssert not isNil(conn), "connection died after upgradeOutgoing"

View File

@@ -85,7 +85,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async, gcsafe, raises: [].} =
if s.isClosed:
trace "Already closed", s
return
@@ -143,9 +143,9 @@ method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName
s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
s.timeoutHandler = proc(): Future[void] {.gcsafe, async, raises: [].} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()
await s.reset()
procCall BufferStream(s).initStream()

View File

@@ -14,7 +14,7 @@ import
chronos,
chronicles,
stew/[endians2, byteutils]
import ".."/[multiaddress, multicodec]
import ".."/[multiaddress, multicodec, errors]
logScope:
topics = "libp2p nameresolver"
@@ -24,7 +24,7 @@ type
method resolveTxt*(
self: NameResolver,
address: string): Future[seq[string]] {.async, base.} =
address: string): Future[seq[string]] {.async, base, raises: [].} =
## Get TXT record
##
@@ -34,7 +34,7 @@ method resolveIp*(
self: NameResolver,
address: string,
port: Port,
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, base.} =
domain: Domain = Domain.AF_UNSPEC): Future[seq[TransportAddress]] {.async, raises: [], base.} =
## Resolve the specified address
##
@@ -50,13 +50,16 @@ proc resolveDnsAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [Defect, MaError, TransportAddressError].} =
{.async, raises: [MaError].} =
# Resolve a single address
var pbuf: array[2, byte]
var dnsval = getHostname(ma)
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
let portNumber =
try: ma[1].tryGet().protoArgument(pbuf).tryGet()
except ResultError[string], LPError: 0
if portNumber == 0:
raise newException(MaError, "Incorrect port number")
let
port = Port(fromBytesBE(uint16, pbuf))
@@ -65,10 +68,14 @@ proc resolveDnsAddress(
var addressSuffix = ma
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.get()): continue
createdAddress &= part.tryGet()
var createdAddress: MultiAddress
try:
createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.get()): continue
createdAddress &= part.tryGet()
except LPError:
raise newException(MaError, "Invalid resolved address:" & $address)
createdAddress
func matchDnsSuffix(m1, m2: MultiAddress): MaResult[bool] =
@@ -84,7 +91,7 @@ proc resolveDnsAddr(
self: NameResolver,
ma: MultiAddress,
depth: int = 0): Future[seq[MultiAddress]]
{.async.} =
{.async, raises: [MaError].} =
trace "Resolving dnsaddr", ma
if depth > 6:
@@ -100,9 +107,10 @@ proc resolveDnsAddr(
var result: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="): continue
let entryValue = MultiAddress.init(entry[8..^1]).tryGet()
let entryValue = try: MultiAddress.init(entry[8..^1]).tryGet() except: raise newException(MaError, "")
if not matchDnsSuffix(ma, entryValue).tryGet(): continue
let isDnsAddr = try: matchDnsSuffix(ma, entryValue).tryGet() except: raise newException(MaError, "")
if not isDnsAddr: continue
# The spec is not clear whether only DNSADDR can be recursed
# or any DNS addr. Only handling DNSADDR because it's simpler
@@ -122,7 +130,7 @@ proc resolveDnsAddr(
proc resolveMAddress*(
self: NameResolver,
address: MultiAddress): Future[seq[MultiAddress]] {.async.} =
address: MultiAddress): Future[seq[MultiAddress]] {.async, raises: [MaError].} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):

View File

@@ -93,8 +93,8 @@ proc handleConn(s: Secure,
for f in futs:
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
await allFuturesThrowing(
sconn.close(), conn.close())
await sconn.close()
await conn.close()
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.

View File

@@ -172,7 +172,7 @@ method readOnce*(s: BufferStream,
return rbytes
method closeImpl*(s: BufferStream): Future[void] =
method closeImpl*(s: BufferStream): Future[void] {.async, raises: [].} =
## close the stream and clear the buffer
trace "Closing BufferStream", s, len = s.len
@@ -212,4 +212,4 @@ method closeImpl*(s: BufferStream): Future[void] =
trace "Closed BufferStream", s
procCall Connection(s).closeImpl() # noraises, nocancels
await procCall Connection(s).closeImpl() # noraises, nocancels

View File

@@ -47,7 +47,7 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async, gcsafe.} =
s.timeoutHandler = proc() {.async, gcsafe, raises: [].} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()

View File

@@ -7,7 +7,10 @@
## This file may not be copied, modified, or distributed except according to
## those terms.
{.push raises: [Defect].}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[hashes, oids, strformat]
import chronicles, chronos, metrics
@@ -26,7 +29,7 @@ const
DefaultConnectionTimeout* = 5.minutes
type
TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [Defect].}
TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [], async.}
Connection* = ref object of LPStream
activity*: bool # reset every time data is sent or received
@@ -84,11 +87,11 @@ method initStream*(s: Connection) =
s.timerTaskFut = s.timeoutMonitor()
if isNil(s.timeoutHandler):
s.timeoutHandler = proc(): Future[void] =
s.timeoutHandler = proc(): Future[void] {.async, raises: [].} =
trace "Idle timeout expired, closing connection", s
s.close()
await s.close()
method closeImpl*(s: Connection): Future[void] =
method closeImpl*(s: Connection): Future[void] {.async, raises: [].} =
# Cleanup timeout timer
trace "Closing connection", s
@@ -102,7 +105,7 @@ method closeImpl*(s: Connection): Future[void] =
trace "Closed connection", s
procCall LPStream(s).closeImpl()
await procCall LPStream(s).closeImpl()
func hash*(p: Connection): Hash =
cast[pointer](p).hash

View File

@@ -258,7 +258,7 @@ proc writeLp*(s: LPStream, msg: string): Future[void] =
proc write*(s: LPStream, msg: string): Future[void] =
s.write(msg.toBytes())
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
method closeImpl*(s: LPStream): Future[void] {.async, base, raises: [].} =
## Implementation of close - called only once
trace "Closing stream", s, objName = s.objName, dir = $s.dir
libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
@@ -266,7 +266,7 @@ method closeImpl*(s: LPStream): Future[void] {.async, base.} =
s.closeEvent.fire()
trace "Closed stream", s, objName = s.objName, dir = $s.dir
method close*(s: LPStream): Future[void] {.base, async.} = # {.raises [Defect].}
method close*(s: LPStream): Future[void] {.base, async, raises: [].} = # {.raises [Defect].}
## close the stream - this may block, but will not raise exceptions
##
if s.isClosed:

View File

@@ -53,7 +53,7 @@ method stop*(self: Transport) {.base, async.} =
self.running = false
method accept*(self: Transport): Future[Connection]
{.base, gcsafe.} =
{.base, gcsafe, async, raises: [].} =
## accept incoming connections
##
@@ -62,7 +62,7 @@ method accept*(self: Transport): Future[Connection]
method dial*(
self: Transport,
hostname: string,
address: MultiAddress): Future[Connection] {.base, gcsafe.} =
address: MultiAddress): Future[Connection] {.base, gcsafe, async, raises: [].} =
## dial a peer
##
@@ -70,8 +70,8 @@ method dial*(
proc dial*(
self: Transport,
address: MultiAddress): Future[Connection] {.gcsafe.} =
self.dial("", address)
address: MultiAddress): Future[Connection] {.gcsafe, async, raises: [].} =
return await self.dial("", address)
method upgradeIncoming*(
self: Transport,
@@ -84,12 +84,12 @@ method upgradeIncoming*(
method upgradeOutgoing*(
self: Transport,
conn: Connection): Future[Connection] {.base, gcsafe.} =
conn: Connection): Future[Connection] {.base, gcsafe, async, raises: [UpgradeFailedError].} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
self.upgrader.upgradeOutgoing(conn)
return await self.upgrader.upgradeOutgoing(conn)
method handles*(
self: Transport,

View File

@@ -210,7 +210,7 @@ proc new*(
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
{.async, gcsafe, raises: [Defect].} =
{.async, gcsafe.} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection

View File

@@ -44,7 +44,7 @@ method upgradeIncoming*(
method upgradeOutgoing*(
self: Upgrade,
conn: Connection): Future[Connection] {.base.} =
conn: Connection): Future[Connection] {.base, raises: [UpgradeFailedError], async.} =
doAssert(false, "Not implemented!")
proc secure*(

View File

@@ -246,9 +246,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -257,9 +257,9 @@ suite "Connection Manager":
# should timeout adding a connection over the limit
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -275,9 +275,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -286,9 +286,9 @@ suite "Connection Manager":
# should throw adding a connection over the limit
expect TooManyConnectionsError:
discard await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -302,9 +302,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -312,9 +312,9 @@ suite "Connection Manager":
# should timeout adding a connection over the limit
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -330,9 +330,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -342,9 +342,9 @@ suite "Connection Manager":
# should throw adding a connection over the limit
expect TooManyConnectionsError:
discard await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -358,9 +358,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -369,9 +369,9 @@ suite "Connection Manager":
# should timeout adding a connection over the limit
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -387,9 +387,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -398,9 +398,9 @@ suite "Connection Manager":
# should throw adding a connection over the limit
expect TooManyConnectionsError:
discard await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -414,9 +414,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -424,9 +424,9 @@ suite "Connection Manager":
# should timeout adding a connection over the limit
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -442,9 +442,9 @@ suite "Connection Manager":
var conns: seq[Connection]
for i in 0..<3:
let conn = connMngr.trackIncomingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)
@@ -454,9 +454,9 @@ suite "Connection Manager":
# should throw adding a connection over the limit
expect TooManyConnectionsError:
discard await connMngr.trackOutgoingConn(
proc(): Future[Connection] {.async.} =
proc(): Future[Connection] {.async, raises: [].} =
return Connection.new(
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).tryGet()).tryGet(),
PeerId.init(PrivateKey.random(ECDSA, (newRng())[]).get()).get(),
Direction.In)
)