Compare commits

...

33 Commits

Author SHA1 Message Date
Diego
1342b45b29 handle CancelledError 2024-03-20 11:04:50 +01:00
Diego
a65ce6a47a fix: sendMsg must not raise errors 2024-03-19 22:36:17 +01:00
Ivan FB
fdf53d18cd libp2p/dialer.nim: tiny log change to make it clearer a connection upgrade (#1071) 2024-03-18 11:38:23 +01:00
Etan Kissling
48a3ac06ff {.async: (raises).} for MultistreamSelect (#1066) 2024-03-12 21:05:53 +01:00
Etan Kissling
49a92e5641 avoid pointless exception raising in dcutr/server (#1063) 2024-03-12 18:29:01 +01:00
Etan Kissling
08a48faf41 {.async: (raises).} annotations for protocols/secure (#1059) 2024-03-07 11:22:22 +00:00
Etan Kissling
61b299e411 {.async: (raises).} for relay/utils.nim (#1058) 2024-03-07 10:45:25 +01:00
Etan Kissling
ca01ee06a8 clean up triple lookup and avoid KeyError when adding muxer (#1057) 2024-03-06 06:49:45 +01:00
Etan Kissling
6c43ab3fce default MultiAddress param for newStandardSwitch does not raise (#1056) 2024-03-06 06:48:13 +01:00
Jacek Sieka
ae13a0d583 Send priority with queue fix (#1051)
Co-authored-by: Diego <diego@status.im>
2024-03-05 15:05:21 +00:00
Etan Kissling
28609597d1 add {.async: (raises).} to libp2p/stream modules (#1050)
Co-authored-by: Dmitriy Ryajov <dryajov@gmail.com>
Co-authored-by: Jacek Sieka <jacek@status.im>
2024-03-05 07:06:27 +00:00
Etan Kissling
8294d5b9df document known --mm:orc crash (#1039) 2024-03-04 19:34:09 +01:00
Etan Kissling
78e83889ee define proper parent error type for YamuxError (#1040) 2024-03-04 19:26:27 +01:00
Etan Kissling
7603b8de5e catch WebSocketError in wstransport (#1049) 2024-03-04 00:27:35 +01:00
Etan Kissling
8cccd54125 avoid triple lookup in m.flushed yamux table (#1045) 2024-03-04 00:27:13 +01:00
Etan Kissling
18e00a741b avoid KeyError in edge case of yamux handler (#1044) 2024-03-04 00:24:18 +01:00
Etan Kissling
ee264fdf11 in yamux, do not write {Rst} packet to stream that's in use (#1041) 2024-03-04 00:23:42 +01:00
Etan Kissling
9059a8aced use race instead of or to avoid lockup (#1042) 2024-03-04 00:06:32 +01:00
Etan Kissling
0b753e7cf2 don't forget closing the stream when final {Fin} fails in yamux (#1043) 2024-03-04 00:05:59 +01:00
Etan Kissling
d43c5feab0 do not log yamux buffers without sanitization (trace log level) (#1046) 2024-03-04 00:04:37 +01:00
Etan Kissling
1609fd7197 change SecioError and NoiseError to descendants of LPStreamError (#1047) 2024-03-04 00:04:25 +01:00
Etan Kissling
42cd78e95b remove unused LPStreamError types (#1048) 2024-03-04 00:03:44 +01:00
Etan Kissling
44cada9c55 use new Chronos trackCounter APIs for leaks checks in tests (#1038) 2024-03-03 18:13:37 +01:00
Etan Kissling
6c873481ac move allFutureThrowing helper to tests (#1037)
Co-authored-by: Jacek Sieka <jacek@status.im>
2024-03-01 18:06:26 +01:00
Etan Kissling
d08ce17144 remove unused MultiBase.encode(..., Cid) function (#1036) 2024-03-01 14:07:52 +01:00
Etan Kissling
bd6ead95ef increase tolerance of simple heartbeat test (#1034) 2024-03-01 14:06:42 +01:00
Etan Kissling
53e3825e07 fix typo in ProtoMessage.toString() (#1033) 2024-02-29 15:56:47 +01:00
diegomrsantos
e9b456162a use chronos 4.0.0 (#1030) 2024-02-27 13:26:50 +01:00
diegomrsantos
250024f6cc fix: move transport interop tests to nim-libp2p repo (#1031) 2024-02-27 10:55:43 +01:00
Eugene Kabanov
fec632d28d Fix empty path crash issue for MultiAddresses unix, ip6zone, dns***. (#1025)
This issue was discovered by @0xTylerHolmes. Thank you!
2024-02-22 15:57:11 +01:00
Ludovic Chenut
349496e40f feat: Yamux timeout (#1029) 2024-02-22 10:21:34 +01:00
diegomrsantos
7faa0fac23 fix: allFuturesThrowing compilation issue on daily (#1026) 2024-02-19 19:46:34 +01:00
Diego
c5e4f8e12d Revert "feat: message prioritization with immediate peer-published dispatch and queuing for other msgs (#1015)"
This reverts commit fe4ff79885.
2024-02-19 13:47:37 +01:00
49 changed files with 1987 additions and 1242 deletions
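
The dominant theme of this changeset is rolling out chronos 4's {.async: (raises).} annotations. A minimal sketch of the pattern, assuming chronos >= 4.0.0 (fetch and run are illustrative names):

import chronos

proc fetch(): Future[int] {.async: (raises: [CancelledError, ValueError]).} =
  # the compiler now rejects any await or raise that could escape
  # with an exception outside the declared set
  await sleepAsync(1.milliseconds)
  return 42

proc run() {.async: (raises: []).} =
  try:
    discard await fetch()
  except CancelledError, ValueError:
    discard  # every declared error must be handled to keep raises: []

waitFor run()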


@@ -11,47 +11,19 @@ concurrency:
cancel-in-progress: true
jobs:
run-multidim-interop:
name: Run multidimensional interoperability tests
run-transport-interop:
name: Run transport interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
with:
repository: libp2p/test-plans
submodules: true
fetch-depth: 0
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: >
cd transport-interop/impl/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head
- name: Create ping-version.json
run: >
(cat << EOF
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
],
"secureChannels": [
"noise"
],
"muxers": [
"mplex",
"yamux"
]
}
EOF
) > ${{ github.workspace }}/test_head.json
- uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests


@@ -1,6 +1,6 @@
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
chronos;https://github.com/status-im/nim-chronos@#672db137b7cad9b384b8f4fb551fb6bbeaabfe1b
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18


@@ -12,7 +12,7 @@ requires "nim >= 1.6.0",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
"chronicles >= 0.10.2",
"chronos >= 3.0.6",
"chronos >= 4.0.0",
"metrics",
"secp256k1",
"stew#head",


@@ -122,8 +122,12 @@ proc withMplex*(
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)
proc withYamux*(b: SwitchBuilder,
windowSize: int = YamuxDefaultWindowSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer =
Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@@ -287,23 +291,24 @@ proc build*(b: SwitchBuilder): Switch
return switch
proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
secureManagers: openArray[SecureProtocol] = [
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise,
],
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000): Switch
{.raises: [LPError], public.} =
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000
): Switch {.raises: [LPError], public.} =
## Helper for common switch configurations.
{.push warning[Deprecated]:off.}
if SecureProtocol.Secio in secureManagers:
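
A hedged usage sketch of the new withYamux timeout parameters, assuming the surrounding SwitchBuilder API (withRng, withAddress, withTcpTransport, withNoise) is as exported by libp2p:

import chronos
import libp2p

let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"))
  .withTcpTransport()
  .withYamux(inTimeout = 10.minutes, outTimeout = 10.minutes)  # new in #1029
  .withNoise()
  .build()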


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -261,12 +261,6 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
## Write CID value ``cid`` to buffer ``vb``.
vb.writeArray(cid.data.buffer)
proc encode*(mbtype: typedesc[MultiBase], encoding: string,
cid: Cid): string {.inline.} =
## Get MultiBase encoded representation of ``cid`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, cid.data.buffer).tryGet()
proc hash*(cid: Cid): Hash {.inline.} =
hash(cid.data.buffer)


@@ -311,12 +311,14 @@ proc storeMuxer*(c: ConnManager,
raise newTooManyConnectionsError()
assert muxer notin c.muxed.getOrDefault(peerId)
let
newPeer = peerId notin c.muxed
assert newPeer or c.muxed[peerId].len > 0
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
var newPeer = false
c.muxed.withValue(peerId, muxers):
doAssert muxers[].len > 0
doAssert muxer notin muxers[]
muxers[].add(muxer)
do:
c.muxed[peerId] = @[muxer]
newPeer = true
libp2p_peers.set(c.muxed.len.int64)
asyncSpawn c.triggerConnEvent(
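
The withValue/do form above replaces a contains-then-index sequence with a single lookup. A self-contained sketch of the same std/tables pattern (store and the table are illustrative):

import std/tables

var muxed: Table[string, seq[int]]

proc store(peerId: string, muxer: int): bool =
  ## true when this peer had no entries yet
  muxed.withValue(peerId, muxers):
    muxers[].add(muxer)      # key present: append in place
    return false
  do:
    muxed[peerId] = @[muxer] # key absent: insert a fresh seq
    return true

assert store("peer1", 1)     # new peer
assert not store("peer1", 2) # existing peer, appended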


@@ -247,7 +247,7 @@ proc toString*(msg: ProtoMessage, dump = true): string =
else: "[REMOTE]"
local & direction & remote
let seqid = block:
msg.seqID.wihValue(seqid): "seqID = " & $seqid & " "
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
else: ""
let mtype = block:
msg.mtype.withValue(typ): "type = " & $typ & " "


@@ -85,7 +85,7 @@ proc dialAndUpgrade(
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
debug "Connection upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()


@@ -45,28 +45,6 @@ macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
debug "A future has failed, enable trace logging for details", error=exc.name
trace "Exception details", msg=exc.msg
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
var futs: seq[Future[T]]
for fut in args:
futs &= fut
proc call() {.async.} =
var first: ref CatchableError = nil
futs = await allFinished(futs)
for fut in futs:
if fut.failed:
let err = fut.readError()
if err of Defect:
raise err
else:
if err of CancelledError:
raise err
if isNil(first):
first = err
if not isNil(first):
raise first
return call()
template tryAndWarn*(message: static[string]; body: untyped): untyped =
try:
body


@@ -119,23 +119,46 @@ proc ip6VB(vb: var VBuffer): bool =
if vb.readArray(a.address_v6) == 16:
result = true
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
## IPv6 stringToBuffer() implementation.
template pathStringToBuffer(s: string, vb: var VBuffer): bool =
if len(s) > 0:
vb.writeSeq(s)
result = true
true
else:
false
template pathBufferToString(vb: var VBuffer, s: var string): bool =
s = ""
if (vb.readSeq(s) > 0) and (len(s) > 0):
true
else:
false
template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
s = ""
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1):
true
else:
false
template pathValidateBuffer(vb: var VBuffer): bool =
var s = ""
pathBufferToString(vb, s)
template pathValidateBufferNoSlash(vb: var VBuffer): bool =
var s = ""
pathBufferToStringNoSlash(vb, s)
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
## IPv6 stringToBuffer() implementation.
pathStringToBuffer(s, vb)
proc ip6zoneBtS(vb: var VBuffer, s: var string): bool =
## IPv6 bufferToString() implementation.
if vb.readSeq(s) > 0:
result = true
pathBufferToStringNoSlash(vb, s)
proc ip6zoneVB(vb: var VBuffer): bool =
## IPv6 validateBuffer() implementation.
var s = ""
if vb.readSeq(s) > 0:
if s.find('/') == -1:
result = true
pathValidateBufferNoSlash(vb)
proc portStB(s: string, vb: var VBuffer): bool =
## Port number stringToBuffer() implementation.
@@ -154,7 +177,8 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
## Port number bufferToString() implementation.
var port: array[2, byte]
if vb.readArray(port) == 2:
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
let nport =
(safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
s = $nport
result = true
@@ -214,7 +238,8 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[12, byte]
if vb.readArray(buf) == 12:
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
let nport =
(safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
s = Base32Lower.encode(buf.toOpenArray(0, 9))
s.add(":")
s.add($nport)
@@ -248,7 +273,8 @@ proc onion3BtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[37, byte]
if vb.readArray(buf) == 37:
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
var nport =
(safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
s = Base32Lower.encode(buf.toOpenArray(0, 34))
s.add(":")
s.add($nport)
@@ -262,40 +288,27 @@ proc onion3VB(vb: var VBuffer): bool =
proc unixStB(s: string, vb: var VBuffer): bool =
## Unix socket name stringToBuffer() implementation.
if len(s) > 0:
vb.writeSeq(s)
result = true
pathStringToBuffer(s, vb)
proc unixBtS(vb: var VBuffer, s: var string): bool =
## Unix socket name bufferToString() implementation.
s = ""
if vb.readSeq(s) > 0:
result = true
pathBufferToString(vb, s)
proc unixVB(vb: var VBuffer): bool =
## Unix socket name validateBuffer() implementation.
var s = ""
if vb.readSeq(s) > 0:
result = true
pathValidateBuffer(vb)
proc dnsStB(s: string, vb: var VBuffer): bool =
## DNS name stringToBuffer() implementation.
if len(s) > 0:
vb.writeSeq(s)
result = true
pathStringToBuffer(s, vb)
proc dnsBtS(vb: var VBuffer, s: var string): bool =
## DNS name bufferToString() implementation.
s = ""
if vb.readSeq(s) > 0:
result = true
pathBufferToStringNoSlash(vb, s)
proc dnsVB(vb: var VBuffer): bool =
## DNS name validateBuffer() implementation.
var s = ""
if vb.readSeq(s) > 0:
if s.find('/') == -1:
result = true
pathValidateBufferNoSlash(vb)
proc mapEq*(codec: string): MaPattern =
## ``Equal`` operator for pattern
@@ -660,7 +673,8 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
inc(offset)
ok(res)
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
proc getParts[U, V](ma: MultiAddress,
slice: HSlice[U, V]): MaResult[MultiAddress] =
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
let maLength = ? len(ma)
template normalizeIndex(index): int =
@@ -674,7 +688,8 @@ proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddres
? res.append(? ma[i])
ok(res)
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
proc `[]`*(ma: MultiAddress,
i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
## Returns part with index ``i`` of MultiAddress ``ma``.
when i is BackwardsIndex:
let maLength = ? len(ma)
@@ -769,7 +784,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
if not proto.coder.bufferToString(vb.data, part):
return err("multiaddress: Decoding protocol error")
parts.add($(proto.mcodec))
if proto.kind == Path and part[0] == '/':
if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
parts.add(part[1..^1])
else:
parts.add(part)
@@ -1120,16 +1135,20 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
value = MultiAddress.init(buffer).valueOr:
return err(ProtoError.IncorrectBlob)
ok(true)
proc getRepeatedField*(pb: ProtoBuffer, field: int,
value: var seq[MultiAddress]): ProtoResult[bool] {.
inline.} =
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
## Read repeated field from protobuf message. ``field`` is field number.
## If the message is malformed, an error is returned. If field is not present
## in message, then ``ok(false)`` is returned and value is empty. If field is
## present, but no items could be parsed, then
## ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
## If field is present and some item could be parsed, then ``true`` is
## returned and value contains the parsed values.
var items: seq[seq[byte]]
value.setLen(0)
let res = ? pb.getRepeatedField(field, items)
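
The len(part) > 0 guard in toString above is the crash fix from #1025: an empty path-like part used to be indexed unconditionally. A stand-alone illustration of the same check (renderPart is a made-up helper):

proc renderPart(isPath: bool, part: string): string =
  # an empty part must be length-checked before part[0] is touched
  if part.len > 0 and isPath and part[0] == '/':
    part[1..^1]  # path protocols drop the leading slash when rendered
  else:
    part

assert renderPart(true, "") == ""    # previously an out-of-bounds access
assert renderPart(true, "/tmp/x") == "tmp/x"
assert renderPart(false, "example.com") == "example.com"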


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -45,15 +45,18 @@ proc new*(T: typedesc[MultistreamSelect]): T =
)
template validateSuffix(str: string): untyped =
if str.endsWith("\n"):
str.removeSuffix("\n")
else:
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
if str.endsWith("\n"):
str.removeSuffix("\n")
else:
raise (ref MultiStreamError)(msg:
"MultistreamSelect failed, malformed message")
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]):
Future[string] {.async.} =
proc select*(
_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
trace "initiating handshake", conn, codec = Codec
## select a remote protocol
await conn.writeLp(Codec & "\n") # write handshake
@@ -66,7 +69,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
if s != Codec:
notice "handshake failed", conn, codec = s
raise newException(MultiStreamError, "MultistreamSelect handshake failed")
raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
else:
trace "multistream handshake success", conn
@@ -98,19 +101,29 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
# No alternatives, fail
return ""
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: string): Future[bool] {.async.} =
proc select*(
_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: string
): Future[bool] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
if proto.len > 0:
return (await MultistreamSelect.select(conn, @[proto])) == proto
(await MultistreamSelect.select(conn, @[proto])) == proto
else:
return (await MultistreamSelect.select(conn, @[])) == Codec
(await MultistreamSelect.select(conn, @[])) == Codec
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
proc select*(
m: MultistreamSelect,
conn: Connection
): Future[bool] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError], raw: true).} =
m.select(conn, "")
proc list*(m: MultistreamSelect,
conn: Connection): Future[seq[string]] {.async.} =
proc list*(
m: MultistreamSelect,
conn: Connection
): Future[seq[string]] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
## list remote protos requests on connection
if not await m.select(conn):
return
@@ -126,12 +139,13 @@ proc list*(m: MultistreamSelect,
result = list
proc handle*(
_: type MultistreamSelect,
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async.} =
_: type MultistreamSelect,
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -140,8 +154,8 @@ proc handle*(
if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")
raise (ref MultiStreamError)(msg:
"MultistreamSelect handling failed, invalid first message")
trace "handle: got request", conn, ms
if ms.len() <= 0:
@@ -172,13 +186,16 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
proc handle*(
m: MultistreamSelect,
conn: Connection,
active: bool = false) {.async: (raises: [CancelledError]).} =
trace "Starting multistream handler", conn, handshaked = active
var
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:
if not isNil(h.match):
if h.match != nil:
matchers.add(h.match)
for proto in h.protos:
protos.add(proto)
@@ -186,12 +203,13 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
try:
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >=
maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
@@ -242,8 +260,32 @@ proc addHandler*(m: MultistreamSelect,
protocol: protocol,
match: matcher))
proc start*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.start()))
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
let
handlers = m.handlers
futs = handlers.mapIt(it.protocol.start())
try:
await allFutures(futs)
for fut in futs:
await fut
except CancelledError as exc:
var pending: seq[Future[void].Raising([])]
for i, fut in futs:
if not fut.finished:
pending.add noCancel fut.cancelAndWait()
elif fut.completed:
pending.add handlers[i].protocol.stop()
else:
static: doAssert typeof(fut).E is (CancelledError,)
await noCancel allFutures(pending)
raise exc
proc stop*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.stop()))
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.stop()
await noCancel allFutures(futs)
for fut in futs:
await fut
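
A condensed sketch of the stop pattern above, assuming chronos 4's Raising futures: the wait is shielded with noCancel so stop always runs to completion even if the caller is cancelled.

import chronos

proc stopAll(futs: seq[Future[void].Raising([])]) {.async: (raises: []).} =
  await noCancel allFutures(futs)  # cancellation cannot interrupt cleanup
  for fut in futs:
    await fut  # the error set is empty, so nothing can propagate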


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -42,7 +42,10 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
proc readMsg*(
conn: Connection
): Future[Msg] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn
@@ -55,10 +58,13 @@ proc readMsg*(conn: Connection): Future[Msg] {.async.} =
return (header shr 3, MessageType(msgType), data)
proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]): Future[void] =
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: seq[byte] = @[]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
var
left = data.len
offset = 0
@@ -84,8 +90,11 @@ proc writeMsg*(conn: Connection,
# message gets written before some of the chunks
conn.write(buf.buffer)
proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: string): Future[void] =
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn.writeMsg(id, msgType, data.toBytes())
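
writeMsg above is annotated raw: true. A hedged sketch of what that flag means in chronos 4 (delay is an illustrative name): the async transformation is skipped, so the body runs synchronously and must itself produce the Future that callers await.

import chronos

proc delay(): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  # no implicit future or state machine is generated here
  sleepAsync(10.milliseconds)

waitFor delay()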


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -28,7 +28,8 @@ when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qtime, "message queuing time"
when defined(libp2p_network_protocols_metrics):
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
declareCounter libp2p_protocols_bytes,
"total sent or received bytes", ["protocol", "direction"]
## Channel half-closed states
##
@@ -64,16 +65,16 @@ type
func shortLog*(s: LPChannel): auto =
try:
if s.isNil: "LPChannel(nil)"
if s == nil: "LPChannel(nil)"
elif s.name != $s.oid and s.name.len > 0:
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async.} =
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -82,20 +83,20 @@ proc open*(s: LPChannel) {.async.} =
s.isOpen = true
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
await s.conn.close()
raise exc
method closed*(s: LPChannel): bool =
s.closedLocal
proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
## Channels may be closed for reading and writing in any order - we'll close
## the underlying bufferstream when both directions are closed
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async.} =
proc reset*(s: LPChannel) {.async: (raises: []).} =
if s.isClosed:
trace "Already closed", s
return
@@ -108,22 +109,21 @@ proc reset*(s: LPChannel) {.async.} =
if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
proc resetMessage() {.async: (raises: []).} =
try:
trace "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
except LPStreamError as exc:
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
await s.conn.close()
asyncSpawn resetMessage()
await s.closeImpl() # noraises, nocancels
await s.closeImpl()
trace "Channel reset", s
method close*(s: LPChannel) {.async.} =
method close*(s: LPChannel) {.async: (raises: []).} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
@@ -137,10 +137,9 @@ method close*(s: LPChannel) {.async.} =
if s.isOpen and not s.conn.isClosed:
try:
await s.conn.writeMsg(s.id, s.closeCode) # write close
except CancelledError as exc:
except CancelledError:
await s.conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
@@ -154,16 +153,17 @@ method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName
s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()
procCall BufferStream(s).initStream()
method readOnce*(s: LPChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: LPChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
## channel must not be done from within a callback / read handler of another
@@ -186,15 +186,19 @@ method readOnce*(s: LPChannel,
if bytes == 0:
await s.closeUnderlying()
return bytes
except CatchableError as exc:
# readOnce in BufferStream generally raises on EOF or cancellation - for
# the former, resetting is harmless, for the latter it's necessary because
# data has been lost in s.readBuf and there's no way to gracefully recover /
# use the channel any more
except CancelledError as exc:
await s.reset()
raise exc
except LPStreamError as exc:
# Resetting is necessary because data has been lost in s.readBuf and
# there's no way to gracefully recover / use the channel any more
await s.reset()
raise newLPStreamConnDownError(exc)
proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
proc prepareWrite(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
@@ -222,7 +226,10 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
await s.conn.writeMsg(s.id, s.msgCode, msg)
proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
s.writes += 1
@@ -235,7 +242,10 @@ proc completeWrite(
when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(
msgLen.int64, labelValues = [s.protocol, "out"])
s.activity = true
except CancelledError as exc:
@@ -247,7 +257,7 @@ proc completeWrite(
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
@@ -255,7 +265,11 @@ proc completeWrite(
finally:
s.writes -= 1
method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method write*(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to mplex channel - there may be up to MaxWrite concurrent writes
## pending after which the peer is disconnected
@@ -276,13 +290,12 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method getWrapped*(s: LPChannel): Connection = s.conn
proc init*(
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
let chann = L(
id: id,
name: name,
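
resetMessage above is launched with asyncSpawn, which is why it is declared raises: []. A minimal runnable sketch of that fire-and-forget shape, assuming chronos 4's noCancel:

import chronos

proc fireAndForget() {.async: (raises: []).} =
  # sleepAsync only raises CancelledError, and noCancel shields the
  # await from that, so nothing can escape this proc
  await noCancel sleepAsync(1.milliseconds)

asyncSpawn fireAndForget()
waitFor sleepAsync(5.milliseconds)  # give the spawned task time to finish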


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -56,7 +56,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")
proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
## remove the local channel from the internal tables
##
try:
@@ -68,19 +68,19 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId])
except CatchableError as exc:
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg
proc newStreamInternal*(m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [InvalidChannelIdError].} =
proc newStreamInternal*(
m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
m.currentId.inc(); m.currentId
let id =
if initiator: m.currentId.inc(); m.currentId
else: chanId
if id in m.channels[initiator]:
@@ -111,18 +111,14 @@ proc newStreamInternal*(m: Mplex,
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])
proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
## call the muxer stream handler for this channel
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
method handle*(m: Mplex) {.async.} =
method handle*(m: Mplex) {.async: (raises: []).} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -150,7 +146,7 @@ method handle*(m: Mplex) {.async.} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()
let name = string.fromBytes(data)
@@ -159,59 +155,65 @@ method handle*(m: Mplex) {.async.} =
trace "Processing channel message", m, channel, data = data.shortLog
case msgType:
of MessageType.New:
trace "created channel", m, channel
of MessageType.New:
trace "created channel", m, channel
if not isNil(m.streamHandler):
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
if m.streamHandler != nil:
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in mplex read loop", m, msg = exc.msg
except MuxerError as exc:
debug "Unexpected muxer exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m
proc new*(M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
proc new*(
M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount)
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async.} =
method newStream*(
m: Mplex,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -219,7 +221,7 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async.} =
method close*(m: Mplex) {.async: (raises: []).} =
if m.isClosed:
trace "Already closed", m
return


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,16 +23,17 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError
StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}
Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void]
handler*: Future[void].Raising([])
connection*: Connection
# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
MuxerConstructor* =
proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -40,24 +41,32 @@ type
codec*: string
func shortLog*(m: Muxer): auto =
if isNil(m): "nil"
if m == nil: "nil"
else: shortLog(m.connection)
chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
method newStream*(
m: Muxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.base, async: (raises: [
CancelledError, LPStreamError, MuxerError], raw: true).} =
raiseAssert("Not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async.} = discard
method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} = discard
proc new*(
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")
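
StreamHandler and MuxerHandler above become async proc types with an explicit empty raises set, so a handler that can raise no longer matches. A hedged sketch of that type form (Handler and ok are illustrative):

import chronos

type
  Handler = proc(x: int): Future[void] {.async: (raises: []).}

proc ok(x: int) {.async: (raises: []).} =
  discard x  # a body that could raise would not satisfy Handler

let h: Handler = ok
waitFor h(1)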


@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,14 +27,17 @@ const
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareGauge libp2p_yamux_channels,
"yamux channels", labels = ["initiator", "peer"]
declareHistogram libp2p_yamux_send_queue,
"message send queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue,
"message recv queue length (in byte)", buckets = [
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
type
YamuxError* = object of CatchableError
YamuxError* = object of MuxerError
MsgType = enum
Data = 0x0
@@ -60,7 +63,10 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
proc readHeader(
conn: LPStream
): Future[YamuxHeader] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@@ -74,10 +80,10 @@ proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
return result
proc `$`(header: YamuxHeader): string =
result = "{" & $header.msgType & ", "
result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
result &= "streamId: " & $header.streamId & ", "
result &= "length: " & $header.length & "}"
"{" & $header.msgType & ", " &
"{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, " &
"streamId: " & $header.streamId & ", " &
"length: " & $header.length & "}"
proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
@@ -86,10 +92,14 @@ proc encode(header: YamuxHeader): array[12, byte] =
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)
proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
proc write(
conn: LPStream,
header: YamuxHeader
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
trace "write directly on stream", h = $header
var buffer = header.encode()
return conn.write(@buffer)
conn.write(@buffer)
proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
T(
@@ -107,11 +117,10 @@ proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
)
proc data(
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
length: uint32 = 0,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.Data,
@@ -121,11 +130,10 @@ proc data(
)
proc windowUpdate(
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {},
): T =
T: type[YamuxHeader],
streamId: uint32,
delta: uint32,
flags: set[MsgFlags] = {}): T =
T(
version: YamuxVersion,
msgType: MsgType.WindowUpdate,
@@ -138,7 +146,7 @@ type
ToSend = tuple
data: seq[byte]
sent: int
fut: Future[void]
fut: Future[void].Raising([CancelledError, LPStreamError])
YamuxChannel* = ref object of Connection
id: uint32
recvWindow: int
@@ -153,7 +161,7 @@ type
recvQueue: seq[byte]
isReset: bool
remoteReset: bool
closedRemotely: Future[void]
closedRemotely: Future[void].Raising([])
closedLocally: bool
receivedData: AsyncEvent
returnedEof: bool
@@ -162,7 +170,7 @@ proc `$`(channel: YamuxChannel): string =
result = if channel.conn.dir == Out: "=> " else: "<= "
result &= $channel.id
var s: seq[string] = @[]
if channel.closedRemotely.done():
if channel.closedRemotely.completed():
s.add("ClosedRemotely")
if channel.closedLocally:
s.add("ClosedLocally")
@@ -184,26 +192,28 @@ proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
proc actuallyClose(channel: YamuxChannel) {.async.} =
proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
if channel.closedLocally and channel.sendQueue.len == 0 and
channel.closedRemotely.done():
channel.closedRemotely.completed():
await procCall Connection(channel).closeImpl()
proc remoteClosed(channel: YamuxChannel) {.async.} =
if not channel.closedRemotely.done():
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.completed():
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async.} =
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
if not channel.isReset and channel.sendQueue.len == 0:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
except CancelledError, LPStreamError: discard
await channel.actuallyClose()
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
proc reset(
channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
@@ -218,19 +228,20 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
channel.recvQueue = @[]
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal:
if isLocal and not channel.isSending:
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
except LPStreamEOFError as exc: discard
except LPStreamClosedError as exc: discard
except CancelledError, LPStreamError: discard
await channel.close()
if not channel.closedRemotely.done():
if not channel.closedRemotely.completed():
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If the reset is remote, there's no reason to flush anything.
channel.recvWindow = 0
proc updateRecvWindow(channel: YamuxChannel) {.async.} =
proc updateRecvWindow(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Send to the peer a window update when the recvWindow is empty enough
##
# In order to avoid spamming a window update everytime a byte is read,
@@ -248,14 +259,15 @@ proc updateRecvWindow(channel: YamuxChannel) {.async.} =
trace "increasing the recvWindow", delta
method readOnce*(
channel: YamuxChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
channel: YamuxChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read from a yamux channel
if channel.isReset:
raise if channel.remoteReset:
raise
if channel.remoteReset:
newLPStreamResetError()
elif channel.closedLocally:
newLPStreamClosedError()
@@ -265,8 +277,10 @@ method readOnce*(
raise newLPStreamRemoteClosedError()
if channel.recvQueue.len == 0:
channel.receivedData.clear()
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(channel.closedRemotely, channel.receivedData.wait())
except ValueError: raiseAssert("Futures list is not empty")
if channel.closedRemotely.completed() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
@@ -274,7 +288,8 @@ method readOnce*(
let toRead = min(channel.recvQueue.len, nbytes)
var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
toOpenArray(p, 0, nbytes - 1)[0..<toRead] =
channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead..^1]
# We made some room in the recv buffer let the peer know
@@ -282,7 +297,9 @@ method readOnce*(
channel.activity = true
return toRead
proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc gotDataFromRemote(
channel: YamuxChannel,
b: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
channel.recvWindow -= b.len
channel.recvQueue = channel.recvQueue.concat(b)
channel.receivedData.fire()
@@ -293,7 +310,9 @@ proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
channel.maxRecvWindow = maxRecvWindow
proc trySend(channel: YamuxChannel) {.async.} =
proc trySend(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
if channel.isSending:
return
channel.isSending = true
@@ -304,12 +323,10 @@ proc trySend(channel: YamuxChannel) {.async.} =
if channel.sendWindow == 0:
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
trace "channel send queue too big, resetting",
maxSendQueueSize = channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
try:
await channel.reset(true)
except CatchableError as exc:
warn "failed to reset", msg=exc.msg
await channel.reset(isLocal = true)
break
let
@@ -326,7 +343,7 @@ proc trySend(channel: YamuxChannel) {.async.} =
sendBuffer[0..<12] = header.encode()
var futures: seq[Future[void]]
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
while inBuffer < toSend:
# concatenate the different message we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
@@ -343,8 +360,15 @@ proc trySend(channel: YamuxChannel) {.async.} =
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
try:
await channel.conn.write(sendBuffer)
except CancelledError:
trace "cancelled sending the buffer"
for fut in futures.items():
fut.cancelSoon()
await channel.reset()
break
except LPStreamError as exc:
trace "failed to send the buffer"
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
@@ -355,7 +379,11 @@ proc trySend(channel: YamuxChannel) {.async.} =
fut.complete()
channel.activity = true
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
method write*(
channel: YamuxChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
@@ -373,7 +401,9 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
proc open(channel: YamuxChannel) {.async.} =
proc open(
channel: YamuxChannel
) {.async: (raises: [CancelledError, LPStreamError]).} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
@@ -396,27 +426,36 @@ type
maxChannCount: int
windowSize: int
maxSendQueueSize: int
inTimeout: Duration
outTimeout: Duration
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
try:
await channel.join()
except CancelledError:
discard
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
libp2p_yamux_channels.set(
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# As you can see, during initialization, recvWindow can be larger than maxRecvWindow.
proc createStream(
m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To solve this contradiction, no updateWindow will be sent until recvWindow is less
# than maxRecvWindow
result = YamuxChannel(
# To solve this contradiction, no updateWindow will be sent until
# recvWindow is less than maxRecvWindow
proc newClosedRemotelyFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
var stream = YamuxChannel(
id: id,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
@@ -425,26 +464,33 @@ proc createStream(m: Yamux, id: uint32, isSrc: bool,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
closedRemotely: newFuture[void]()
closedRemotely: newClosedRemotelyFut()
)
result.objName = "YamuxStream"
result.dir = if isSrc: Direction.Out else: Direction.In
result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting YamuxChannel"
result.reset()
result.initStream()
result.peerId = m.connection.peerId
result.observedAddr = m.connection.observedAddr
result.transportDir = m.connection.transportDir
stream.objName = "YamuxStream"
if isSrc:
stream.dir = Direction.Out
stream.timeout = m.outTimeout
else:
stream.dir = Direction.In
stream.timeout = m.inTimeout
stream.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting YamuxChannel"
stream.reset(isLocal = true)
stream.initStream()
stream.peerId = m.connection.peerId
stream.observedAddr = m.connection.observedAddr
stream.transportDir = m.connection.transportDir
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChannel(result)
stream.shortAgent = m.connection.shortAgent
m.channels[id] = stream
asyncSpawn m.cleanupChannel(stream)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
return stream
method close*(m: Yamux) {.async.} =
method close*(m: Yamux) {.async: (raises: []).} =
if m.isClosed == true:
trace "Already closed"
return
@@ -453,24 +499,21 @@ method close*(m: Yamux) {.async.} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(true)
await channel.reset(isLocal = true)
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
except CancelledError as exc: trace "cancelled sending goAway", msg = exc.msg
except LPStreamError as exc: trace "failed to send goAway", msg = exc.msg
await m.connection.close()
trace "Closed yamux"
proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
## Call the muxer stream handler for this channel
##
try:
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
await m.streamHandler(channel)
trace "finished handling stream"
doAssert(channel.isClosed, "connection not closed by handler!")
method handle*(m: Yamux) {.async.} =
method handle*(m: Yamux) {.async: (raises: []).} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -505,19 +548,28 @@ method handle*(m: Yamux) {.async.} =
await newStream.open()
asyncSpawn m.handleStream(newStream)
elif header.streamId notin m.channels:
if header.streamId notin m.flushed:
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
elif header.msgType == Data:
# Flush the data
m.flushed[header.streamId].dec(int(header.length))
if m.flushed[header.streamId] < 0:
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
# Flush the data
m.flushed.withValue(header.streamId, flushed):
if header.msgType == Data:
flushed[].dec(int(header.length))
if flushed[] < 0:
raise newException(YamuxError,
"Peer exhausted the recvWindow after reset")
do:
raise newException(YamuxError,
"Unknown stream ID: " & $header.streamId)
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(
addr buffer[0], int(header.length))
continue
let channel = m.channels[header.streamId]
let channel =
try:
m.channels[header.streamId]
except KeyError:
raise newException(YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId)
if header.msgType == WindowUpdate:
channel.sendWindow += int(header.length)
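
Editor's note: the `withValue`/`do` rewrite above replaces a membership test plus `[]` access (two lookups and a potential KeyError) with a single lookup. A small standalone sketch of the std/tables idiom, with made-up values:

import std/tables

var flushed = {12'u32: 256}.toTable
flushed.withValue(12'u32, remaining):
  # found: `remaining` points at the stored value, one lookup total
  remaining[].dec(100)
  echo "window left: ", remaining[]
do:
  # not found: no KeyError to catch
  echo "unknown stream id"
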
@@ -530,7 +582,7 @@ method handle*(m: Yamux) {.async.} =
if header.length > 0:
var buffer = newSeqUninitialized[byte](header.length)
await m.connection.readExactly(addr buffer[0], int(header.length))
trace "Msg Rcv", msg=string.fromBytes(buffer)
trace "Msg Rcv", msg=shortLog(buffer)
await channel.gotDataFromRemote(buffer)
if MsgFlags.Fin in header.flags:
@@ -539,11 +591,24 @@ method handle*(m: Yamux) {.async.} =
if MsgFlags.Rst in header.flags:
trace "remote reset channel"
await channel.reset()
except CancelledError as exc:
debug "Unexpected cancellation in yamux handler", msg = exc.msg
except LPStreamEOFError as exc:
trace "Stream EOF", msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in yamux read loop", msg = exc.msg
except YamuxError as exc:
trace "Closing yamux connection", error=exc.msg
await m.connection.write(YamuxHeader.goAway(ProtocolError))
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
except MuxerError as exc:
debug "Unexpected muxer exception in yamux read loop", msg = exc.msg
try:
await m.connection.write(YamuxHeader.goAway(ProtocolError))
except CancelledError, LPStreamError:
discard
finally:
await m.close()
trace "Stopped yamux handler"
@@ -552,10 +617,11 @@ method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async.} =
m: Yamux,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
@@ -564,14 +630,19 @@ method newStream*(
await stream.open()
return stream
proc new*(T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize): T =
proc new*(
T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize
maxSendQueueSize: maxSendQueueSize,
inTimeout: inTimeout,
outTimeout: outTimeout
)
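
Editor's note: a hedged usage sketch for the two new timeout parameters; `conn` is assumed to be an already-upgraded libp2p `Connection`, and the concrete durations here are arbitrary, not defaults from the diff:

let muxer = Yamux.new(
  conn,
  inTimeout = 10.minutes, # idle timeout applied to incoming streams
  outTimeout = 1.minutes) # idle timeout applied to outgoing streams
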

View File

@@ -19,7 +19,6 @@ import ../../protocol,
../../../switch,
../../../utils/future
export DcutrError
export chronicles
type Dcutr* = ref object of LPProtocol
@@ -65,14 +64,14 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
except CancelledError as err:
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
debug "Dcutr receiver could not connect to the remote peer, " &
"all connect attempts failed", peerDialableAddrs, msg = err.msg
except AsyncTimeoutError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
debug "Dcutr receiver could not connect to the remote peer, " &
"all connect attempts timed out", peerDialableAddrs, msg = err.msg
except CatchableError as err:
warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)
warn "Unexpected error when Dcutr receiver tried to connect " &
"to the remote peer", msg = err.msg
let self = T()
self.handler = handleStream

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,11 +23,15 @@ type
method readOnce*(
self: RelayConnection,
pbytes: pointer,
nbytes: int): Future[int] {.async.} =
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
self.activity = true
return await self.conn.readOnce(pbytes, nbytes)
self.conn.readOnce(pbytes, nbytes)
method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
method write*(
self: RelayConnection,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
self.dataSent.inc(msg.len)
if self.limitData != 0 and self.dataSent > self.limitData:
await self.close()
@@ -35,25 +39,25 @@ method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
self.activity = true
await self.conn.write(msg)
method closeImpl*(self: RelayConnection): Future[void] {.async.} =
method closeImpl*(self: RelayConnection): Future[void] {.async: (raises: []).} =
await self.conn.close()
await procCall Connection(self).closeImpl()
method getWrapped*(self: RelayConnection): Connection = self.conn
proc new*(
T: typedesc[RelayConnection],
conn: Connection,
limitDuration: uint32,
limitData: uint64): T =
T: typedesc[RelayConnection],
conn: Connection,
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =
let sleep = sleepAsync(limitDuration.seconds())
await sleep or conn.join()
if sleep.finished: await conn.close()
else: sleep.cancel()
proc checkDurationConnection() {.async: (raises: []).} =
try:
await noCancel conn.join().wait(limitDuration.seconds())
except AsyncTimeoutError:
await conn.close()
asyncSpawn checkDurationConnection()
return rc
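
Editor's note: the replacement body swaps the old `sleepAsync ... or conn.join()` race for `wait()` plus `noCancel`, which both simplifies the timeout and guarantees the close still runs if the spawned task is cancelled. A reduced sketch of the idiom under the same assumptions (chronos 4, a libp2p `Connection` with `join()`/`close()`; `enforceDeadline` is a hypothetical name):

proc enforceDeadline(conn: Connection, limit: Duration) {.async: (raises: []).} =
  try:
    # wait() fails with AsyncTimeoutError once `limit` elapses;
    # noCancel shields the await so outer cancellation cannot
    # skip the close below
    await noCancel conn.join().wait(limit)
  except AsyncTimeoutError:
    await conn.close()
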

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -361,17 +361,25 @@ proc deletesReservation(r: Relay) {.async.} =
if n > r.rsvp[k]:
r.rsvp.del(k)
method start*(r: Relay) {.async.} =
method start*(
r: Relay
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not r.reservationLoop.isNil:
warn "Starting relay twice"
return
return fut
r.reservationLoop = r.deletesReservation()
r.started = true
fut
method stop*(r: Relay) {.async.} =
method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if r.reservationLoop.isNil:
warn "Stopping relay without starting it"
return
return fut
r.started = false
r.reservationLoop.cancel()
r.reservationLoop = nil
fut

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -21,63 +21,77 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
proc sendStatus*(
conn: Connection,
code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
msg = RelayMessage(
msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
proc sendHopStatus*(
conn: Connection,
code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)
proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
proc sendStopStatus*(
conn: Connection,
code: StatusV2
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
let
msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
conn.writeLp(pb.buffer)
proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
proc bridge*(
connSrc: Connection,
connDst: Connection) {.async: (raises: [CancelledError]).} =
const bufferSize = 4096
var
bufSrcToDst: array[bufferSize, byte]
bufDstToSrc: array[bufferSize, byte]
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
bytesSendFromSrcToDst = 0
bytesSendFromDstToSrc = 0
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
bytesSentFromSrcToDst = 0
bytesSentFromDstToSrc = 0
bufRead: int
try:
while not connSrc.closed() and not connDst.closed():
await futSrc or futDst
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(futSrc, futDst)
except ValueError: raiseAssert("Futures list is not empty")
if futSrc.finished():
bufRead = await futSrc
if bufRead > 0:
bytesSendFromSrcToDst.inc(bufRead)
await connDst.write(@bufSrcToDst[0..<bufRead])
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
bytesSentFromSrcToDst.inc(bufRead)
await connDst.write(@bufSrcToDst[0 ..< bufRead])
zeroMem(addr bufSrcToDst[0], bufSrcToDst.len)
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
if futDst.finished():
bufRead = await futDst
if bufRead > 0:
bytesSendFromDstToSrc += bufRead
await connSrc.write(bufDstToSrc[0..<bufRead])
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
bytesSentFromDstToSrc += bufRead
await connSrc.write(bufDstToSrc[0 ..< bufRead])
zeroMem(addr bufDstToSrc[0], bufDstToSrc.len)
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
if connSrc.closed() or connSrc.atEof():
trace "relay src closed connection", src = connSrc.peerId
if connDst.closed() or connDst.atEof():
trace "relay dst closed connection", dst = connDst.peerId
trace "relay error", exc=exc.msg
trace "end relaying", bytesSendFromSrcToDst, bytesSendFromDstToSrc
trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
await futSrc.cancelAndWait()
await futDst.cancelAndWait()
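
Editor's note: `await futSrc or futDst` could lock up (see the linked chronos issue); `race` completes as soon as either future does and leaves both futures intact for inspection. A self-contained sketch of the idiom with two placeholder futures:

import chronos

proc firstOfTwo() {.async.} =
  let
    futA = sleepAsync(10.milliseconds)
    futB = sleepAsync(20.milliseconds)
  try: # race() only fails with ValueError, and only on an empty list
    discard await race(futA, futB)
  except ValueError:
    raiseAssert "futures list is not empty"
  # at least one future finished; cancel the other instead of leaking it
  if not futA.finished(): await futA.cancelAndWait()
  if not futB.finished(): await futB.cancelAndWait()
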

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -31,8 +31,19 @@ type
maxIncomingStreams: Opt[int]
method init*(p: LPProtocol) {.base, gcsafe.} = discard
method start*(p: LPProtocol) {.async, base.} = p.started = true
method stop*(p: LPProtocol) {.async, base.} = p.started = false
method start*(
p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
let fut = newFuture[void]()
fut.complete()
p.started = true
fut
method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
let fut = newFuture[void]()
fut.complete()
p.started = false
fut
proc maxIncomingStreams*(p: LPProtocol): int =
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
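
Editor's note: the same pre-completed-future shape appears in the relay diff above and in the gossipsub and rendezvous diffs below. With `raw: true` the proc allocates and completes its own future up front, so every early `return fut` path hands back a finished future and no state machine is generated. Distilled into a standalone sketch (chronos 4 assumed; names are illustrative):

import chronos

proc trivialStart(): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  # build a future that is already complete; callers can await it
  # at essentially zero cost, and early `return fut` paths stay valid
  let fut = newFuture[void]("example.trivialStart")
  fut.complete()
  fut
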

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -701,30 +701,40 @@ proc maintainDirectPeers(g: GossipSub) {.async.} =
for id, addrs in g.parameters.directPeers:
await g.addDirectPeer(id, addrs)
method start*(g: GossipSub) {.async.} =
method start*(
g: GossipSub
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
trace "gossipsub start"
if not g.heartbeatFut.isNil:
warn "Starting gossipsub twice"
return
return fut
g.heartbeatFut = g.heartbeat()
g.scoringHeartbeatFut = g.scoringHeartbeat()
g.directPeersLoop = g.maintainDirectPeers()
g.started = true
fut
method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
method stop*(g: GossipSub) {.async.} =
trace "gossipsub stop"
g.started = false
if g.heartbeatFut.isNil:
warn "Stopping gossipsub without starting it"
return
return fut
# stop heartbeat interval
g.directPeersLoop.cancel()
g.scoringHeartbeatFut.cancel()
g.heartbeatFut.cancel()
g.heartbeatFut = nil
fut
method initPubSub*(g: GossipSub)
{.raises: [InitializationError].} =

View File

@@ -150,7 +150,7 @@ proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.rai
## priority messages have been sent.
trace "sending pubsub message to peer", peer, msg = shortLog(msg)
asyncSpawn peer.send(msg, p.anonymize, isHighPriority)
peer.send(msg, p.anonymize, isHighPriority)
proc broadcast*(
p: PubSub,
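
Editor's note: `send` is now fire-and-forget; instead of awaiting `sendEncoded`, it detaches the future with `asyncSpawn`. A minimal sketch of the constraint this imposes (the task name is illustrative):

import chronos

proc task() {.async: (raises: []).} =
  try:
    await sleepAsync(1.milliseconds)
  except CancelledError:
    discard # a spawned task handles cancellation itself

proc fireAndForget() {.raises: [].} =
  # asyncSpawn detaches the future; it must be a Future[void] that
  # cannot fail, since an escaped exception would surface as a Defect
  asyncSpawn task()
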

View File

@@ -31,6 +31,7 @@ when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
when defined(pubsubpeer_queue_metrics):
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
@@ -251,26 +252,26 @@ template sendMetrics(msg: RPCMsg): untyped =
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc clearSendPriorityQueue(p: PubSubPeer) =
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[0].finished:
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.dec(labelValues = [$p.peerId])
if p.rpcmessagequeue.sendPriorityQueue.len == 0:
return # fast path
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[0].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popFirst()
proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
await p.connectedFut
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[^1].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popLast()
var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", p, msg = shortLog(msg)
return
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(
value = p.rpcmessagequeue.sendPriorityQueue.len.int64,
labelValues = [$p.peerId])
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
# Continuation for a pending `sendMsg` future from below
try:
await conn.writeLp(msg)
await msgFut
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
@@ -281,7 +282,43 @@ proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
await conn.close() # This will clean up the send connection
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool) {.async.} =
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
# Slow path of `sendMsg` where msg is held in memory while send connection is
# being set up
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
try:
await p.connectedFut
except CatchableError as exc:
debug "Error when waiting for a send conn to be setup", msg = exc.msg, p
var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", msg = shortLog(msg), p
return
trace "sending encoded msg to peer", conn, encoded = shortLog(msg), p
try:
await sendMsgContinue(conn, conn.writeLp(msg))
except CancelledError as exc:
trace "Continuation for pending `sendMsg` future has been unexpectedly cancelled"
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
trace "sending encoded msg to peer", conn, encoded = shortLog(msg), p
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
sendMsgSlow(p, msg)
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
@@ -294,24 +331,23 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool) {.async.}
if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return
if msg.len > p.maxMessageSize:
Future[void].completed()
elif msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
if isHighPriority:
Future[void].completed()
elif isHighPriority:
p.clearSendPriorityQueue()
let f = p.sendMsg(msg)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(libp2p_expensive_metrics):
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
f
else:
await p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(libp2p_expensive_metrics):
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
trace "message queued", p, msg = shortLog(msg)
f
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
@@ -348,7 +384,7 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.async.} =
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
@@ -377,11 +413,11 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
await p.sendEncoded(encodedSplitMsg, isHighPriority)
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
await p.sendEncoded(encoded, isHighPriority)
asyncSpawn p.sendEncoded(encoded, isHighPriority)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -396,12 +432,13 @@ proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# this minimizes the number of times we have to wait for something (each wait = performance cost)
# we will never wait for a finished future and by waiting for the last one, all that come before it are guaranteed
# to be finished already (since sends are processed in order).
# waiting for the last future minimizes the number of times we have to
# wait for something (each wait = performance cost) -
# clearSendPriorityQueue ensures we're not waiting for an already-finished
# future
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
await p.rpcmessagequeue.sendPriorityQueue[^1]
when defined(libp2p_expensive_metrics):
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
@@ -413,11 +450,12 @@ proc startSendNonPriorityTask(p: PubSubPeer) =
proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancel()
p.rpcmessagequeue.sendNonPriorityTask.cancelSoon()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
p.rpcmessagequeue.nonPriorityQueue.clear()
when defined(libp2p_expensive_metrics):
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
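
Editor's note: because `sendEncoded` is no longer `{.async.}`, its early exits must hand back a future explicitly, and `Future[void].completed()` serves that role; the non-async shape is also what lets `sendMsg`'s fast path forward the write future without copying `msg`. A reduced sketch of the shape (the sleep stands in for a real write):

import chronos

proc sendSketch(msg: seq[byte]): Future[void] =
  # plain proc: no async rewrite, so `msg` is not copied into a
  # state-machine environment
  if msg.len == 0:
    return Future[void].completed() # early exit: already-done future
  return sleepAsync(1.milliseconds) # stands in for conn.writeLp(msg)
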

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -678,17 +678,25 @@ proc deletesRegister(rdv: RendezVous) {.async.} =
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(rdv: RendezVous) {.async.} =
method start*(
rdv: RendezVous
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if not rdv.registerDeletionLoop.isNil:
warn "Starting rendezvous twice"
return
return fut
rdv.registerDeletionLoop = rdv.deletesRegister()
rdv.started = true
fut
method stop*(rdv: RendezVous) {.async.} =
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if rdv.registerDeletionLoop.isNil:
warn "Stopping rendezvous without starting it"
return
return fut
rdv.started = false
rdv.registerDeletionLoop.cancel()
rdv.registerDeletionLoop = nil
fut

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -89,7 +89,7 @@ type
readCs: CipherState
writeCs: CipherState
NoiseError* = object of LPError
NoiseError* = object of LPStreamError
NoiseHandshakeError* = object of NoiseError
NoiseDecryptTagError* = object of NoiseError
NoiseOversizedPayloadError* = object of NoiseError
@@ -99,10 +99,10 @@ type
func shortLog*(conn: NoiseConnection): auto =
try:
if conn.isNil: "NoiseConnection(nil)"
if conn == nil: "NoiseConnection(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(NoiseConnection): shortLog(it)
@@ -112,7 +112,7 @@ proc genKeyPair(rng: var HmacDrbgContext): KeyPair =
proc hashProtocol(name: string): MDigest[256] =
# If protocol_name is less than or equal to HASHLEN bytes in length,
# sets h equal to protocol_name with zero bytes appended to make HASHLEN bytes.
# sets h to protocol_name with zero bytes appended to make HASHLEN bytes.
# Otherwise sets h = HASH(protocol_name).
if name.len <= 32:
@@ -142,7 +142,7 @@ proc encrypt(
inc state.n
if state.n > NonceMax:
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [NoiseNonceMaxError].} =
@@ -168,10 +168,11 @@ proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
trace "decryptWithAd", tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
if tagIn != tagOut:
debug "decryptWithAd failed", data = shortLog(data)
raise newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.")
raise (ref NoiseDecryptTagError)(msg:
"decryptWithAd failed tag authentication.")
inc state.n
if state.n > NonceMax:
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
# Symmetricstate
@@ -181,8 +182,7 @@ proc init(_: type[SymmetricState]): SymmetricState =
result.cs = CipherState(k: EmptyKey)
proc mixKey(ss: var SymmetricState, ikm: ChaChaPolyKey) =
var
temp_keys: array[2, ChaChaPolyKey]
var temp_keys: array[2, ChaChaPolyKey]
sha256.hkdf(ss.ck, ikm, [], temp_keys)
ss.ck = temp_keys[0]
ss.cs = CipherState(k: temp_keys[1])
@@ -198,8 +198,7 @@ proc mixHash(ss: var SymmetricState, data: openArray[byte]) =
# We might use this for other handshake patterns/tokens
proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
var
temp_keys: array[3, ChaChaPolyKey]
var temp_keys: array[3, ChaChaPolyKey]
sha256.hkdf(ss.ck, ikm, [], temp_keys)
ss.ck = temp_keys[0]
ss.mixHash(temp_keys[1])
@@ -234,7 +233,8 @@ proc init(_: type[HandshakeState]): HandshakeState =
template write_e: untyped =
trace "noise write e"
# Sets e (which must be empty) to GENERATE_KEYPAIR(). Appends e.public_key to the buffer. Calls MixHash(e.public_key).
# Sets e (which must be empty) to GENERATE_KEYPAIR().
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
hs.e = genKeyPair(p.rng[])
msg.add hs.e.publicKey
hs.ss.mixHash(hs.e.publicKey)
@@ -275,33 +275,37 @@ template read_e: untyped =
trace "noise read e", size = msg.len
if msg.len < Curve25519Key.len:
raise newException(NoiseHandshakeError, "Noise E, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise E, expected more data")
# Sets re (which must be empty) to the next DHLEN bytes from the message. Calls MixHash(re.public_key).
# Sets re (which must be empty) to the next DHLEN bytes from the message.
# Calls MixHash(re.public_key).
hs.re[0..Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
msg.consume(Curve25519Key.len)
hs.ss.mixHash(hs.re)
template read_s: untyped =
trace "noise read s", size = msg.len
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True, or to the next DHLEN bytes otherwise.
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
# or to the next DHLEN bytes otherwise.
# Sets rs (which must be empty) to DecryptAndHash(temp).
let
rsLen =
if hs.ss.cs.hasKey:
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
raise newException(NoiseHandshakeError, "Noise S, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len + ChaChaPolyTag.len
else:
if msg.len < Curve25519Key.len:
raise newException(NoiseHandshakeError, "Noise S, expected more data")
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
Curve25519Key.len
hs.rs[0..Curve25519Key.high] =
hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
msg.consume(rsLen)
proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
proc readFrame(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
var besize {.noinit.}: array[2, byte]
await sconn.readExactly(addr besize[0], besize.len)
let size = uint16.fromBytesBE(besize).int
@@ -313,7 +317,11 @@ proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
await sconn.readExactly(addr buffer[0], buffer.len)
return buffer
proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
proc writeFrame(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
doAssert buf.len <= uint16.high.int
var
lesize = buf.len.uint16
@@ -324,13 +332,24 @@ proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
outbuf &= buf
sconn.write(outbuf)
proc receiveHSMessage(sconn: Connection): Future[seq[byte]] = readFrame(sconn)
proc sendHSMessage(sconn: Connection, buf: openArray[byte]): Future[void] =
proc receiveHSMessage(
sconn: Connection
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
readFrame(sconn)
proc sendHSMessage(
sconn: Connection,
buf: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
writeFrame(sconn, buf)
proc handshakeXXOutbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
const initiator = true
var
hs = HandshakeState.init()
@@ -372,13 +391,16 @@ proc handshakeXXOutbound(
await conn.sendHSMessage(msg.data)
let (cs1, cs2) = hs.ss.split()
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)
proc handshakeXXInbound(
p: Noise, conn: Connection,
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
p2pSecret: seq[byte]
): Future[HandshakeResult] {.async: (raises: [
CancelledError, LPStreamError]).} =
const initiator = false
var
@@ -422,11 +444,14 @@ proc handshakeXXInbound(
let
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
(cs1, cs2) = hs.ss.split()
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
return HandshakeResult(
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
finally:
burnMem(hs)
method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
method readMessage*(
sconn: NoiseConnection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
while true: # Discard 0-length payloads
let frame = await sconn.stream.readFrame()
sconn.activity = true
@@ -458,7 +483,11 @@ proc encryptFrame(
cipherFrame[2 + src.len()..<cipherFrame.len] = tag
method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
method write*(
sconn: NoiseConnection,
message: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
# Fast path: `{.async.}` would introduce a copy of `message`
const FramingSize = 2 + sizeof(ChaChaPolyTag)
@@ -478,7 +507,8 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
try:
encryptFrame(
sconn,
cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
cipherFrames.toOpenArray(
woffset, woffset + chunkSize + FramingSize - 1),
message.toOpenArray(offset, offset + chunkSize - 1))
except NoiseNonceMaxError as exc:
debug "Noise nonce exceeded"
@@ -501,21 +531,28 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
# sequencing issues
sconn.stream.write(cipherFrames)
method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
method handshake*(
p: Noise,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Starting Noise handshake", conn, initiator
let timeout = conn.timeout
conn.timeout = HandshakeTimeout
# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
let
signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes).tryGet()
let signedPayload = p.localPrivateKey.sign(
PayloadString & p.noiseKeys.publicKey.getBytes)
if signedPayload.isErr():
raise (ref NoiseHandshakeError)(msg:
"Failed to sign public key: " & $signedPayload.error())
var
libp2pProof = initProtoBuffer()
libp2pProof.write(1, p.localPublicKey)
libp2pProof.write(2, signedPayload.getBytes())
libp2pProof.write(2, signedPayload.get().getBytes())
# data field also there but not used!
libp2pProof.finish()
@@ -534,29 +571,38 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
remoteSigBytes: seq[byte]
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote public key bytes. (initiator: " &
$initiator & ")")
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to deserialize remote signature bytes. (initiator: " &
$initiator & ")")
if not remotePubKey.init(remotePubKeyBytes):
raise newException(NoiseHandshakeError, "Failed to decode remote public key. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote public key. (initiator: " & $initiator & ")")
if not remoteSig.init(remoteSigBytes):
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
raise (ref NoiseHandshakeError)(msg:
"Failed to decode remote signature. (initiator: " & $initiator & ")")
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
if not remoteSig.verify(verifyPayload, remotePubKey):
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
raise (ref NoiseHandshakeError)(msg:
"Noise handshake signature verify failed.")
else:
trace "Remote signature verified", conn
let pid = PeerId.init(remotePubKey).valueOr:
raise newException(NoiseHandshakeError, "Invalid remote peer id: " & $error)
raise (ref NoiseHandshakeError)(msg:
"Invalid remote peer id: " & $error)
trace "Remote peer id", pid = $pid
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
raise (ref NoiseHandshakeError)(msg:
"Failed to validate expected peerId.")
if pid != targetPid:
var
@@ -566,7 +612,8 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
initiator, dealt_peer = conn,
dealt_key = $failedKey, received_peer = $pid,
received_key = $remotePubKey
raise newException(NoiseHandshakeError, "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
raise (ref NoiseHandshakeError)(msg:
"Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
conn.peerId = pid
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
@@ -586,7 +633,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
return secure
method closeImpl*(s: NoiseConnection) {.async.} =
method closeImpl*(s: NoiseConnection) {.async: (raises: []).} =
await procCall SecureConn(s).closeImpl()
burnMem(s.readCs)
@@ -597,15 +644,14 @@ method init*(p: Noise) {.gcsafe.} =
p.codec = NoiseCodec
proc new*(
T: typedesc[Noise],
rng: ref HmacDrbgContext,
privateKey: PrivateKey,
outgoing: bool = true,
commonPrologue: seq[byte] = @[]): T =
T: typedesc[Noise],
rng: ref HmacDrbgContext,
privateKey: PrivateKey,
outgoing: bool = true,
commonPrologue: seq[byte] = @[]): T =
let pkBytes = privateKey.getPublicKey()
.expect("Expected valid Private Key")
.getBytes().expect("Couldn't get public Key bytes")
.expect("Expected valid Private Key")
.getBytes().expect("Couldn't get public Key bytes")
var noise = Noise(
rng: rng,
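
Editor's note: several hunks in this file trade `newException(T, msg)` for direct construction, `raise (ref T)(msg: ...)`, which keeps multi-line messages tidy and matches the codebase's newer style. A tiny standalone illustration (the error type is made up):

type DemoError = object of CatchableError

proc boom() {.raises: [DemoError].} =
  # allocating the exception object directly is equivalent to
  # newException(DemoError, ...) but reads better when the message
  # spans lines
  raise (ref DemoError)(msg:
    "something went wrong: " & "more detail")
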

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -73,14 +73,14 @@ type
writerCoder: SecureCipher
readerCoder: SecureCipher
SecioError* = object of LPError
SecioError* = object of LPStreamError
func shortLog*(conn: SecioConn): auto =
try:
if conn.isNil: "SecioConn(nil)"
if conn == nil: "SecioConn(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(SecioConn): shortLog(it)
@@ -107,11 +107,11 @@ proc update(mac: var SecureMac, data: openArray[byte]) =
proc sizeDigest(mac: SecureMac): int {.inline.} =
case mac.kind
of SecureMacType.Sha256:
result = int(mac.ctxsha256.sizeDigest())
int(mac.ctxsha256.sizeDigest())
of SecureMacType.Sha512:
result = int(mac.ctxsha512.sizeDigest())
int(mac.ctxsha512.sizeDigest())
of SecureMacType.Sha1:
result = int(mac.ctxsha1.sizeDigest())
int(mac.ctxsha1.sizeDigest())
proc finish(mac: var SecureMac, data: var openArray[byte]) =
case mac.kind
@@ -188,9 +188,11 @@ proc macCheckAndDecode(sconn: SecioConn, data: var seq[byte]): bool =
sconn.readerCoder.decrypt(data.toOpenArray(0, mark - 1),
data.toOpenArray(0, mark - 1))
data.setLen(mark)
result = true
true
proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
proc readRawMessage(
conn: Connection
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
while true: # Discard 0-length payloads
var lengthBuf: array[4, byte]
await conn.readExactly(addr lengthBuf[0], lengthBuf.len)
@@ -211,19 +213,23 @@ proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
trace "Discarding 0-length payload", conn
method readMessage*(sconn: SecioConn): Future[seq[byte]] {.async.} =
method readMessage*(
sconn: SecioConn
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
## Read message from channel secure connection ``sconn``.
when chronicles.enabledLogLevel == LogLevel.TRACE:
logScope:
stream_oid = $sconn.stream.oid
var buf = await sconn.stream.readRawMessage()
if sconn.macCheckAndDecode(buf):
result = buf
buf
else:
trace "Message MAC verification failed", buf = buf.shortLog
raise (ref SecioError)(msg: "message failed MAC verification")
method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
method write*(
sconn: SecioConn,
message: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
## Write message ``message`` to secure connection ``sconn``.
if message.len == 0:
return
@@ -233,15 +239,16 @@ method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
offset = 0
while left > 0:
let
chunkSize = if left > SecioMaxMessageSize - 64: SecioMaxMessageSize - 64 else: left
chunkSize = min(left, SecioMaxMessageSize - 64)
macsize = sconn.writerMac.sizeDigest()
length = chunkSize + macsize
var msg = newSeq[byte](chunkSize + 4 + macsize)
msg[0..<4] = uint32(length).toBytesBE()
sconn.writerCoder.encrypt(message.toOpenArray(offset, offset + chunkSize - 1),
msg.toOpenArray(4, 4 + chunkSize - 1))
sconn.writerCoder.encrypt(
message.toOpenArray(offset, offset + chunkSize - 1),
msg.toOpenArray(4, 4 + chunkSize - 1))
left = left - chunkSize
offset = offset + chunkSize
let mo = 4 + chunkSize
@@ -253,17 +260,16 @@ method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
await sconn.stream.write(msg)
sconn.activity = true
proc newSecioConn(conn: Connection,
hash: string,
cipher: string,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn
{.raises: [LPError].} =
proc newSecioConn(
conn: Connection,
hash: string,
cipher: string,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn =
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
## ``order``.
result = SecioConn.new(conn, conn.peerId, conn.observedAddr)
let i0 = if order < 0: 1 else: 0
@@ -282,108 +288,136 @@ proc newSecioConn(conn: Connection,
result.readerCoder.init(cipher, secrets.keyOpenArray(i1),
secrets.ivOpenArray(i1))
proc transactMessage(conn: Connection,
msg: seq[byte]): Future[seq[byte]] {.async.} =
proc transactMessage(
conn: Connection,
msg: seq[byte]
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Sending message", message = msg.shortLog, length = len(msg)
await conn.write(msg)
return await conn.readRawMessage()
await conn.readRawMessage()
method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
var
localNonce: array[SecioNonceSize, byte]
remoteNonce: seq[byte]
remoteBytesPubkey: seq[byte]
remoteEBytesPubkey: seq[byte]
remoteEBytesSig: seq[byte]
remotePubkey: PublicKey
remoteEPubkey: ecnist.EcPublicKey
remoteESignature: Signature
remoteExchanges: string
remoteCiphers: string
remoteHashes: string
remotePeerId: PeerId
localPeerId: PeerId
localBytesPubkey = s.localPublicKey.getBytes().tryGet()
method handshake*(
s: Secio,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
let localBytesPubkey = s.localPublicKey.getBytes()
if localBytesPubkey.isErr():
raise (ref SecioError)(msg:
"Failed to get local public key bytes: " & $localBytesPubkey.error())
let localPeerId = PeerId.init(s.localPublicKey)
if localPeerId.isErr():
raise (ref SecioError)(msg:
"Failed to initialize local peer ID: " & $localPeerId.error())
var localNonce: array[SecioNonceSize, byte]
hmacDrbgGenerate(s.rng[], localNonce)
var request = createProposal(localNonce,
localBytesPubkey,
SecioExchanges,
SecioCiphers,
SecioHashes)
localPeerId = PeerId.init(s.localPublicKey).tryGet()
let request = createProposal(
localNonce, localBytesPubkey.get(),
SecioExchanges, SecioCiphers, SecioHashes)
trace "Local proposal", schemes = SecioExchanges,
ciphers = SecioCiphers,
hashes = SecioHashes,
pubkey = localBytesPubkey.shortLog,
peer = localPeerId
var answer = await transactMessage(conn, request)
pubkey = localBytesPubkey.get().shortLog,
peer = localPeerId.get()
let answer = await transactMessage(conn, request)
if len(answer) == 0:
trace "Proposal exchange failed", conn
raise (ref SecioError)(msg: "Proposal exchange failed")
if not decodeProposal(answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
remoteCiphers, remoteHashes):
var
remoteNonce: seq[byte]
remoteBytesPubkey: seq[byte]
remoteExchanges: string
remoteCiphers: string
remoteHashes: string
if not decodeProposal(
answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
remoteCiphers, remoteHashes):
trace "Remote proposal decoding failed", conn
raise (ref SecioError)(msg: "Remote proposal decoding failed")
var remotePubkey: PublicKey
if not remotePubkey.init(remoteBytesPubkey):
trace "Remote public key incorrect or corrupted",
pubkey = remoteBytesPubkey.shortLog
raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")
remotePeerId = PeerId.init(remotePubkey).tryGet()
let remotePeerId = PeerId.init(remotePubkey)
if remotePeerId.isErr():
raise (ref SecioError)(msg:
"Failed to initialize remote peer ID: " & $remotePeerId.error())
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(SecioError, "Failed to validate expected peerId.")
raise (ref SecioError)(msg: "Failed to validate expected peerId.")
if remotePeerId != targetPid:
raise newException(SecioError, "Peer ids don't match!")
conn.peerId = remotePeerId
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
remoteNonce).tryGet()
if remotePeerId.get() != targetPid:
raise (ref SecioError)(msg: "Peer ids don't match!")
conn.peerId = remotePeerId.get()
let order = getOrder(
remoteBytesPubkey, localNonce, localBytesPubkey.get(), remoteNonce)
if order.isErr():
raise (ref SecioError)(msg: "Failed to get order: " & $order.error())
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
hashes = remoteHashes,
pubkey = remoteBytesPubkey.shortLog, order = order,
peer = remotePeerId
pubkey = remoteBytesPubkey.shortLog,
order = order.get(),
peer = remotePeerId.get()
let scheme = selectBest(order, SecioExchanges, remoteExchanges)
let cipher = selectBest(order, SecioCiphers, remoteCiphers)
let hash = selectBest(order, SecioHashes, remoteHashes)
let
scheme = selectBest(order.get(), SecioExchanges, remoteExchanges)
cipher = selectBest(order.get(), SecioCiphers, remoteCiphers)
hash = selectBest(order.get(), SecioHashes, remoteHashes)
if len(scheme) == 0 or len(cipher) == 0 or len(hash) == 0:
trace "No algorithms in common", peer = remotePeerId
trace "No algorithms in common", peer = remotePeerId.get()
raise (ref SecioError)(msg: "No algorithms in common")
trace "Encryption scheme selected", scheme = scheme, cipher = cipher,
hash = hash
var ekeypair = ephemeral(scheme, s.rng[]).tryGet()
let ekeypair = ephemeral(scheme, s.rng[])
if ekeypair.isErr():
raise (ref SecioError)(msg:
"Failed to create ephemeral keypair: " & $ekeypair.error())
# We need EC public key in raw binary form
var epubkey = ekeypair.pubkey.getRawBytes().tryGet()
var localCorpus = request[4..^1] & answer & epubkey
var signature = s.localPrivateKey.sign(localCorpus).tryGet()
let epubkey = ekeypair.get().pubkey.getRawBytes()
if epubkey.isErr():
raise (ref SecioError)(msg:
"Failed to get ephemeral key bytes: " & $epubkey.error())
let
localCorpus = request[4..^1] & answer & epubkey.get()
signature = s.localPrivateKey.sign(localCorpus)
if signature.isErr():
raise (ref SecioError)(msg:
"Failed to sign local corpus: " & $signature.error())
var localExchange = createExchange(epubkey, signature.getBytes())
var remoteExchange = await transactMessage(conn, localExchange)
let
localExchange = createExchange(epubkey.get(), signature.get().getBytes())
remoteExchange = await transactMessage(conn, localExchange)
if len(remoteExchange) == 0:
trace "Corpus exchange failed", conn
raise (ref SecioError)(msg: "Corpus exchange failed")
var
remoteEBytesPubkey: seq[byte]
remoteEBytesSig: seq[byte]
if not decodeExchange(remoteExchange, remoteEBytesPubkey, remoteEBytesSig):
trace "Remote exchange decoding failed", conn
raise (ref SecioError)(msg: "Remote exchange decoding failed")
var remoteESignature: Signature
if not remoteESignature.init(remoteEBytesSig):
trace "Remote signature incorrect or corrupted", signature = remoteEBytesSig.shortLog
trace "Remote signature incorrect or corrupted",
signature = remoteEBytesSig.shortLog
raise (ref SecioError)(msg: "Remote signature incorrect or corrupted")
var remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
let remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
if not remoteESignature.verify(remoteCorpus, remotePubkey):
trace "Signature verification failed", scheme = $remotePubkey.scheme,
signature = $remoteESignature,
@@ -393,30 +427,34 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
trace "Signature verified", scheme = remotePubkey.scheme
var remoteEPubkey: ecnist.EcPublicKey
if not remoteEPubkey.initRaw(remoteEBytesPubkey):
trace "Remote ephemeral public key incorrect or corrupted",
pubkey = toHex(remoteEBytesPubkey)
raise (ref SecioError)(msg: "Remote ephemeral public key incorrect or corrupted")
raise (ref SecioError)(msg:
"Remote ephemeral public key incorrect or corrupted")
var secret = getSecret(remoteEPubkey, ekeypair.seckey)
let secret = getSecret(remoteEPubkey, ekeypair.get().seckey)
if len(secret) == 0:
trace "Shared secret could not be created"
raise (ref SecioError)(msg: "Shared secret could not be created")
trace "Shared secret calculated", secret = secret.shortLog
var keys = stretchKeys(cipher, hash, secret)
let keys = stretchKeys(cipher, hash, secret)
trace "Authenticated encryption parameters",
iv0 = toHex(keys.ivOpenArray(0)), key0 = keys.keyOpenArray(0).shortLog,
iv0 = toHex(keys.ivOpenArray(0)),
key0 = keys.keyOpenArray(0).shortLog,
mac0 = keys.macOpenArray(0).shortLog,
iv1 = keys.ivOpenArray(1).shortLog, key1 = keys.keyOpenArray(1).shortLog,
iv1 = keys.ivOpenArray(1).shortLog,
key1 = keys.keyOpenArray(1).shortLog,
mac1 = keys.macOpenArray(1).shortLog
# Perform Nonce exchange over encrypted channel.
var secioConn = newSecioConn(conn, hash, cipher, keys, order, remotePubkey)
result = secioConn
let secioConn = newSecioConn(
conn, hash, cipher, keys, order.get(), remotePubkey)
await secioConn.write(remoteNonce)
var res = await secioConn.readMessage()
@@ -426,19 +464,20 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
raise (ref SecioError)(msg: "Nonce verification failed")
else:
trace "Secure handshake succeeded"
secioConn
method init(s: Secio) {.gcsafe.} =
procCall Secure(s).init()
s.codec = SecioCodec
proc new*(
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
let secio = Secio(
rng: rng,
localPrivateKey: localPrivateKey,
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key")
)
secio.init()
secio
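
Editor's note: the handshake rewrite above systematically replaces `tryGet()` (which raises a generic result error) with explicit `isErr()` checks that surface a `SecioError` carrying context. A reduced standalone sketch of the conversion, using the results package and a made-up error type:

import results

type DemoError = object of CatchableError

proc half(n: int): Result[int, string] =
  if n mod 2 == 0: ok(n div 2)
  else: err("odd input")

proc mustHalve(n: int): int {.raises: [DemoError].} =
  let r = half(n)
  if r.isErr():
    # rather than r.tryGet(), raise a domain error with context
    raise (ref DemoError)(msg: "halving failed: " & r.error())
  r.get()
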

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -37,18 +37,19 @@ type
func shortLog*(conn: SecureConn): auto =
try:
if conn.isNil: "SecureConn(nil)"
if conn == nil: "SecureConn(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(SecureConn): shortLog(it)
proc new*(T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
proc new*(
T: type SecureConn,
conn: Connection,
peerId: PeerId,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout): T =
result = T(stream: conn,
peerId: peerId,
observedAddr: observedAddr,
@@ -63,55 +64,72 @@ method initStream*(s: SecureConn) =
procCall Connection(s).initStream()
method closeImpl*(s: SecureConn) {.async.} =
method closeImpl*(s: SecureConn) {.async: (raises: []).} =
trace "Closing secure conn", s, dir = s.dir
if not(isNil(s.stream)):
if s.stream != nil:
await s.stream.close()
await procCall Connection(s).closeImpl()
method readMessage*(c: SecureConn): Future[seq[byte]] {.async, base.} =
doAssert(false, "Not implemented!")
method readMessage*(
c: SecureConn
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
raiseAssert("Not implemented!")
method getWrapped*(s: SecureConn): Connection = s.stream
method handshake*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]): Future[SecureConn] {.async, base.} =
doAssert(false, "Not implemented!")
method handshake*(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
raiseAssert("Not implemented!")
proc handleConn(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]): Future[Connection] {.async.} =
proc handleConn(
s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
var sconn = await s.handshake(conn, initiator, peerId)
# mark connection bottom level transport direction
# this is the safest place to do this
# we require this information in for example gossipsub
sconn.transportDir = if initiator: Direction.Out else: Direction.In
proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
try:
let futs = [conn.join(), sconn.join()]
await futs[0] or futs[1]
for f in futs:
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
block:
let
fut1 = conn.join()
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError: raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished: await fut1.cancelAndWait()
if not fut2.finished: await fut2.cancelAndWait()
block:
let
fut1 = sconn.close()
fut2 = conn.close()
await allFutures(fut1, fut2)
static: doAssert typeof(fut1).E is void # Cannot fail
static: doAssert typeof(fut2).E is void # Cannot fail
await allFuturesThrowing(
sconn.close(), conn.close())
except CancelledError:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError.
discard
except CatchableError as exc:
debug "error cleaning up secure connection", err = exc.msg, sconn
if not isNil(sconn):
if sconn != nil:
# All the errors are handled inside the `cleanup()` procedure.
asyncSpawn cleanup()
return sconn
sconn
method init*(s: Secure) =
procCall LPProtocol(s).init()
@@ -127,22 +145,25 @@ method init*(s: Secure) =
warn "securing connection canceled", conn
await conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
warn "securing connection failed", err = exc.msg, conn
await conn.close()
s.handler = handle
method secure*(s: Secure,
conn: Connection,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
method secure*(
s: Secure,
conn: Connection,
peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base.} =
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(s: SecureConn,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: SecureConn,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")
if s.isEof:
@@ -159,7 +180,7 @@ method readOnce*(s: SecureConn,
raise err
except CancelledError as exc:
raise exc
except CatchableError as err:
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name,
message = err.msg,

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -34,10 +34,10 @@ type
func shortLog*(s: BufferStream): auto =
try:
if s.isNil: "BufferStream(nil)"
if s == nil: "BufferStream(nil)"
else: &"{shortLog(s.peerId)}:{s.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(BufferStream): shortLog(it)
@@ -55,14 +55,16 @@ method initStream*(s: BufferStream) =
trace "BufferStream created", s
proc new*(
T: typedesc[BufferStream],
timeout: Duration = DefaultConnectionTimeout): T =
T: typedesc[BufferStream],
timeout: Duration = DefaultConnectionTimeout): T =
let bufferStream = T(timeout: timeout)
bufferStream.initStream()
bufferStream
method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
method pushData*(
s: BufferStream,
data: seq[byte]
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
## Write bytes to internal read buffer, use this to fill up the
## buffer with data.
##
@@ -70,7 +72,7 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
##
doAssert(not s.pushing,
&"Only one concurrent push allowed for stream {s.shortLog()}")
"Only one concurrent push allowed for stream " & s.shortLog())
if s.isClosed or s.pushedEof:
raise newLPStreamClosedError()
@@ -87,12 +89,14 @@ method pushData*(s: BufferStream, data: seq[byte]) {.base, async.} =
finally:
s.pushing = false
method pushEof*(s: BufferStream) {.base, async.} =
method pushEof*(
s: BufferStream
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
if s.pushedEof:
return
doAssert(not s.pushing,
&"Only one concurrent push allowed for stream {s.shortLog()}")
"Only one concurrent push allowed for stream " & s.shortLog())
s.pushedEof = true
@@ -108,13 +112,14 @@ method pushEof*(s: BufferStream) {.base, async.} =
method atEof*(s: BufferStream): bool =
s.isEof and s.readBuf.len == 0
method readOnce*(s: BufferStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: BufferStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
doAssert(nbytes > 0, "nbytes must be positive integer")
doAssert(not s.reading,
&"Only one concurrent read allowed for stream {s.shortLog()}")
"Only one concurrent read allowed for stream " & s.shortLog())
if s.returnedEof:
raise newLPStreamEOFError()
@@ -135,13 +140,6 @@ method readOnce*(s: BufferStream,
# Not very efficient, but shouldn't happen often
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
raise exc
except CatchableError as exc:
# When an exception happens here, the Bufferstream is effectively
# broken and no more reads will be valid - for now, return EOF if it's
# called again, though this is not completely true - EOF represents an
# "orderly" shutdown and that's not what happened here..
s.returnedEof = true
raise exc
finally:
s.reading = false
@@ -173,7 +171,8 @@ method readOnce*(s: BufferStream,
return rbytes
method closeImpl*(s: BufferStream): Future[void] =
method closeImpl*(
s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
## close the stream and clear the buffer
trace "Closing BufferStream", s, len = s.len
@@ -209,8 +208,8 @@ method closeImpl*(s: BufferStream): Future[void] =
if not s.readQueue.empty():
discard s.readQueue.popFirstNoWait()
except AsyncQueueFullError, AsyncQueueEmptyError:
raise newException(Defect, getCurrentExceptionMsg())
raiseAssert(getCurrentExceptionMsg())
trace "Closed BufferStream", s
procCall Connection(s).closeImpl() # noraises, nocancels
procCall Connection(s).closeImpl()
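Aside: the `shortLog` helpers above replace `raise newException(Defect, ...)` with `raiseAssert`. A minimal sketch of the pattern (illustrative names):

import std/strformat

func describe(x: int): string =
  try:
    &"value: {x}"
  except ValueError as exc:
    # `&` is declared as possibly raising ValueError, but cannot do so
    # here, so raiseAssert turns the impossible branch into a Defect
    raiseAssert(exc.msg)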

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -31,18 +31,22 @@ type
tracked: bool
when defined(libp2p_agents_metrics):
declareGauge(libp2p_peers_identity, "peers identities", labels = ["agent"])
declareCounter(libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"])
declareCounter(libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"])
declareGauge libp2p_peers_identity,
"peers identities", labels = ["agent"]
declareCounter libp2p_peers_traffic_read,
"incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write,
"outgoing traffic", labels = ["agent"]
declareCounter(libp2p_network_bytes, "total traffic", labels = ["direction"])
declareCounter libp2p_network_bytes,
"total traffic", labels = ["direction"]
func shortLog*(conn: ChronosStream): auto =
try:
if conn.isNil: "ChronosStream(nil)"
if conn == nil: "ChronosStream(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)
chronicles.formatIt(ChronosStream): shortLog(it)
@@ -50,17 +54,18 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async.} =
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()
s.close()
procCall Connection(s).initStream()
proc init*(C: type ChronosStream,
client: StreamTransport,
dir: Direction,
timeout = DefaultChronosStreamTimeout,
observedAddr: Opt[MultiAddress]): ChronosStream =
proc init*(
C: type ChronosStream,
client: StreamTransport,
dir: Direction,
timeout = DefaultChronosStreamTimeout,
observedAddr: Opt[MultiAddress]): ChronosStream =
result = C(client: client,
timeout: timeout,
dir: dir,
@@ -94,7 +99,11 @@ when defined(libp2p_agents_metrics):
libp2p_peers_identity.dec(labelValues = [s.shortAgent])
s.tracked = false
method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.async.} =
method readOnce*(
s: ChronosStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
if s.atEof:
raise newLPStreamEOFError()
withExceptions:
@@ -107,7 +116,10 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
libp2p_peers_traffic_read.inc(result.int64, labelValues = [s.shortAgent])
proc completeWrite(
s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =
s: ChronosStream,
fut: Future[int].Raising([TransportError, CancelledError]),
msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
withExceptions:
# StreamTransport will only return written < msg.len on fatal failures where
# further writing is not possible - in such cases, we'll raise here,
@@ -124,7 +136,11 @@ proc completeWrite(
if s.tracked:
libp2p_peers_traffic_write.inc(msgLen.int64, labelValues = [s.shortAgent])
method write*(s: ChronosStream, msg: seq[byte]): Future[void] =
method write*(
s: ChronosStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
# Avoid a copy of msg being kept in the closure created by `{.async.}` as this
# drives up memory usage
if msg.len == 0:
@@ -145,19 +161,14 @@ method closed*(s: ChronosStream): bool =
method atEof*(s: ChronosStream): bool =
s.client.atEof()
method closeImpl*(s: ChronosStream) {.async.} =
try:
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
method closeImpl*(
s: ChronosStream) {.async: (raises: []).} =
trace "Shutting down chronos stream", address = $s.client.remoteAddress(), s
if not s.client.closed():
await s.client.closeWait()
if not s.client.closed():
await s.client.closeWait()
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "Error closing chronosstream", s, msg = exc.msg
trace "Shutdown chronos stream", address = $s.client.remoteAddress(), s
when defined(libp2p_agents_metrics):
# do this after closing!
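Aside: `completeWrite` above now types its parameter as `Future[int].Raising([TransportError, CancelledError])`. A sketch of how such a typed future keeps the caller's `raises` list closed (illustrative names):

import chronos

proc consume(
    fut: Future[int].Raising([CancelledError, ValueError])
): Future[int] {.async: (raises: [CancelledError, ValueError]).} =
  # the error set is part of the future's type, so awaiting it cannot
  # introduce exceptions beyond the declared list
  await fut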

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,25 +27,25 @@ const
DefaultConnectionTimeout* = 5.minutes
type
TimeoutHandler* = proc(): Future[void] {.gcsafe, raises: [].}
TimeoutHandler* = proc(): Future[void] {.async: (raises: []).}
Connection* = ref object of LPStream
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void] # the current timer instance
activity*: bool # reset every time data is sent or received
timeout*: Duration # channel timeout if no activity
timerTaskFut: Future[void].Raising([]) # the current timer instance
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Opt[MultiAddress]
protocol*: string # protocol used by the connection, used as tag for metrics
transportDir*: Direction # The bottom level transport (generally the socket) direction
protocol*: string # protocol used by the connection, used as metrics tag
transportDir*: Direction # underlying transport (usually socket) direction
when defined(libp2p_agents_metrics):
shortAgent*: string
proc timeoutMonitor(s: Connection) {.async.}
proc timeoutMonitor(s: Connection) {.async: (raises: []).}
func shortLog*(conn: Connection): string =
try:
if conn.isNil: "Connection(nil)"
if conn == nil: "Connection(nil)"
else: &"{shortLog(conn.peerId)}:{conn.oid}"
except ValueError as exc:
raiseAssert(exc.msg)
@@ -58,23 +58,28 @@ method initStream*(s: Connection) =
procCall LPStream(s).initStream()
doAssert(isNil(s.timerTaskFut))
doAssert(s.timerTaskFut == nil)
if s.timeout > 0.millis:
trace "Monitoring for timeout", s, timeout = s.timeout
s.timerTaskFut = s.timeoutMonitor()
if isNil(s.timeoutHandler):
s.timeoutHandler = proc(): Future[void] =
trace "Idle timeout expired, closing connection", s
s.close()
if s.timeoutHandler == nil:
s.timeoutHandler =
proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, closing connection", s
s.close()
method closeImpl*(s: Connection): Future[void] =
method closeImpl*(s: Connection): Future[void] {.async: (raises: []).} =
# Cleanup timeout timer
trace "Closing connection", s
if not isNil(s.timerTaskFut) and not s.timerTaskFut.finished:
s.timerTaskFut.cancel()
if s.timerTaskFut != nil and not s.timerTaskFut.finished:
# Don't `cancelAndWait` here to avoid risking deadlock in this scenario:
# - `pollActivity` is waiting for `s.timeoutHandler` to complete.
# - `s.timeoutHandler` may have triggered `closeImpl` and we are now here.
# In this situation, we have to return so that `s.timerTaskFut` can complete.
s.timerTaskFut.cancelSoon()
s.timerTaskFut = nil
trace "Closed connection", s
@@ -84,7 +89,7 @@ method closeImpl*(s: Connection): Future[void] =
func hash*(p: Connection): Hash =
cast[pointer](p).hash
proc pollActivity(s: Connection): Future[bool] {.async.} =
proc pollActivity(s: Connection): Future[bool] {.async: (raises: []).} =
if s.closed and s.atEof:
return false # Done, no more monitoring
@@ -95,22 +100,13 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
# Inactivity timeout happened, call timeout monitor
trace "Connection timed out", s
if not(isNil(s.timeoutHandler)):
if s.timeoutHandler != nil:
trace "Calling timeout handler", s
try:
await s.timeoutHandler()
except CancelledError:
# timeoutHandler is expected to be fast, but it's still possible that
# cancellation will happen here - no need to warn about it - we do want to
# stop the polling however
debug "Timeout handler cancelled", s
except CatchableError as exc: # Shouldn't happen
warn "exception in timeout handler", s, exc = exc.msg
await s.timeoutHandler()
return false
proc timeoutMonitor(s: Connection) {.async.} =
proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
## monitor the channel for inactivity
##
## if the timeout was hit, it means that
@@ -129,21 +125,22 @@ proc timeoutMonitor(s: Connection) {.async.} =
return
method getWrapped*(s: Connection): Connection {.base.} =
doAssert(false, "not implemented!")
raiseAssert("Not implemented!")
when defined(libp2p_agents_metrics):
proc setShortAgent*(s: Connection, shortAgent: string) =
var conn = s
while not isNil(conn):
while conn != nil:
conn.shortAgent = shortAgent
conn = conn.getWrapped()
proc new*(C: type Connection,
peerId: PeerId,
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
timeoutHandler: TimeoutHandler = nil): Connection =
proc new*(
C: type Connection,
peerId: PeerId,
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout: Duration = DefaultConnectionTimeout,
timeoutHandler: TimeoutHandler = nil): Connection =
result = C(peerId: peerId,
dir: dir,
timeout: timeout,

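Aside: the `closeImpl` comment above carries the key reasoning: `cancelSoon` only schedules the cancellation, while `cancelAndWait` suspends the caller until the target actually finishes. A minimal sketch (illustrative):

import chronos

proc demo() {.async.} =
  let task = sleepAsync(10.seconds)
  task.cancelSoon()  # fire-and-forget: returns immediately
  # `await task.cancelAndWait()` would instead suspend `demo` until
  # `task` completes - unsafe if `task` is indirectly waiting on us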
View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -23,8 +23,8 @@ import ../varint,
export errors
declareGauge(libp2p_open_streams,
"open stream instances", labels = ["type", "dir"])
declareGauge libp2p_open_streams,
"open stream instances", labels = ["type", "dir"]
export oids
@@ -50,12 +50,7 @@ type
LPStreamError* = object of LPError
LPStreamIncompleteError* = object of LPStreamError
LPStreamIncorrectDefect* = object of Defect
LPStreamLimitError* = object of LPStreamError
LPStreamReadError* = object of LPStreamError
par*: ref CatchableError
LPStreamWriteError* = object of LPStreamError
par*: ref CatchableError
LPStreamEOFError* = object of LPStreamError
# X | Read | Write
@@ -77,54 +72,12 @@ type
opened*: uint64
closed*: uint64
proc setupStreamTracker*(name: string): StreamTracker =
let tracker = new StreamTracker
proc dumpTracking(): string {.gcsafe.} =
return "Opened " & tracker.id & ": " & $tracker.opened & "\n" &
"Closed " & tracker.id & ": " & $tracker.closed
proc leakTransport(): bool {.gcsafe.} =
return (tracker.opened != tracker.closed)
tracker.id = name
tracker.opened = 0
tracker.closed = 0
tracker.dump = dumpTracking
tracker.isLeaked = leakTransport
addTracker(name, tracker)
return tracker
proc getStreamTracker(name: string): StreamTracker {.gcsafe.} =
result = cast[StreamTracker](getTracker(name))
if isNil(result):
result = setupStreamTracker(name)
proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError =
var w = newException(LPStreamReadError, "Read stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
result = w
proc newLPStreamReadError*(msg: string): ref LPStreamReadError =
newException(LPStreamReadError, msg)
proc newLPStreamWriteError*(p: ref CatchableError): ref LPStreamWriteError =
var w = newException(LPStreamWriteError, "Write stream failed")
w.msg = w.msg & ", originated from [" & $p.name & "] " & p.msg
w.par = p
result = w
proc newLPStreamIncompleteError*(): ref LPStreamIncompleteError =
result = newException(LPStreamIncompleteError, "Incomplete data received")
proc newLPStreamLimitError*(): ref LPStreamLimitError =
result = newException(LPStreamLimitError, "Buffer limit reached")
proc newLPStreamIncorrectDefect*(m: string): ref LPStreamIncorrectDefect =
result = newException(LPStreamIncorrectDefect, m)
proc newLPStreamEOFError*(): ref LPStreamEOFError =
result = newException(LPStreamEOFError, "Stream EOF!")
@@ -145,8 +98,9 @@ proc newLPStreamConnDownError*(
parentException)
func shortLog*(s: LPStream): auto =
if s.isNil: "LPStream(nil)"
if s == nil: "LPStream(nil)"
else: $s.oid
chronicles.formatIt(LPStream): shortLog(it)
method initStream*(s: LPStream) {.base.} =
@@ -157,10 +111,12 @@ method initStream*(s: LPStream) {.base.} =
s.oid = genOid()
libp2p_open_streams.inc(labelValues = [s.objName, $s.dir])
inc getStreamTracker(s.objName).opened
trackCounter(s.objName)
trace "Stream created", s, objName = s.objName, dir = $s.dir
proc join*(s: LPStream): Future[void] {.public.} =
proc join*(
s: LPStream
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
## Wait for the stream to be closed
s.closeEvent.wait()
@@ -171,19 +127,21 @@ method atEof*(s: LPStream): bool {.base, public.} =
s.isEof
method readOnce*(
s: LPStream,
pbytes: pointer,
nbytes: int):
Future[int] {.base, async, public.} =
s: LPStream,
pbytes: pointer,
nbytes: int
): Future[int] {.base, async: (raises: [
CancelledError, LPStreamError], raw: true), public.} =
## Reads whatever is available in the stream,
## up to `nbytes`. Will block if nothing is
## available
doAssert(false, "not implemented!")
raiseAssert("Not implemented!")
proc readExactly*(s: LPStream,
pbytes: pointer,
nbytes: int):
Future[void] {.async, public.} =
proc readExactly*(
s: LPStream,
pbytes: pointer,
nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
## Waits for `nbytes` to be available, then reads
## them and returns them
if s.atEof:
@@ -217,10 +175,11 @@ proc readExactly*(s: LPStream,
trace "couldn't read all bytes, incomplete data", s, nbytes, read
raise newLPStreamIncompleteError()
proc readLine*(s: LPStream,
limit = 0,
sep = "\r\n"): Future[string]
{.async, public.} =
proc readLine*(
s: LPStream,
limit = 0,
sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
## Reads until `limit` bytes have been read, or a `sep` is found
# TODO replace with something that exploits buffering better
var lim = if limit <= 0: -1 else: limit
@@ -246,7 +205,9 @@ proc readLine*(s: LPStream,
if len(result) == lim:
break
proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
proc readVarint*(
conn: LPStream
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
var
buffer: array[10, byte]
@@ -264,7 +225,11 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
proc readLp*(
s: LPStream,
maxSize: int
): Future[seq[byte]] {.async: (raises: [
CancelledError, LPStreamError]), public.} =
## read a length-prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
@@ -278,13 +243,21 @@ proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
var res = newSeqUninitialized[byte](length)
await s.readExactly(addr res[0], res.len)
return res
res
method write*(s: LPStream, msg: seq[byte]): Future[void] {.base, public.} =
method write*(
s: LPStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true), base, public.} =
# Write `msg` to stream, waiting for the write to be finished
doAssert(false, "not implemented!")
raiseAssert("Not implemented!")
proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
proc writeLp*(
s: LPStream,
msg: openArray[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true), public.} =
## Write `msg` with a varint-encoded length prefix
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
@@ -292,35 +265,53 @@ proc writeLp*(s: LPStream, msg: openArray[byte]): Future[void] {.public.} =
buf[vbytes.len..<buf.len] = msg
s.write(buf)
proc writeLp*(s: LPStream, msg: string): Future[void] {.public.} =
proc writeLp*(
s: LPStream,
msg: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true), public.} =
writeLp(s, msg.toOpenArrayByte(0, msg.high))
proc write*(s: LPStream, msg: string): Future[void] {.public.} =
proc write*(
s: LPStream,
msg: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true), public.} =
s.write(msg.toBytes())
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
method closeImpl*(
s: LPStream
): Future[void] {.async: (raises: [], raw: true), base.} =
## Implementation of close - called only once
trace "Closing stream", s, objName = s.objName, dir = $s.dir
libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
inc getStreamTracker(s.objName).closed
untrackCounter(s.objName)
s.closeEvent.fire()
trace "Closed stream", s, objName = s.objName, dir = $s.dir
let fut = newFuture[void]()
fut.complete()
fut
method close*(s: LPStream): Future[void] {.base, async, public.} = # {.raises [Defect].}
method close*(
s: LPStream
): Future[void] {.async: (raises: [], raw: true), base, public.} =
## close the stream - this may block, but will not raise exceptions
##
if s.isClosed:
trace "Already closed", s
return
let fut = newFuture[void]()
fut.complete()
return fut
s.isClosed = true # Set flag before performing virtual close
# An separate implementation method is used so that even when derived types
# A separate implementation method is used so that even when derived types
# override `closeImpl`, it is called only once - anyone overriding `close`
# itself must implement this - once-only check as well, with their own field
await closeImpl(s)
closeImpl(s)
proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
proc closeWithEOF*(
s: LPStream): Future[void] {.async: (raises: []), public.} =
## Close the stream and wait for EOF - use this with half-closed streams where
## an EOF is expected to arrive from the other end.
##
@@ -349,9 +340,9 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
var buf: array[8, byte]
if (await readOnce(s, addr buf[0], buf.len)) != 0:
debug "Unexpected bytes while waiting for EOF", s
except CancelledError:
discard
except LPStreamEOFError:
trace "Expected EOF came", s
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
debug "Unexpected error while waiting for EOF", s, msg = exc.msg

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -42,36 +42,8 @@ type
acceptFuts: seq[Future[StreamTransport]]
connectionsTimeout: Duration
TcpTransportTracker* = ref object of TrackerBase
opened*: uint64
closed*: uint64
TcpTransportError* = object of transport.TransportError
proc setupTcpTransportTracker(): TcpTransportTracker {.gcsafe, raises: [].}
proc getTcpTransportTracker(): TcpTransportTracker {.gcsafe.} =
result = cast[TcpTransportTracker](getTracker(TcpTransportTrackerName))
if isNil(result):
result = setupTcpTransportTracker()
proc dumpTracking(): string {.gcsafe.} =
var tracker = getTcpTransportTracker()
result = "Opened tcp transports: " & $tracker.opened & "\n" &
"Closed tcp transports: " & $tracker.closed
proc leakTransport(): bool {.gcsafe.} =
var tracker = getTcpTransportTracker()
result = (tracker.opened != tracker.closed)
proc setupTcpTransportTracker(): TcpTransportTracker =
result = new TcpTransportTracker
result.opened = 0
result.closed = 0
result.dump = dumpTracking
result.isLeaked = leakTransport
addTracker(TcpTransportTrackerName, result)
proc connHandler*(self: TcpTransport,
client: StreamTransport,
observedAddr: Opt[MultiAddress],
@@ -90,24 +62,38 @@ proc connHandler*(self: TcpTransport,
timeout = self.connectionsTimeout
))
proc onClose() {.async.} =
proc onClose() {.async: (raises: []).} =
try:
let futs = @[client.join(), conn.join()]
await futs[0] or futs[1]
for f in futs:
if not f.finished: await f.cancelAndWait() # cancel outstanding join()
block:
let
fut1 = client.join()
fut2 = conn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError: raiseAssert("Futures list is not empty")
# at least one join() completed, cancel pending one, if any
if not fut1.finished: await fut1.cancelAndWait()
if not fut2.finished: await fut2.cancelAndWait()
trace "Cleaning up client", addrs = $client.remoteAddress,
conn
self.clients[dir].keepItIf( it != client )
await allFuturesThrowing(
conn.close(), client.closeWait())
block:
let
fut1 = conn.close()
fut2 = client.closeWait()
await allFutures(fut1, fut2)
if fut1.failed:
let err = fut1.error()
debug "Error cleaning up client", errMsg = err.msg, conn
static: doAssert typeof(fut2).E is void # Cannot fail
trace "Cleaned up client", addrs = $client.remoteAddress,
conn
except CatchableError as exc:
except CancelledError as exc:
let useExc {.used.} = exc
debug "Error cleaning up client", errMsg = exc.msg, conn
@@ -152,7 +138,7 @@ method start*(
await procCall Transport(self).start(addrs)
trace "Starting TCP transport"
inc getTcpTransportTracker().opened
trackCounter(TcpTransportTrackerName)
for i, ma in addrs:
if not self.handles(ma):
@@ -206,7 +192,7 @@ method stop*(self: TcpTransport) {.async.} =
self.servers = @[]
trace "Transport stopped"
inc getTcpTransportTracker().closed
untrackCounter(TcpTransportTrackerName)
except CatchableError as exc:
trace "Error shutting down tcp transport", exc = exc.msg

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -44,11 +44,12 @@ method initStream*(s: WsStream) =
procCall Connection(s).initStream()
proc new*(T: type WsStream,
session: WSSession,
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout = 10.minutes): T =
proc new*(
T: type WsStream,
session: WSSession,
dir: Direction,
observedAddr: Opt[MultiAddress],
timeout = 10.minutes): T =
let stream = T(
session: session,
@@ -63,18 +64,23 @@ template mapExceptions(body: untyped) =
try:
body
except AsyncStreamIncompleteError:
raise newLPStreamEOFError()
raise newLPStreamIncompleteError()
except AsyncStreamLimitError:
raise newLPStreamLimitError()
except AsyncStreamUseClosedError:
raise newLPStreamEOFError()
except WSClosedError:
raise newLPStreamEOFError()
except AsyncStreamLimitError:
raise newLPStreamLimitError()
except WebSocketError:
raise newLPStreamEOFError()
except CatchableError:
raise newLPStreamEOFError()
method readOnce*(
s: WsStream,
pbytes: pointer,
nbytes: int): Future[int] {.async.} =
s: WsStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
let res = mapExceptions(await s.session.recv(pbytes, nbytes))
if res == 0 and s.session.readyState == ReadyState.Closed:
@@ -83,13 +89,17 @@ method readOnce*(
return res
method write*(
s: WsStream,
msg: seq[byte]): Future[void] {.async.} =
s: WsStream,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await s.session.send(msg, Opcode.Binary))
s.activity = true # reset activity flag
method closeImpl*(s: WsStream): Future[void] {.async.} =
await s.session.close()
method closeImpl*(s: WsStream): Future[void] {.async: (raises: []).} =
try:
await s.session.close()
except CatchableError:
discard
await procCall Connection(s).closeImpl()
method getWrapped*(s: WsStream): Connection = nil
@@ -136,7 +146,7 @@ method start*(
if WSS.match(ma):
if self.secure: true
else:
warn "Trying to listen on a WSS address without setting the certificate!"
warn "Trying to listen on a WSS address without setting certificate!"
false
else: false
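Aside: `mapExceptions` works because a Nim template can expand in value position, which is why `let res = mapExceptions(await s.session.recv(...))` yields the body's result. A generic sketch of that trick (not from this library):

import std/strutils

template orZero(body: untyped): int =
  try:
    body
  except ValueError:
    0

doAssert orZero(parseInt("12")) == 12
doAssert orZero(parseInt("oops")) == 0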

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -31,8 +31,8 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
return m
proc mux*(
self: MuxedUpgrade,
conn: Connection): Future[Muxer] {.async.} =
self: MuxedUpgrade,
conn: Connection): Future[Muxer] {.async.} =
## mux connection
trace "Muxing connection", conn
@@ -59,13 +59,13 @@ proc mux*(
return muxer
method upgrade*(
self: MuxedUpgrade,
conn: Connection,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
self: MuxedUpgrade,
conn: Connection,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction = conn.dir
let sconn = await self.secure(conn, peerId) # secure the connection
if isNil(sconn):
if sconn == nil:
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
@@ -86,22 +86,21 @@ method upgrade*(
return muxer
proc new*(
T: type MuxedUpgrade,
muxers: seq[MuxerProvider],
secureManagers: openArray[Secure] = [],
ms: MultistreamSelect): T =
T: type MuxedUpgrade,
muxers: seq[MuxerProvider],
secureManagers: openArray[Secure] = [],
ms: MultistreamSelect): T =
let upgrader = T(
muxers: muxers,
secureManagers: @secureManagers,
ms: ms)
upgrader.streamHandler = proc(conn: Connection) {.async.} =
upgrader.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection
except CancelledError as exc:
raise exc
return
except CatchableError as exc:
trace "exception in stream handler", conn, msg = exc.msg
finally:

tests/errorhelpers.nim (new file, 29 additions)
View File

@@ -0,0 +1,29 @@
import
std/sequtils,
chronos
proc allFuturesThrowing*(args: varargs[FutureBase]): Future[void] =
# This proc is only meant for use in tests / not suitable for general use.
# - Swallowing errors arbitrarily instead of aggregating them is bad design
# - It raises `CatchableError` instead of the union of the `futs` errors,
# inflating the caller's `raises` list unnecessarily. `macro` could fix it
let futs = @args
(proc() {.async: (raises: [CatchableError]).} =
await allFutures(futs)
var firstErr: ref CatchableError
for fut in futs:
if fut.failed:
let err = fut.error()
if err of CancelledError:
raise err
if firstErr == nil:
firstErr = err
if firstErr != nil:
raise firstErr)()
proc allFuturesThrowing*[T](futs: varargs[Future[T]]): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
proc allFuturesThrowing*[T, E](
futs: varargs[InternalRaisesFuture[T, E]]): Future[void] =
allFuturesThrowing(futs.mapIt(FutureBase(it)))
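Aside: a hypothetical call site for the helper above; all futures are awaited to completion first, then the first non-cancellation failure is re-raised:

import chronos
import ./errorhelpers  # the file above; the import path is illustrative

proc example() {.async: (raises: [CatchableError]).} =
  await allFuturesThrowing(
    sleepAsync(1.milliseconds), sleepAsync(2.milliseconds))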

View File

@@ -14,8 +14,8 @@ import ../libp2p/protocols/secure/secure
import ../libp2p/switch
import ../libp2p/nameresolving/[nameresolver, mockresolver]
import ./asyncunit
export asyncunit, mockresolver
import "."/[asyncunit, errorhelpers]
export asyncunit, errorhelpers, mockresolver
const
StreamTransportTrackerName = "stream.transport"
@@ -35,25 +35,19 @@ const
ChronosStreamTrackerName
]
iterator testTrackers*(extras: openArray[string] = []): TrackerBase =
for name in trackerNames:
let t = getTracker(name)
if not isNil(t): yield t
for name in extras:
let t = getTracker(name)
if not isNil(t): yield t
template checkTracker*(name: string) =
var tracker = getTracker(name)
if tracker.isLeaked():
checkpoint tracker.dump()
if isCounterLeaked(name):
let
tracker = getTrackerCounter(name)
trackerDescription =
"Opened " & name & ": " & $tracker.opened & "\n" &
"Closed " & name & ": " & $tracker.closed
checkpoint trackerDescription
fail()
template checkTrackers*() =
for tracker in testTrackers():
if tracker.isLeaked():
checkpoint tracker.dump()
fail()
for name in trackerNames:
checkTracker(name)
# Also test the GC is not fooling with us
when defined(nimHasWarnBareExcept):
{.push warning[BareExcept]:off.}
@@ -82,11 +76,18 @@ template rng*(): ref HmacDrbgContext =
getRng()
type
WriteHandler* = proc(data: seq[byte]): Future[void] {.gcsafe, raises: [].}
WriteHandler* = proc(
data: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).}
TestBufferStream* = ref object of BufferStream
writeHandler*: WriteHandler
method write*(s: TestBufferStream, msg: seq[byte]): Future[void] =
method write*(
s: TestBufferStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
s.writeHandler(msg)
method getWrapped*(s: TestBufferStream): Connection = nil
@@ -104,11 +105,15 @@ proc bridgedConnections*: (Connection, Connection) =
connB.dir = Direction.In
connA.initStream()
connB.initStream()
connA.writeHandler = proc(data: seq[byte]) {.async.} =
await connB.pushData(data)
connA.writeHandler =
proc(data: seq[byte]) {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
connB.pushData(data)
connB.writeHandler = proc(data: seq[byte]) {.async.} =
await connA.pushData(data)
connB.writeHandler =
proc(data: seq[byte]) {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
connA.pushData(data)
return (connA, connB)
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =

View File

@@ -12,6 +12,7 @@ import ../../libp2p/[builders,
protocols/connectivity/autonat/service,
protocols/ping]
import ../stubs/autonatclientstub
import ../errorhelpers
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -24,7 +24,8 @@ import utils
import ../helpers
proc noop(data: seq[byte]) {.async.} = discard
proc noop(data: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
const MsgIdSuccess = "msg id gen success"

View File

@@ -1,7 +1,7 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -18,8 +18,7 @@ import ./helpers
suite "BufferStream":
teardown:
# echo getTracker(BufferStreamTrackerName).dump()
check getTracker(BufferStreamTrackerName).isLeaked() == false
checkTrackers()
asyncTest "push data to buffer":
let buff = BufferStream.new()

View File

@@ -1,7 +1,7 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -29,11 +29,12 @@ type
peerId: PeerId
method newStream*(
m: TestMuxer,
name: string = "",
lazy: bool = false):
Future[Connection] {.async.} =
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
m: TestMuxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
suite "Connection Manager":
teardown:

View File

@@ -22,13 +22,13 @@ when not defined(macosx):
asyncTest "simple heartbeat":
var i = 0
proc t() {.async.} =
heartbeat "shouldn't see this", 30.milliseconds:
heartbeat "shouldn't see this", 50.milliseconds:
i.inc()
let hb = t()
await sleepAsync(300.milliseconds)
await sleepAsync(500.milliseconds)
await hb.cancelAndWait()
check:
i in 9..11
i in 9..12
asyncTest "change heartbeat period on the fly":
var i = 0
@@ -46,7 +46,7 @@ when not defined(macosx):
# (500 ms - 120 ms) / 75ms = 5x 75ms
# total 9
check:
i in 8..10
i in 8..11
asyncTest "catch up on slow heartbeat":
var i = 0
@@ -63,4 +63,4 @@ when not defined(macosx):
# 360ms remaining, / 30ms = 12x
# total 15
check:
i in 14..16
i in 14..17

View File

@@ -1,7 +1,7 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -32,7 +32,8 @@ suite "Mplex":
suite "channel encoding":
asyncTest "encode header with channel id 0":
proc encHandler(msg: seq[byte]) {.async.} =
proc encHandler(
msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
check msg == fromHex("000873747265616d2031")
let conn = TestBufferStream.new(encHandler)
@@ -40,7 +41,8 @@ suite "Mplex":
await conn.close()
asyncTest "encode header with channel id other than 0":
proc encHandler(msg: seq[byte]) {.async.} =
proc encHandler(
msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
check msg == fromHex("88010873747265616d2031")
let conn = TestBufferStream.new(encHandler)
@@ -48,7 +50,8 @@ suite "Mplex":
await conn.close()
asyncTest "encode header and body with channel id 0":
proc encHandler(msg: seq[byte]) {.async.} =
proc encHandler(
msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
check msg == fromHex("020873747265616d2031")
let conn = TestBufferStream.new(encHandler)
@@ -56,7 +59,8 @@ suite "Mplex":
await conn.close()
asyncTest "encode header and body with channel id other than 0":
proc encHandler(msg: seq[byte]) {.async.} =
proc encHandler(
msg: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
check msg == fromHex("8a010873747265616d2031")
let conn = TestBufferStream.new(encHandler)
@@ -97,7 +101,10 @@ suite "Mplex":
suite "channel half-closed":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -112,7 +119,9 @@ suite "Mplex":
asyncTest "(local close) - should allow reads until remote closes":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.async.} =
proc (
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -139,7 +148,9 @@ suite "Mplex":
asyncTest "(remote close) - channel should close for reading by remote":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.async.} =
proc (
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -162,7 +173,9 @@ suite "Mplex":
let
testData = "Hello!".toBytes
conn = TestBufferStream.new(
proc (data: seq[byte]) {.async.} =
proc (
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
)
chann = LPChannel.init(1, conn, true)
@@ -175,7 +188,10 @@ suite "Mplex":
await conn.close()
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -192,7 +208,10 @@ suite "Mplex":
suite "channel reset":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -205,7 +224,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -220,7 +242,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -239,7 +264,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -254,7 +282,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -279,7 +310,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -293,7 +327,10 @@ suite "Mplex":
await conn.close()
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -311,7 +348,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -323,7 +363,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -335,7 +378,10 @@ suite "Mplex":
await conn.close()
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -364,7 +410,10 @@ suite "Mplex":
await conn.close()
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -376,7 +425,10 @@ suite "Mplex":
await conn.close()
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async.} = discard
proc writeHandler(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
@@ -395,11 +447,15 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
except CancelledError, LPStreamError:
return
finally:
await stream.close()
await mplexListen.handle()
await mplexListen.close()
@@ -432,11 +488,15 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
except CancelledError, LPStreamError:
return
finally:
await stream.close()
await mplexListen.handle()
await mplexListen.close()
@@ -477,13 +537,17 @@ suite "Mplex":
try:
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
await stream.close()
listenJob.complete()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
except CancelledError, LPStreamError:
return
finally:
await stream.close()
listenJob.complete()
await mplexListen.handle()
await sleepAsync(1.seconds) # give chronos some slack to process things
@@ -523,10 +587,14 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
await stream.writeLp("Hello from stream!")
await stream.close()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
await stream.writeLp("Hello from stream!")
except CancelledError, LPStreamError:
return
finally:
await stream.close()
await mplexListen.handle()
await mplexListen.close()
@@ -561,14 +629,21 @@ suite "Mplex":
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count}!"
count.inc
if count == 11:
done.complete()
await stream.close()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(1024)
try:
check string.fromBytes(msg) == &"stream {count}!"
except ValueError as exc:
raiseAssert(exc.msg)
count.inc
if count == 11:
done.complete()
except CancelledError, LPStreamError:
return
finally:
await stream.close()
await mplexListen.handle()
await mplexListen.close()
@@ -605,15 +680,22 @@ suite "Mplex":
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
count.inc
if count == 11:
done.complete()
await stream.close()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(1024)
try:
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
except ValueError as exc:
raiseAssert(exc.msg)
count.inc
if count == 11:
done.complete()
except CancelledError, LPStreamError:
return
finally:
await stream.close()
await mplexListen.handle()
await mplexListen.close()
@@ -650,16 +732,19 @@ suite "Mplex":
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
except LPStreamEOFError:
await stream.close()
return
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
except LPStreamEOFError:
return
except CancelledError, LPStreamError:
return
finally:
await stream.close()
check false
check false
await mplexListen.handle()
await mplexListen.close()
@@ -700,14 +785,14 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
listenStreams.add(stream)
count.inc()
if count == 10:
done.complete()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
count.inc()
if count == 10:
done.complete()
await stream.join()
await noCancel stream.join()
await mplexListen.handle()
await mplexListen.close()
@@ -764,10 +849,10 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
await stream.join()
await noCancel stream.join()
await mplexListen.handle()
await mplexListen.close()
@@ -808,10 +893,10 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
await stream.join()
await noCancel stream.join()
await mplexListen.handle()
await mplexListen.close()
@@ -854,10 +939,10 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
await stream.join()
await noCancel stream.join()
mplexHandle = mplexListen.handle()
await mplexHandle
@@ -899,10 +984,10 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
await stream.join()
await noCancel stream.join()
await mplexListen.handle()
await mplexListen.close()
@@ -946,10 +1031,10 @@ suite "Mplex":
proc acceptHandler() {.async.} =
listenConn = await transport1.accept()
let mplexListen = Mplex.new(listenConn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
listenStreams.add(stream)
await stream.join()
await noCancel stream.join()
await mplexListen.handle()
await mplexListen.close()
@@ -995,15 +1080,17 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
except CatchableError as e:
echo e.msg
await stream.close()
complete.complete()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
except CancelledError as e:
echo e.msg
except LPStreamError as e:
echo e.msg
await stream.close()
complete.complete()
await mplexListen.handle()
await mplexListen.close()
@@ -1067,12 +1154,16 @@ suite "Mplex":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async.} =
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
await stream.close()
complete.complete()
mplexListen.streamHandler =
proc(stream: Connection) {.async: (raises: []).} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
except CancelledError, LPStreamError:
return
finally:
await stream.close()
complete.complete()
await mplexListen.handle()
await mplexListen.close()
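Aside: the rewritten handlers above all share one shape: a `{.async: (raises: []).}` handler must catch everything itself. A condensed sketch, assuming this test file's imports (chronos, libp2p):

proc handler(stream: Connection) {.async: (raises: []).} =
  try:
    discard await stream.readLp(1024)
  except CancelledError, LPStreamError:
    return  # swallow: nothing may escape a raises: [] proc
  finally:
    await stream.close()  # close() itself raises nothing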

View File

@@ -307,6 +307,29 @@ const
)
]
ZeroPathFailureVectors = [
"900300", # /unix//
"2A00", # /ip6zone//
"3800", # /dnsaddr//
"3600", # /dns4//
"3700", # /dns6//
]
CrashesVectors = [
"0401020304060050900366",
"2926013880ffffff2a040402030406005090030404020304060150900350900302030406005090030c2f383838383838383838383838383838382a",
"04010103042a040101030406005090030c",
"bd03afadec040bc547f9658668b1ff00000073001f101a37f54cc07fb4bc03bc039a18fbee063690033838383838383838383838383838383838383838383838383838383838",
"2a040402030406005090030406ffff",
"29260100094f81800e0000003880ffffff2a0404020304060050900304040203040404020304060050900304040203040601509003509003020406",
"38060606383838380606060600802a2438380606060600802a047f0000012a163c803c3c3c0606060680ffff002a0404020304380600509003040402030406015090035e2a2a2a2a2a2a2a2a",
"38060606060606060606060600802a2a2a047f0000012a063c803c3c3c383838012a063c80385fffffff2a260402030406005090030404020304060150900350",
"a503220000000000000000000000000000000002003880ffffff2a040402030406005090030404020304060150900350900302030406005090030c",
"047f0000012a2a2a2a2a2a2a2a2a2a2a2a2a2a2a062a2a042a8000142a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a50900350900302030406005090030c2f622f00000203040600030406005090030c2f612f2a712f63030406005090030c2f612f2a622f632a2a002a2a2a2a2a2a372a",
"047f000001062a2a042a8000142a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2ad52a2a2a2a2a2a2a2a2a2a50900350900302030406005090030c2f622f00000203040600030406005090030c2f612f2a622f63030406005090030c2f612f2a622f632a2a002a2a2a2a2a2a37d52a2a2a2a2a2a2a2a2a2a5090032a",
"90030c2a04"
]
suite "MultiAddress test suite":
test "go-multiaddr success test vectors":
@@ -461,3 +484,13 @@ suite "MultiAddress test suite":
let error = pb.getRepeatedField(1, decoded).error()
check error == ProtoError.IncorrectBlob
check decoded.len == 0
test "MultiAddress with empty path test":
for item in ZeroPathFailureVectors:
let res = MultiAddress.init(hexToSeqByte(item))
check res.isErr()
test "MultiAddress crash test":
for item in CrashesVectors:
let res = MultiAddress.init(hexToSeqByte(item))
check res.isErr()
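Aside: `MultiAddress.init` returns a `Result`, so malformed bytes surface as an error value rather than a crash or an exception; for the first vector above (a hedged sketch, assuming the test suite's dependencies):

import results           # isErr on the returned MaResult
import stew/byteutils    # hexToSeqByte
import libp2p/multiaddress

let res = MultiAddress.init(hexToSeqByte("900300"))  # /unix// (empty path)
doAssert res.isErr()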

View File

@@ -1,7 +1,7 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -32,43 +32,57 @@ type
TestSelectStream = ref object of Connection
step*: int
method readOnce*(s: TestSelectStream,
pbytes: pointer,
nbytes: int): Future[int] {.async.} =
method readOnce*(
s: TestSelectStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
let fut = newFuture[int]()
case s.step:
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
return buf.len
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
return buf.len
of 3:
var buf = newSeq[byte](1)
buf[0] = 18
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
return buf.len
of 4:
var buf = "/test/proto/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
return buf.len
else:
copyMem(pbytes,
cstring("\0x3na\n"),
"\0x3na\n".len())
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
fut.complete(buf.len)
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
fut.complete(buf.len)
of 3:
var buf = newSeq[byte](1)
buf[0] = 18
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
fut.complete(buf.len)
of 4:
var buf = "/test/proto/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
fut.complete(buf.len)
else:
copyMem(pbytes,
cstring("\0x3na\n"),
"\0x3na\n".len())
return "\0x3na\n".len()
fut.complete("\0x3na\n".len())
fut
method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
method write*(
s: TestSelectStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
fut
method close(s: TestSelectStream) {.async.} =
method close(s: TestSelectStream) {.async: (raises: [], raw: true).} =
s.isClosed = true
s.isEof = true
let fut = newFuture[void]()
fut.complete()
fut
proc newTestSelectStream(): TestSelectStream =
new result
@@ -76,50 +90,65 @@ proc newTestSelectStream(): TestSelectStream =
## Mock stream for handles `ls` test
type
LsHandler = proc(procs: seq[byte]): Future[void] {.gcsafe, raises: [].}
LsHandler = proc(
procs: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).}
TestLsStream = ref object of Connection
step*: int
ls*: LsHandler
method readOnce*(s: TestLsStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: TestLsStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
let fut = newFuture[int]()
case s.step:
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
return buf.len()
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
return buf.len()
of 3:
var buf = newSeq[byte](1)
buf[0] = 3
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
return buf.len()
of 4:
var buf = "ls\n"
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
else:
var buf = "na\n"
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
fut.complete(buf.len())
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
fut.complete(buf.len())
of 3:
var buf = newSeq[byte](1)
buf[0] = 3
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
fut.complete(buf.len())
of 4:
var buf = "ls\n"
copyMem(pbytes, addr buf[0], buf.len())
fut.complete(buf.len())
else:
var buf = "na\n"
copyMem(pbytes, addr buf[0], buf.len())
fut.complete(buf.len())
fut
method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
method write*(
s: TestLsStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
if s.step == 4:
await s.ls(msg)
return s.ls(msg)
let fut = newFuture[void]()
fut.complete()
fut
method close(s: TestLsStream) {.async.} =
method close(s: TestLsStream): Future[void] {.async: (raises: [], raw: true).} =
s.isClosed = true
s.isEof = true
let fut = newFuture[void]()
fut.complete()
fut
proc newTestLsStream(ls: LsHandler): TestLsStream {.gcsafe.} =
new result
@@ -128,52 +157,67 @@ proc newTestLsStream(ls: LsHandler): TestLsStream {.gcsafe.} =
## Mock stream for handles `na` test
type
NaHandler = proc(procs: string): Future[void] {.gcsafe, raises: [].}
NaHandler = proc(
procs: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).}
TestNaStream = ref object of Connection
step*: int
na*: NaHandler
method readOnce*(s: TestNaStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: TestNaStream,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
let fut = newFuture[int]()
case s.step:
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
return buf.len()
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
return buf.len()
of 3:
var buf = newSeq[byte](1)
buf[0] = 18
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
return buf.len()
of 4:
var buf = "/test/proto/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
else:
copyMem(pbytes,
cstring("\x03na\n"),
"\x03na\n".len())
of 1:
var buf = newSeq[byte](1)
buf[0] = 19
copyMem(pbytes, addr buf[0], buf.len())
s.step = 2
fut.complete(buf.len())
of 2:
var buf = "/multistream/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
s.step = 3
fut.complete(buf.len())
of 3:
var buf = newSeq[byte](1)
buf[0] = 18
copyMem(pbytes, addr buf[0], buf.len())
s.step = 4
fut.complete(buf.len())
of 4:
var buf = "/test/proto/1.0.0\n"
copyMem(pbytes, addr buf[0], buf.len())
fut.complete(buf.len())
else:
copyMem(pbytes,
cstring("\x03na\n"),
"\x03na\n".len())
return "\x03na\n".len()
fut.complete("\x03na\n".len())
fut
method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
method write*(
s: TestNaStream,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
if s.step == 4:
await s.na(string.fromBytes(msg))
return s.na(string.fromBytes(msg))
let fut = newFuture[void]()
fut.complete()
fut
method close(s: TestNaStream) {.async.} =
method close(s: TestNaStream): Future[void] {.async: (raises: [], raw: true).} =
s.isClosed = true
s.isEof = true
let fut = newFuture[void]()
fut.complete()
fut
proc newTestNaStream(na: NaHandler): TestNaStream =
new result
@@ -210,7 +254,8 @@ suite "Multistream select":
var conn: Connection = nil
let done = newFuture[void]()
proc testLsHandler(proto: seq[byte]) {.async.} =
proc testLsHandler(
proto: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
var strProto: string = string.fromBytes(proto)
check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
await conn.close()
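The `\x26` prefix checked above is the length byte of the `ls` payload: two 19-byte protocol lines add up to 38, i.e. 0x26. A quick check, as a sketch:

doAssert "/test/proto1/1.0.0\n".len + "/test/proto2/1.0.0\n".len == 0x26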
@@ -230,7 +275,9 @@ suite "Multistream select":
let ms = MultistreamSelect.new()
var conn: Connection = nil
proc testNaHandler(msg: string): Future[void] {.async.} =
proc testNaHandler(
msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
check msg == "\x03na\n"
await conn.close()
conn = newTestNaStream(testNaHandler)


@@ -1,7 +1,7 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -18,16 +18,21 @@ import
],
./helpers
proc newBlockerFut(): Future[void] {.async: (raises: [], raw: true).} =
newFuture[void]()
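The helper gives every "blocker" future an explicit empty raises list, so the `raises: []` handlers below can await it directly. Typical use, mirroring the tests that follow (a sketch, assuming this file's imports):

let blocker = newBlockerFut()  # stays pending until completed
# ... a handler does `await blocker` somewhere ...
blocker.complete()             # releases every waiter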
suite "Yamux":
teardown:
checkTrackers()
template mSetup(ws: int = YamuxDefaultWindowSize) {.inject.} =
template mSetup(ws: int = YamuxDefaultWindowSize,
inTo: Duration = 5.minutes,
outTo: Duration = 5.minutes) {.inject.} =
# TODO: in a template to avoid a threadvar
let
(conna {.inject.}, connb {.inject.}) = bridgedConnections()
yamuxa {.inject.} = Yamux.new(conna, windowSize = ws)
yamuxb {.inject.} = Yamux.new(connb, windowSize = ws)
yamuxa {.inject.} = Yamux.new(conna, windowSize = ws, inTimeout = inTo, outTimeout = outTo)
yamuxb {.inject.} = Yamux.new(connb, windowSize = ws, inTimeout = inTo, outTimeout = outTo)
(handlera, handlerb) = (yamuxa.handle(), yamuxb.handle())
defer:
@@ -40,10 +45,14 @@ suite "Yamux":
asyncTest "Roundtrip of small messages":
mSetup()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
await conn.close()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
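This try/except/finally shape recurs in every handler below: a `raises: []` handler must swallow stream errors itself, and the `finally` guarantees the connection is closed on every path. A generic sketch of the shape, assuming this file's imports:

proc handler(conn: Connection) {.async: (raises: []).} =
  try:
    discard await conn.readLp(100)      # may raise LPStreamError
  except CancelledError, LPStreamError:
    return                              # swallow: the handler must not raise
  finally:
    await conn.close()                  # runs on success, error and cancellation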
@@ -55,15 +64,19 @@ suite "Yamux":
asyncTest "Continuing read after close":
mSetup()
let
readerBlocker = newFuture[void]()
handlerBlocker = newFuture[void]()
readerBlocker = newBlockerFut()
handlerBlocker = newBlockerFut()
var numberOfRead = 0
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await readerBlocker
var buffer: array[25600, byte]
while (await conn.readOnce(addr buffer[0], 25600)) > 0:
numberOfRead.inc()
await conn.close()
try:
var buffer: array[25600, byte]
while (await conn.readOnce(addr buffer[0], 25600)) > 0:
numberOfRead.inc()
except CancelledError, LPStreamError:
return
finally:
await conn.close()
handlerBlocker.complete()
let streamA = await yamuxa.newStream()
@@ -78,12 +91,16 @@ suite "Yamux":
suite "Window exhaustion":
asyncTest "Basic exhaustion blocking":
mSetup()
let readerBlocker = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
let readerBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await readerBlocker
var buffer: array[160000, byte]
discard await conn.readOnce(addr buffer[0], 160000)
await conn.close()
try:
var buffer: array[160000, byte]
discard await conn.readOnce(addr buffer[0], 160000)
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -101,12 +118,16 @@ suite "Yamux":
asyncTest "Exhaustion doesn't block other channels":
mSetup()
let readerBlocker = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
let readerBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await readerBlocker
var buffer: array[160000, byte]
discard await conn.readOnce(addr buffer[0], 160000)
await conn.close()
try:
var buffer: array[160000, byte]
discard await conn.readOnce(addr buffer[0], 160000)
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -118,10 +139,14 @@ suite "Yamux":
# Now that the secondWriter is stuck, create a second stream
# and exchange some data
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
await conn.close()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamB = await yamuxa.newStream()
await streamB.writeLp(fromHex("1234"))
@@ -136,15 +161,19 @@ suite "Yamux":
asyncTest "Can set custom window size":
mSetup()
let writerBlocker = newFuture[void]()
let writerBlocker = newBlockerFut()
var numberOfRead = 0
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
YamuxChannel(conn).setMaxRecvWindow(20)
var buffer: array[256000, byte]
while (await conn.readOnce(addr buffer[0], 256000)) > 0:
numberOfRead.inc()
writerBlocker.complete()
await conn.close()
try:
var buffer: array[256000, byte]
while (await conn.readOnce(addr buffer[0], 256000)) > 0:
numberOfRead.inc()
writerBlocker.complete()
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -161,12 +190,16 @@ suite "Yamux":
asyncTest "Saturate until reset":
mSetup()
let writerBlocker = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
let writerBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await writerBlocker
var buffer: array[256, byte]
check: (await conn.readOnce(addr buffer[0], 256)) == 0
await conn.close()
try:
var buffer: array[256, byte]
check: (await conn.readOnce(addr buffer[0], 256)) == 0
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -182,12 +215,16 @@ suite "Yamux":
asyncTest "Increase window size":
mSetup(512000)
let readerBlocker = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
let readerBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await readerBlocker
var buffer: array[260000, byte]
discard await conn.readOnce(addr buffer[0], 260000)
await conn.close()
try:
var buffer: array[260000, byte]
discard await conn.readOnce(addr buffer[0], 260000)
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -205,17 +242,20 @@ suite "Yamux":
asyncTest "Reduce window size":
mSetup(64000)
let readerBlocker1 = newFuture[void]()
let readerBlocker2 = newFuture[void]()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
await readerBlocker1
var buffer: array[256000, byte]
# For the first roundtrip, the send window size is assumed to be 256k
discard await conn.readOnce(addr buffer[0], 256000)
await readerBlocker2
discard await conn.readOnce(addr buffer[0], 40000)
await conn.close()
let readerBlocker1 = newBlockerFut()
let readerBlocker2 = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
await readerBlocker1
var buffer: array[256000, byte]
# For the first roundtrip, the send window size is assumed to be 256k
discard await conn.readOnce(addr buffer[0], 256000)
await readerBlocker2
discard await conn.readOnce(addr buffer[0], 40000)
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -237,15 +277,75 @@ suite "Yamux":
await wait(thirdWriter, 1.seconds)
await streamA.close()
suite "Timeout testing":
asyncTest "Check if InTimeout close both streams correctly":
mSetup(inTo = 1.seconds)
let blocker = newBlockerFut()
let connBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
await blocker
check conn.isClosed
connBlocker.complete()
except CancelledError, LPStreamError:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
await streamA.writeLp(fromHex("1234"))
check (await streamA.readLp(100)) == fromHex("5678")
# wait for the timeout to happen; the sleep duration is set to 4 seconds
# because the timeout can take a while to trigger
await sleepAsync(4.seconds)
blocker.complete()
check streamA.isClosed
await connBlocker
asyncTest "Check if OutTimeout close both streams correctly":
mSetup(outTo = 1.seconds)
let blocker = newBlockerFut()
let connBlocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
check (await conn.readLp(100)) == fromHex("1234")
await conn.writeLp(fromHex("5678"))
await blocker
check conn.isClosed
connBlocker.complete()
except CancelledError, LPStreamError:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
await streamA.writeLp(fromHex("1234"))
check (await streamA.readLp(100)) == fromHex("5678")
# wait for the timeout to happen; the sleep duration is set to 4 seconds
# because the timeout can take a while to trigger
await sleepAsync(4.seconds)
blocker.complete()
check streamA.isClosed
await connBlocker
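Outside the template, the timeout wiring is just the two new Yamux.new parameters. A sketch using the names from mSetup above:

let yamux = Yamux.new(conna,
                      windowSize = YamuxDefaultWindowSize,
                      inTimeout = 1.seconds,    # exercised by the InTimeout test above
                      outTimeout = 5.minutes)   # exercised by the OutTimeout test above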
suite "Exception testing":
asyncTest "Local & Remote close":
mSetup()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
check (await conn.readLp(100)) == fromHex("1234")
await conn.close()
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
try:
check (await conn.readLp(100)) == fromHex("1234")
except CancelledError, LPStreamError:
return
finally:
await conn.close()
expect LPStreamClosedError: await conn.writeLp(fromHex("102030"))
check (await conn.readLp(100)) == fromHex("5678")
try:
check (await conn.readLp(100)) == fromHex("5678")
except CancelledError, LPStreamError:
return
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
@@ -257,17 +357,21 @@ suite "Yamux":
asyncTest "Local & Remote reset":
mSetup()
let blocker = newFuture[void]()
let blocker = newBlockerFut()
yamuxb.streamHandler = proc(conn: Connection) {.async.} =
yamuxb.streamHandler = proc(conn: Connection) {.async: (raises: []).} =
await blocker
expect LPStreamResetError: discard await conn.readLp(100)
expect LPStreamResetError: await conn.writeLp(fromHex("1234"))
await conn.close()
try:
expect LPStreamResetError: discard await conn.readLp(100)
expect LPStreamResetError: await conn.writeLp(fromHex("1234"))
except CancelledError, LPStreamError:
return
finally:
await conn.close()
let streamA = await yamuxa.newStream()
check streamA == yamuxa.getStreams()[0]
await yamuxa.close()
expect LPStreamClosedError: await streamA.writeLp(fromHex("1234"))
expect LPStreamClosedError: discard await streamA.readLp(100)


@@ -0,0 +1,16 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
WORKDIR /app
COPY .pinned libp2p.nimble nim-libp2p/
RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p -d:chronicles_log_level=WARN --threads:off ./tests/transport-interop/main.nim
ENTRYPOINT ["/app/nim-libp2p/tests/transport-interop/main"]


@@ -0,0 +1,96 @@
import
std/[os, strutils, sequtils],
chronos, redis, serialization, json_serialization
import ../../libp2p/[builders, protocols/ping, transports/wstransport]
type
ResultJson = object
handshakePlusOneRTTMillis: float
pingRTTMilllis: float
let
testTimeout =
try: seconds(parseInt(getEnv("test_timeout_seconds")))
except CatchableError: 3.minutes
proc main {.async.} =
let
transport = getEnv("transport")
muxer = getEnv("muxer")
secureChannel = getEnv("security")
isDialer = getEnv("is_dialer") == "true"
envIp = getEnv("ip", "0.0.0.0")
ip =
# nim-libp2p doesn't expand 0.0.0.0 to a concrete interface address
if envIp == "0.0.0.0":
block:
let addresses = getInterfaces().filterIt(it.name == "eth0").mapIt(it.addresses)
if addresses.len < 1 or addresses[0].len < 1:
quit "Can't find local ip!"
($addresses[0][0].host).split(":")[0]
else:
envIp
redisAddr = getEnv("redis_addr", "redis:6379").split(":")
# using synchronous redis because async redis is based on
# asyncdispatch instead of chronos
redisClient = open(redisAddr[0], Port(parseInt(redisAddr[1])))
switchBuilder = SwitchBuilder.new()
case transport:
of "tcp":
discard switchBuilder.withTcpTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
)
of "ws":
discard switchBuilder.withTransport(proc (upgr: Upgrade): Transport = WsTransport.new(upgr)).withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
)
else: doAssert false
case secureChannel:
of "noise": discard switchBuilder.withNoise()
else: doAssert false
case muxer:
of "yamux": discard switchBuilder.withYamux()
of "mplex": discard switchBuilder.withMplex()
else: doAssert false
let
rng = newRng()
switch = switchBuilder.withRng(rng).build()
pingProtocol = Ping.new(rng = rng)
switch.mount(pingProtocol)
await switch.start()
defer: await switch.stop()
if not isDialer:
discard redisClient.rPush("listenerAddr", $switch.peerInfo.fullAddrs.tryGet()[0])
await sleepAsync(100.hours) # will get cancelled
else:
let listenerAddr =
try: redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
except Exception as e:
raise newException(CatchableError, e.msg)
let
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
dialingStart = Moment.now()
remotePeerId = await switch.connect(remoteAddr)
stream = await switch.dial(remotePeerId, PingCodec)
pingDelay = await pingProtocol.ping(stream)
totalDelay = Moment.now() - dialingStart
await stream.close()
echo Json.encode(
ResultJson(
handshakePlusOneRTTMillis: float(totalDelay.milliseconds),
pingRTTMilllis: float(pingDelay.milliseconds)
)
)
quit(0)
discard waitFor(main().withTimeout(testTimeout))
quit(1)
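Exit-code logic: on success, main prints the result JSON and calls quit(0) before the timeout can fire, so reaching the final quit(1) means timeout or failure. The same chronos pattern in isolation, as a runnable sketch:

import chronos

proc work() {.async.} =
  await sleepAsync(10.milliseconds)  # stand-in for the real interop run
  quit(0)                            # the success path exits here

discard waitFor work().withTimeout(1.seconds)
quit(1)                              # reached only on timeout or failure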


@@ -0,0 +1,15 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
],
"secureChannels": [
"noise"
],
"muxers": [
"mplex",
"yamux"
]
}