Compare commits


35 Commits

Author SHA1 Message Date
Jacek Sieka
900c83c77b avoid obsolete import 2023-05-11 09:02:27 +02:00
Tanguy
95e98e8c51 Fix traffic metrics (#879) 2023-04-03 12:37:23 +00:00
Tanguy
4aa615c44c GossipSub: TimedEntry & shortAgent fixes (#858) 2023-04-03 11:05:01 +02:00
Tanguy
6b61ce8c91 GossipSub: Better IWANT handling (#875) 2023-04-03 10:56:20 +02:00
Alvaro Revuelta
53b060f8f0 Add getters for conns and streams (#878) 2023-03-31 00:16:39 +02:00
diegomrsantos
af5299f26c Create an ObservedAddrManager and add an AddressMapper in AutonatService and AutoRelayService (#871)
Co-authored-by: Tanguy <tanguy@status.im>
2023-03-24 15:42:49 +00:00
Tanguy
bac754e2ad Various gossipsub fixes (#827) 2023-03-21 17:13:25 +01:00
Tanguy
8d5ea43e2b Upgrade flow refactoring (#807) 2023-03-08 12:30:19 +01:00
Jacek Sieka
e573238705 reexport public types (#872) 2023-03-06 15:36:10 +00:00
Tanguy
c1a3bd8fee Fix pubsub CI logs (#861) 2023-03-01 16:59:44 +01:00
diegomrsantos
ddeb7b3bd4 Handle when peers ask each other at the same time (#865) 2023-02-21 17:49:41 +01:00
Tanguy
382b992e00 Interop tests (#864) 2023-02-20 14:26:53 +01:00
Tanguy
408dcf12bd Fix backward compatibility of #822 (#862) 2023-02-15 17:18:29 +01:00
Ludovic Chenut
0012b639c8 Fix testrelay (#860) 2023-02-15 11:18:42 +01:00
Tanguy
f7f1e89669 TCP Transport: enable NO_DELAY for clients (#822) 2023-02-14 10:35:44 +01:00
Tanguy
f14ada3dcf Move tests flags from 'nimble test' to 'config.nims' (#852) 2023-02-10 11:32:21 +01:00
diegomrsantos
444b837923 Autonat doesn't ask an incoming peer (#857) 2023-02-09 17:40:04 +01:00
diegomrsantos
f89bd0c77c Autonat dials dns addrs (#856) 2023-02-09 16:53:46 +01:00
diegomrsantos
e68186373b Dialing addrs concurrently in autonat (#855) 2023-02-07 18:51:17 +01:00
diegomrsantos
266c7b117a Add anyCompleted proc (#853) 2023-02-07 18:50:42 +01:00
Ludovic Chenut
0e28d3b828 Add the peerId to the transport.dial (#842) 2023-01-31 12:46:10 +01:00
diegomrsantos
4ace70d53b Connect is able to force a new connection (#849) 2023-01-25 11:19:03 +01:00
Tanguy
ca19f8fdbf Autonat service: handle connections limits (#846)
Co-authored-by: diegomrsantos <diego@status.im>
2023-01-24 17:04:42 +01:00
Tanguy
351bda2b56 Add expected connections to connmngr (#845)
Co-authored-by: diegomrsantos <diego@status.im>
2023-01-23 22:28:39 +00:00
Tanguy
7d9c43a5ce Fix CI for nim devel 1.6 (#848) 2023-01-23 15:53:15 +01:00
Tanguy
c11772c94e Happy new year! (#847) 2023-01-20 15:47:40 +01:00
Ludovic Chenut
489c115132 Autorelay service (#819) 2023-01-17 16:18:38 +01:00
Tanguy
166c0d1c87 Fix PubSub subscribe on connection race condition (#809) 2023-01-10 13:33:14 +01:00
Etan Kissling
ba451196e8 Avoid closing connection on channel EOF (#816) 2023-01-06 15:18:16 +01:00
diegomrsantos
9f658c151e Autonat refactoring (#834) 2023-01-06 11:14:38 +01:00
diegomrsantos
e304ad0f7e Remove unnecessary async (#836) 2023-01-05 15:02:52 +01:00
diegomrsantos
5e3323d43f More autonat tests (#833) 2022-12-23 19:10:15 +00:00
diegomrsantos
9532bff983 Ignore unknown answers (#831)
Co-authored-by: Tanguy <tanguy@status.im>
2022-12-23 16:49:25 +01:00
diegomrsantos
676786b00e Add Autonat timeout (#829)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2022-12-22 19:29:31 +00:00
diegomrsantos
d521c57b82 Handle dial error correctly (#830) 2022-12-22 17:33:59 +01:00
143 changed files with 2613 additions and 1694 deletions

54
.github/workflows/interop.yml (new file)
View File

@@ -0,0 +1,54 @@
name: Interoperability Testing
on:
pull_request:
push:
branches:
- unstable
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
run-multidim-interop:
name: Run multidimensional interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
with:
repository: libp2p/test-plans
submodules: true
fetch-depth: 0
- name: Build image
run: >
cd multidim-interop/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head
- name: Create ping-version.json
run: >
(cat << EOF
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
],
"secureChannels": [
"noise"
],
"muxers": [
"mplex",
"yamux"
]
}
EOF
) > ${{ github.workspace }}/test_head.json
- uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json

16
.pinned
View File

@@ -1,16 +1,16 @@
bearssl;https://github.com/status-im/nim-bearssl@#a647994910904b0103a05db3a5ec1ecfc4d91a88
bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#75d030ff71264513fb9701c75a326cd36fcb4692
chronos;https://github.com/status-im/nim-chronos@#f7835a192b45c37e97614d865141f21eea8c156e
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#b42daf41d8eb4fbce40add6836bed838f8d85b6f
faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
nimcrypto;https://github.com/cheatfate/nimcrypto@#24e006df85927f64916e60511620583b11403178
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#d77417cba6896c26287a68e6a95762e45a1b87e5
stew;https://github.com/status-im/nim-stew@#7184d2424dc3945657884646a72715d494917aad
serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
stew;https://github.com/status-im/nim-stew@#407a59883691d362db2fe8eab7f7c3b1f75112ff
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#691f069b209d372b1240d5ae1f57fb7bbafeaba7
zlib;https://github.com/status-im/nim-zlib@#6a6670afba6b97b29b920340e2641978c05ab4d8
websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a

View File

@@ -1,6 +1,8 @@
# to allow locking
if dirExists("nimbledeps/pkgs"):
switch("NimblePath", "nimbledeps/pkgs")
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
@@ -10,6 +12,7 @@ switch("warning", "LockLevel:off")
if (NimMajor, NimMinor) < (1, 6):
--styleCheck:hint
else:
switch("warningAsError", "UseBase:on")
--styleCheck:error
# Avoid some rare stack corruption while using exceptions with a SEH-enabled

View File

@@ -57,8 +57,7 @@ proc main() {.async.} =
let
# Create a relay address to swDst using swRel as the relay
addrs = MultiAddress.init($swRel.peerInfo.addrs[0] & "/p2p/" &
$swRel.peerInfo.peerId & "/p2p-circuit/p2p/" &
$swDst.peerInfo.peerId).get()
$swRel.peerInfo.peerId & "/p2p-circuit").get()
# Connect Dst to the relay
await swDst.connect(swRel.peerInfo.peerId, swRel.peerInfo.addrs)
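A hedged sketch of dialing through the relay under the new convention: the hunk above drops the destination peer id from the circuit address, so it travels as a separate argument instead. Assumes the swRel/swDst switches from this example plus a hypothetical source switch swSrc and protocol codec "/customProto":

let relayAddr = MultiAddress.init(
  $swRel.peerInfo.addrs[0] & "/p2p/" & $swRel.peerInfo.peerId &
  "/p2p-circuit").get()
# The destination peer id is no longer part of the address:
let conn = await swSrc.dial(swDst.peerInfo.peerId, @[relayAddr], "/customProto")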

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -22,9 +22,7 @@ requires "nim >= 1.2.0",
import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --skipParentCfg --opt:speed -d:debug -d:libp2p_agents_metrics -d:libp2p_protobuf_metrics -d:libp2p_network_protocols_metrics -d:libp2p_mplex_metrics "
excstr.add(" -d:chronicles_sinks=textlines[stdout],json[dynamic] -d:chronicles_log_level=TRACE ")
excstr.add(" -d:chronicles_runtime_filtering=TRUE ")
var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
excstr.add(" " & getEnv("NIMFLAGS") & " ")
excstr.add(" --verbosity:0 --hints:off ")
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
@@ -143,10 +141,20 @@ task install_pinned, "Reads the lockfile":
if system.dirExists("nimbledeps/pkgs"): "nimbledeps/pkgs"
else: "nimbledeps/pkgs2"
for dependency in listDirs(nimblePkgs):
let filename = dependency.extractFilename
if toInstall.anyIt(filename.startsWith(it[0]) and
filename.endsWith(it[1].split('#')[^1])) == false:
rmDir(dependency)
let
fileName = dependency.extractFilename
fileContent = readFile(dependency & "/nimblemeta.json")
packageName = fileName.split('-')[0]
if toInstall.anyIt(
it[0] == packageName and
(
it[1].split('#')[^1] in fileContent or # nimble for nim 2.X
fileName.endsWith(it[1].split('#')[^1]) # nimble for nim 1.X
)
) == false or
fileName.split('-')[^1].len < 20: # safeguard for nimble for nim 1.X
rmDir(dependency)
task unpin, "Restore global package use":
rmDir("nimbledeps")

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,7 +27,7 @@ import
crypto/crypto, transports/[transport, tcptransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat, relay/relay, relay/client, relay/rtransport],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
nameresolving/nameresolver,
errors, utility
@@ -230,7 +230,7 @@ proc build*(b: SwitchBuilder): Switch
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(identify, b.muxers, secureManagerInstances, connManager, ms)
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
let
transports = block:
@@ -247,14 +247,13 @@ proc build*(b: SwitchBuilder): Switch
let peerStore =
if isSome(b.peerStoreCapacity):
PeerStore.new(b.peerStoreCapacity.get())
PeerStore.new(identify, b.peerStoreCapacity.get())
else:
PeerStore.new()
PeerStore.new(identify)
let switch = newSwitch(
peerInfo = peerInfo,
transports = transports,
identity = identify,
secureManagers = secureManagerInstances,
connManager = connManager,
ms = ms,
@@ -262,6 +261,8 @@ proc build*(b: SwitchBuilder): Switch
peerStore = peerStore,
services = b.services)
switch.mount(identify)
if b.autonat:
let autonat = Autonat.new(switch)
switch.mount(autonat)
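For context, a minimal sketch of building a switch with the Autonat server mounted as above. It assumes the usual SwitchBuilder setters from this file (withAutonat flips b.autonat, so build() mounts the server):

let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withMplex()
  .withNoise()
  .withAutonat()   # sets b.autonat; build() then mounts Autonat.new(switch)
  .build()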

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -32,6 +32,7 @@ const
type
TooManyConnectionsError* = object of LPError
AlreadyExpectingConnectionError* = object of LPError
ConnEventKind* {.pure.} = enum
Connected, # A connection was made and securely upgraded - there may be
@@ -54,7 +55,6 @@ type
PeerEventKind* {.pure.} = enum
Left,
Identified,
Joined
PeerEvent* = object
@@ -67,18 +67,14 @@ type
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
MuxerHolder = object
muxer: Muxer
handle: Future[void]
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
inSema*: AsyncSemaphore
outSema*: AsyncSemaphore
conns: Table[PeerId, HashSet[Connection]]
muxed: Table[Connection, MuxerHolder]
muxed: Table[PeerId, seq[Muxer]]
connEvents: array[ConnEventKind, OrderedSet[ConnEventHandler]]
peerEvents: array[PeerEventKind, OrderedSet[PeerEventHandler]]
expectedConnectionsOverLimit*: Table[(PeerId, Direction), Future[Muxer]]
peerStore*: PeerStore
ConnectionSlot* = object
@@ -108,15 +104,18 @@ proc new*(C: type ConnManager,
outSema: outSema)
proc connCount*(c: ConnManager, peerId: PeerId): int =
c.conns.getOrDefault(peerId).len
c.muxed.getOrDefault(peerId).len
proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
var peers = newSeq[PeerId]()
for peerId, conns in c.conns:
if conns.anyIt(it.dir == dir):
for peerId, mux in c.muxed:
if mux.anyIt(it.connection.dir == dir):
peers.add(peerId)
return peers
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
return c.muxed
proc addConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
@@ -200,14 +199,6 @@ proc triggerPeerEvents*(c: ConnManager,
return
try:
let count = c.connCount(peerId)
if event.kind == PeerEventKind.Joined and count != 1:
trace "peer already joined", peer = peerId, event = $event
return
elif event.kind == PeerEventKind.Left and count != 0:
trace "peer still connected or already left", peer = peerId, event = $event
return
trace "triggering peer events", peer = peerId, event = $event
var peerEvents: seq[Future[void]]
@@ -220,18 +211,22 @@ proc triggerPeerEvents*(c: ConnManager,
except CatchableError as exc: # handlers should not raise!
warn "Exception in triggerPeerEvents", exc = exc.msg, peer = peerId
proc contains*(c: ConnManager, conn: Connection): bool =
## checks if a connection is being tracked by the
## connection manager
##
proc expectConnection*(c: ConnManager, p: PeerId, dir: Direction): Future[Muxer] {.async.} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
raise newException(AlreadyExpectingConnectionError, "Already expecting an incoming connection from that peer")
if isNil(conn):
return
let future = newFuture[Muxer]()
c.expectedConnectionsOverLimit[key] = future
return conn in c.conns.getOrDefault(conn.peerId)
try:
return await future
finally:
c.expectedConnectionsOverLimit.del(key)
proc contains*(c: ConnManager, peerId: PeerId): bool =
peerId in c.conns
peerId in c.muxed
proc contains*(c: ConnManager, muxer: Muxer): bool =
## checks if a muxer is being tracked by the connection
@@ -239,184 +234,140 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
##
if isNil(muxer):
return
return false
let conn = muxer.connection
if conn notin c:
return
return muxer in c.muxed.getOrDefault(conn.peerId)
if conn notin c.muxed:
return
proc closeMuxer(muxer: Muxer) {.async.} =
trace "Cleaning up muxer", m = muxer
return muxer == c.muxed.getOrDefault(conn).muxer
proc closeMuxerHolder(muxerHolder: MuxerHolder) {.async.} =
trace "Cleaning up muxer", m = muxerHolder.muxer
await muxerHolder.muxer.close()
if not(isNil(muxerHolder.handle)):
await muxer.close()
if not(isNil(muxer.handler)):
try:
await muxerHolder.handle # TODO noraises?
await muxer.handler # TODO noraises?
except CatchableError as exc:
trace "Exception in close muxer handler", exc = exc.msg
trace "Cleaned up muxer", m = muxerHolder.muxer
proc delConn(c: ConnManager, conn: Connection) =
let peerId = conn.peerId
c.conns.withValue(peerId, peerConns):
peerConns[].excl(conn)
if peerConns[].len == 0:
c.conns.del(peerId) # invalidates `peerConns`
libp2p_peers.set(c.conns.len.int64)
trace "Removed connection", conn
proc cleanupConn(c: ConnManager, conn: Connection) {.async.} =
## clean connection's resources such as muxers and streams
if isNil(conn):
trace "Wont cleanup a nil connection"
return
# Remove connection from all tables without async breaks
var muxer = some(MuxerHolder())
if not c.muxed.pop(conn, muxer.get()):
muxer = none(MuxerHolder)
delConn(c, conn)
trace "Cleaned up muxer", m = muxer
proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
try:
if muxer.isSome:
await closeMuxerHolder(muxer.get())
finally:
await conn.close()
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
trace "Connection cleaned up", conn
let muxers = c.muxed.getOrDefault(peerId).filterIt(it != mux)
if muxers.len > 0:
c.muxed[peerId] = muxers
else:
c.muxed.del(peerId)
libp2p_peers.set(c.muxed.len.int64)
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
proc onConnUpgraded(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering connect events", conn
conn.upgrade()
if not(c.peerStore.isNil):
c.peerStore.cleanup(peerId)
let peerId = conn.peerId
await c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: conn.dir == Direction.Out))
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: conn.dir == Direction.In))
except CatchableError as exc:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception in switch peer connection cleanup",
conn, msg = exc.msg
proc peerCleanup(c: ConnManager, conn: Connection) {.async.} =
try:
trace "Triggering disconnect events", conn
let peerId = conn.peerId
await c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Disconnected))
await c.triggerPeerEvents(peerId, PeerEvent(kind: PeerEventKind.Left))
if not(c.peerStore.isNil):
c.peerStore.cleanup(peerId)
except CatchableError as exc:
# This is a top-level procedure which will work as a separate task, so it
# does not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler",
conn, msg = exc.msg
mux, msg = exc.msg
proc onClose(c: ConnManager, conn: Connection) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
## connection close event handler
##
## triggers the connections resource cleanup
##
try:
await conn.join()
trace "Connection closed, cleaning up", conn
await c.cleanupConn(conn)
except CancelledError:
# This is a top-level procedure which will work as a separate task, so it
# does not need to propagate CancelledError.
debug "Unexpected cancellation in connection manager's cleanup", conn
await mux.connection.join()
trace "Connection closed, cleaning up", mux
except CatchableError as exc:
debug "Unexpected exception in connection manager's cleanup",
errMsg = exc.msg, conn
errMsg = exc.msg, mux
finally:
trace "Triggering peerCleanup", conn
asyncSpawn c.peerCleanup(conn)
await c.muxCleanup(mux)
proc selectConn*(c: ConnManager,
proc selectMuxer*(c: ConnManager,
peerId: PeerId,
dir: Direction): Connection =
dir: Direction): Muxer =
## Select a connection for the provided peer and direction
##
let conns = toSeq(
c.conns.getOrDefault(peerId))
.filterIt( it.dir == dir )
c.muxed.getOrDefault(peerId))
.filterIt( it.connection.dir == dir )
if conns.len > 0:
return conns[0]
proc selectConn*(c: ConnManager, peerId: PeerId): Connection =
proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
## Select a connection for the provided peer, giving priority
## to outgoing connections
##
var conn = c.selectConn(peerId, Direction.Out)
if isNil(conn):
conn = c.selectConn(peerId, Direction.In)
if isNil(conn):
var mux = c.selectMuxer(peerId, Direction.Out)
if isNil(mux):
mux = c.selectMuxer(peerId, Direction.In)
if isNil(mux):
trace "connection not found", peerId
return mux
return conn
proc selectMuxer*(c: ConnManager, conn: Connection): Muxer =
## select the muxer for the provided connection
proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [Defect, CatchableError].} =
## store the connection and muxer
##
if isNil(conn):
return
if isNil(muxer):
raise newException(LPError, "muxer cannot be nil")
if conn in c.muxed:
return c.muxed.getOrDefault(conn).muxer
else:
debug "no muxer for connection", conn
if isNil(muxer.connection):
raise newException(LPError, "muxer's connection cannot be nil")
proc storeConn*(c: ConnManager, conn: Connection)
{.raises: [Defect, LPError].} =
## store a connection
##
if isNil(conn):
raise newException(LPError, "Connection cannot be nil")
if conn.closed or conn.atEof:
if muxer.connection.closed or muxer.connection.atEof:
raise newException(LPError, "Connection closed or EOF")
let peerId = conn.peerId
if c.conns.getOrDefault(peerId).len > c.maxConnsPerPeer:
debug "Too many connections for peer",
conn, conns = c.conns.getOrDefault(peerId).len
let
peerId = muxer.connection.peerId
dir = muxer.connection.dir
raise newTooManyConnectionsError()
# we use getOrDefault in the if below instead of [] to avoid the KeyError
if c.muxed.getOrDefault(peerId).len > c.maxConnsPerPeer:
let key = (peerId, dir)
let expectedConn = c.expectedConnectionsOverLimit.getOrDefault(key)
if expectedConn != nil and not expectedConn.finished:
expectedConn.complete(muxer)
else:
debug "Too many connections for peer",
conns = c.muxed.getOrDefault(peerId).len
c.conns.mgetOrPut(peerId, HashSet[Connection]()).incl(conn)
libp2p_peers.set(c.conns.len.int64)
raise newTooManyConnectionsError()
# Launch on close listener
# All errors are handled inside the `onClose()` procedure.
asyncSpawn c.onClose(conn)
assert muxer notin c.muxed.getOrDefault(peerId)
trace "Stored connection",
conn, direction = $conn.dir, connections = c.conns.len
let
newPeer = peerId notin c.muxed
assert newPeer or c.muxed[peerId].len > 0
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
libp2p_peers.set(c.muxed.len.int64)
asyncSpawn c.triggerConnEvent(
peerId, ConnEvent(kind: ConnEventKind.Connected, incoming: dir == Direction.In))
if newPeer:
asyncSpawn c.triggerPeerEvents(
peerId, PeerEvent(kind: PeerEventKind.Joined, initiator: dir == Direction.Out))
asyncSpawn c.onClose(muxer)
trace "Stored muxer",
muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
proc getOutgoingSlot*(c: ConnManager, forceDial = false): Future[ConnectionSlot] {.async.} =
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
@@ -425,6 +376,13 @@ proc getOutgoingSlot*(c: ConnManager, forceDial = false): Future[ConnectionSlot]
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir:
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
@@ -446,39 +404,17 @@ proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
asyncSpawn semaphoreMonitor()
proc storeMuxer*(c: ConnManager,
muxer: Muxer,
handle: Future[void] = nil)
{.raises: [Defect, CatchableError].} =
## store the connection and muxer
##
if isNil(muxer):
raise newException(CatchableError, "muxer cannot be nil")
if isNil(muxer.connection):
raise newException(CatchableError, "muxer's connection cannot be nil")
if muxer.connection notin c:
raise newException(CatchableError, "cant add muxer for untracked connection")
c.muxed[muxer.connection] = MuxerHolder(
muxer: muxer,
handle: handle)
trace "Stored muxer",
muxer, handle = not handle.isNil, connections = c.conns.len
asyncSpawn c.onConnUpgraded(muxer.connection)
proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
if isNil(mux):
cs.release()
return
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the provided peer
## with the given direction
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed muxer
##
let muxer = c.selectMuxer(c.selectConn(peerId, dir))
if not(isNil(muxer)):
return await muxer.newStream()
@@ -487,40 +423,25 @@ proc getStream*(c: ConnManager,
## get a muxed stream for the passed peer from any connection
##
let muxer = c.selectMuxer(c.selectConn(peerId))
if not(isNil(muxer)):
return await muxer.newStream()
return await c.getStream(c.selectMuxer(peerId))
proc getStream*(c: ConnManager,
conn: Connection): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed connection
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
## get a muxed stream for the passed peer from a connection with `dir`
##
let muxer = c.selectMuxer(conn)
if not(isNil(muxer)):
return await muxer.newStream()
return await c.getStream(c.selectMuxer(peerId, dir))
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
let conns = c.conns.getOrDefault(peerId)
for conn in conns:
trace "Removing connection", conn
delConn(c, conn)
var muxers: seq[MuxerHolder]
for conn in conns:
if conn in c.muxed:
muxers.add c.muxed[conn]
c.muxed.del(conn)
let muxers = c.muxed.getOrDefault(peerId)
for muxer in muxers:
await closeMuxerHolder(muxer)
for conn in conns:
await conn.close()
trace "Dropped peer", peerId
await closeMuxer(muxer)
trace "Peer dropped", peerId
@@ -530,18 +451,18 @@ proc close*(c: ConnManager) {.async.} =
##
trace "Closing ConnManager"
let conns = c.conns
c.conns.clear()
let muxed = c.muxed
c.muxed.clear()
for _, muxer in muxed:
await closeMuxerHolder(muxer)
let expected = c.expectedConnectionsOverLimit
c.expectedConnectionsOverLimit.clear()
for _, conns2 in conns:
for conn in conns2:
await conn.close()
for _, fut in expected:
await fut.cancelAndWait()
for _, muxers in muxed:
for mux in muxers:
await closeMuxer(mux)
trace "Closed ConnManager"

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -22,7 +22,7 @@ else:
import bearssl/blockx
from stew/assign2 import assign
from stew/ranges/ptr_arith import baseAddr
from stew/ptrops import baseAddr
const
ChaChaPolyKeySize = 32

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022-2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -27,7 +27,8 @@ method connect*(
self: Dial,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false) {.async, base.} =
forceDial = false,
reuseConnection = true) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -17,7 +17,9 @@ import pkg/[chronos,
import dial,
peerid,
peerinfo,
peerstore,
multicodec,
muxers/muxer,
multistream,
connmanager,
stream/connection,
@@ -41,10 +43,10 @@ type
Dialer* = ref object of Dial
localPeerId*: PeerId
ms: MultistreamSelect
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
@@ -52,7 +54,7 @@ proc dialAndUpgrade(
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress):
Future[Connection] {.async.} =
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
@@ -60,7 +62,7 @@ proc dialAndUpgrade(
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address)
await transport.dial(hostname, address, peerId)
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
raise exc
@@ -75,7 +77,7 @@ proc dialAndUpgrade(
libp2p_successful_dials.inc()
let conn =
let mux =
try:
await transport.upgradeOutgoing(dialed, peerId)
except CatchableError as exc:
@@ -89,9 +91,9 @@ proc dialAndUpgrade(
# Try other address
return nil
doAssert not isNil(conn), "connection died after upgradeOutgoing"
debug "Dial successful", conn, peerId = conn.peerId
return conn
doAssert not isNil(mux), "connection died after upgradeOutgoing"
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
proc expandDnsAddr(
@@ -126,7 +128,7 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress]):
Future[Connection] {.async.} =
Future[Muxer] {.async.} =
debug "Dialing peer", peerId
@@ -147,12 +149,21 @@ proc dialAndUpgrade(
if not isNil(result):
return result
proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
let muxer = self.connManager.selectMuxer(peerId)
if muxer == nil:
return Opt.none(Muxer)
trace "Reusing existing connection", muxer, direction = $muxer.connection.dir
return Opt.some(muxer)
proc internalConnect(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool):
Future[Connection] {.async.} =
forceDial: bool,
reuseConnection = true):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -161,44 +172,31 @@ proc internalConnect(
try:
await lock.acquire()
# Check if we have a connection already and try to reuse it
var conn =
if peerId.isSome: self.connManager.selectConn(peerId.get())
else: nil
if conn != nil:
if conn.atEof or conn.closed:
# This connection should already have been removed from the connection
# manager - it's essentially a bug that we end up here - we'll fail
# for now, hoping that this will clean themselves up later...
warn "dead connection in connection manager", conn
await conn.close()
raise newException(DialFailedError, "Zombie connection encountered")
if peerId.isSome and reuseConnection:
let muxOpt = await self.tryReusingConnection(peerId.get())
if muxOpt.isSome:
return muxOpt.get()
trace "Reusing existing connection", conn, direction = $conn.dir
return conn
let slot = await self.connManager.getOutgoingSlot(forceDial)
conn =
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs)
except CatchableError as exc:
slot.release()
raise exc
slot.trackConnection(conn)
if isNil(conn): # None of the addresses connected
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
# A disconnect could have happened right after
# we've added the connection so we check again
# to prevent races due to that.
if conn.closed() or conn.atEof():
# This can happen when the other ends drops us
# before we get a chance to return the connection
# back to the dialer.
trace "Connection dead on arrival", conn
raise newLPStreamClosedError()
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", err=exc.msg
await muxed.close()
raise exc
return conn
return muxed
finally:
if lock.locked():
lock.release()
@@ -207,15 +205,16 @@ method connect*(
self: Dialer,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false) {.async.} =
forceDial = false,
reuseConnection = true) {.async.} =
## connect remote peer without negotiating
## a protocol
##
if self.connManager.connCount(peerId) > 0:
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection)
method connect*(
self: Dialer,
@@ -228,21 +227,21 @@ method connect*(
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).peerId
false)).connection.peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).peerId
false)).connection.peerId
proc negotiateStream(
self: Dialer,
conn: Connection,
protos: seq[string]): Future[Connection] {.async.} =
trace "Negotiating stream", conn, protos
let selected = await self.ms.select(conn, protos)
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
@@ -260,11 +259,11 @@ method tryDial*(
trace "Check if it can dial", peerId, addrs
try:
let conn = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if conn.isNil():
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress")
await conn.close()
return conn.observedAddr
await mux.close()
return mux.connection.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -296,7 +295,7 @@ method dial*(
##
var
conn: Connection
conn: Muxer
stream: Connection
proc cleanup() {.async.} =
@@ -333,12 +332,12 @@ proc new*(
T: type Dialer,
localPeerId: PeerId,
connManager: ConnManager,
peerStore: PeerStore,
transports: seq[Transport],
ms: MultistreamSelect,
nameResolver: NameResolver = nil): Dialer =
T(localPeerId: localPeerId,
connManager: connManager,
transports: transports,
ms: ms,
peerStore: peerStore,
nameResolver: nameResolver)
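A hedged usage sketch of the reuseConnection flag threaded through connect above (#849), assuming an existing switch, peerId and addrs:

await switch.connect(peerId, addrs)                           # reuses a live connection if any
await switch.connect(peerId, addrs, reuseConnection = false)  # always dials a fresh connection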

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -462,6 +462,7 @@ const
IP6* = mapEq("ip6")
DNS* = mapOr(DNSANY, DNS4, DNS6, DNSADDR)
IP* = mapOr(IP4, IP6)
DNS_OR_IP* = mapOr(DNS, IP)
TCP* = mapOr(mapAnd(DNS, mapEq("tcp")), mapAnd(IP, mapEq("tcp")))
UDP* = mapOr(mapAnd(DNS, mapEq("udp")), mapAnd(IP, mapEq("udp")))
UTP* = mapAnd(UDP, mapEq("utp"))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -21,12 +21,11 @@ logScope:
topics = "libp2p multistream"
const
MsgSize* = 1024
Codec* = "/multistream/1.0.0"
MsgSize = 1024
Codec = "/multistream/1.0.0"
MSCodec* = "\x13" & Codec & "\n"
Na* = "\x03na\n"
Ls* = "\x03ls\n"
Na = "na\n"
Ls = "ls\n"
type
Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
@@ -45,7 +44,7 @@ type
proc new*(T: typedesc[MultistreamSelect]): T =
T(
codec: MSCodec,
codec: Codec,
)
template validateSuffix(str: string): untyped =
@@ -54,13 +53,13 @@ template validateSuffix(str: string): untyped =
else:
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
proc select*(m: MultistreamSelect,
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]):
Future[string] {.async.} =
trace "initiating handshake", conn, codec = m.codec
trace "initiating handshake", conn, codec = Codec
## select a remote protocol
await conn.write(m.codec) # write handshake
await conn.writeLp(Codec & "\n") # write handshake
if proto.len() > 0:
trace "selecting proto", conn, proto = proto[0]
await conn.writeLp((proto[0] & "\n")) # select proto
@@ -102,13 +101,13 @@ proc select*(m: MultistreamSelect,
# No alternatives, fail
return ""
proc select*(m: MultistreamSelect,
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: string): Future[bool] {.async.} =
if proto.len > 0:
return (await m.select(conn, @[proto])) == proto
return (await MultistreamSelect.select(conn, @[proto])) == proto
else:
return (await m.select(conn, @[])) == Codec
return (await MultistreamSelect.select(conn, @[])) == Codec
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
m.select(conn, "")
@@ -119,7 +118,7 @@ proc list*(m: MultistreamSelect,
if not await m.select(conn):
return
await conn.write(Ls) # send ls
await conn.writeLp(Ls) # send ls
var list = newSeq[string]()
let ms = string.fromBytes(await conn.readLp(MsgSize))
@@ -129,68 +128,86 @@ proc list*(m: MultistreamSelect,
result = list
proc handle*(
_: type MultistreamSelect,
conn: Connection,
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)
if not handshaked and ms != Codec:
debug "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")
trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.writeLp(Na)
case ms:
of "ls":
trace "handle: listing protos", conn
#TODO this doesn't seem to follow spec, each protocol
# should be length prefixed. Not very important
# since LS is getting deprecated
await conn.writeLp(protos.join("\n") & "\n")
of Codec:
if not handshaked:
await conn.writeLp(Codec & "\n")
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
await conn.writeLp(Na)
elif ms in protos or matchers.anyIt(it(ms)):
trace "found handler", conn, protocol = ms
await conn.writeLp(ms & "\n")
conn.protocol = ms
return ms
else:
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
trace "Starting multistream handler", conn, handshaked = active
var handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:
if not isNil(h.match):
matchers.add(h.match)
for proto in h.protos:
protos.add(proto)
try:
while not conn.atEof:
var ms = string.fromBytes(await conn.readLp(MsgSize))
validateSuffix(ms)
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
if not handshaked and ms != Codec:
notice "expected handshake message", conn, instead=ms
raise newException(CatchableError,
"MultistreamSelect handling failed, invalid first message")
trace "handle: got request", conn, ms
if ms.len() <= 0:
trace "handle: invalid proto", conn
await conn.write(Na)
if m.handlers.len() == 0:
trace "handle: sending `na` for protocol", conn, protocol = ms
await conn.write(Na)
continue
case ms:
of "ls":
trace "handle: listing protos", conn
var protos = ""
for h in m.handlers:
for proto in h.protos:
protos &= (proto & "\n")
await conn.writeLp(protos)
of Codec:
if not handshaked:
await conn.write(m.codec)
handshaked = true
else:
trace "handle: sending `na` for duplicate handshake while handshaked",
conn
await conn.write(Na)
else:
for h in m.handlers:
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
trace "found handler", conn, protocol = ms
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await conn.writeLp(ms & "\n")
conn.protocol = ms
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, protocol = ms
await conn.write(Na)
var protocolHolder = h
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
debug "Max streams for protocol reached, blocking new stream",
conn, protocol = ms, maxIncomingStreams
return
protocolHolder.openedStreams.inc(conn.peerId)
try:
await protocolHolder.protocol.handler(conn, ms)
finally:
protocolHolder.openedStreams.inc(conn.peerId, -1)
if protocolHolder.openedStreams[conn.peerId] == 0:
protocolHolder.openedStreams.del(conn.peerId)
return
debug "no handlers", conn, ms
except CancelledError as exc:
raise exc
except CatchableError as exc:
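Because select now accepts `MultistreamSelect | type MultistreamSelect`, callers no longer need an instance. A sketch, assuming an open conn: Connection:

let selected = await MultistreamSelect.select(conn, @["/my/proto/1.0.0"])
if selected.len > 0:
  echo "negotiated ", selected  # "" means the remote accepted none of the offers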

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -244,7 +244,11 @@ proc completeWrite(
except CancelledError as exc:
# Chronos may still send the data
raise exc
except LPStreamClosedError as exc:
except LPStreamConnDownError as exc:
await s.reset()
await s.conn.close()
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -177,8 +177,14 @@ method handle*(m: Mplex) {.async, gcsafe.} =
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
@@ -242,3 +248,7 @@ method close*(m: Mplex) {.async, gcsafe.} =
m.channels[true].clear()
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
for c in m.channels[false].values: result.add(c)
for c in m.channels[true].values: result.add(c)
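Together with ConnManager.getConnections above, the new getStreams methods (#878) allow enumerating every live stream. A sketch, assuming a running switch whose connManager field is accessible:

for peerId, muxers in switch.connManager.getConnections():
  for mux in muxers:
    for stream in mux.getStreams():
      echo peerId, ": ", stream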

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -32,24 +32,28 @@ type
Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void]
connection*: Connection
# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = ref object of LPProtocol
MuxerProvider* = object
newMuxer*: MuxerConstructor
streamHandler*: StreamHandler # triggered every time there is a new stream, called for any muxer instance
muxerHandler*: MuxerHandler # triggered every time there is a new muxed connection created
codec*: string
func shortLog*(m: Muxer): auto = shortLog(m.connection)
func shortLog*(m: Muxer): auto =
if isNil(m): "nil"
else: shortLog(m.connection)
chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
proc new*(
@@ -57,36 +61,7 @@ proc new*(
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator)
muxerProvider.codec = codec
muxerProvider.init()
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method init(c: MuxerProvider) =
proc handler(conn: Connection, proto: string) {.async, gcsafe, closure.} =
trace "starting muxer handler", proto=proto, conn
try:
let
muxer = c.newMuxer(conn)
if not isNil(c.streamHandler):
muxer.streamHandler = c.streamHandler
var futs = newSeq[Future[void]]()
futs &= muxer.handle()
# finally await both the futures
if not isNil(c.muxerHandler):
await c.muxerHandler(muxer)
when defined(libp2p_agents_metrics):
conn.shortAgent = muxer.connection.shortAgent
checkFutures(await allFinished(futs))
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in muxer handler", exc = exc.msg, conn, proto
finally:
await conn.close()
c.handler = handler
method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -356,6 +356,8 @@ proc open*(channel: YamuxChannel) {.async, gcsafe.} =
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
type
Yamux* = ref object of Muxer
channels: Table[uint32, YamuxChannel]
@@ -506,6 +508,9 @@ method handle*(m: Yamux) {.async, gcsafe.} =
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)
method newStream*(
m: Yamux,
name: string = "",

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -0,0 +1,98 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[sequtils, tables],
chronos, chronicles,
multiaddress, multicodec
type
## Manages MultiAddresses observed by remote peers. It keeps track of the most observed IP and IP/Port.
ObservedAddrManager* = ref object of RootObj
observedIPsAndPorts: seq[MultiAddress]
maxSize: int
minCount: int
proc addObservation*(self:ObservedAddrManager, observedAddr: MultiAddress): bool =
## Adds a new observed MultiAddress. If the number of observations exceeds maxSize, the oldest one is removed.
if self.observedIPsAndPorts.len >= self.maxSize:
self.observedIPsAndPorts.del(0)
self.observedIPsAndPorts.add(observedAddr)
return true
proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], multiCodec: MultiCodec): Opt[MultiAddress] =
var countTable = toCountTable(observations)
countTable.sort()
var orderedPairs = toSeq(countTable.pairs)
for (ma, count) in orderedPairs:
let maFirst = ma[0].get()
if maFirst.protoCode.get() == multiCodec and count >= self.minCount:
return Opt.some(ma)
return Opt.none(MultiAddress)
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
## Returns the most observed IP address or none if the number of observations is less than minCount.
let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get())
return self.getProtocol(observedIPs, multiCodec)
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
## Returns the most observed IP/Port address or none if the number of observations is less than minCount.
return self.getProtocol(self.observedIPsAndPorts, multiCodec)
proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress] =
## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
## is less than minCount.
var res: seq[MultiAddress]
let ip4 = self.getMostObservedProtoAndPort(multiCodec("ip4"))
if ip4.isSome():
res.add(ip4.get())
let ip6 = self.getMostObservedProtoAndPort(multiCodec("ip6"))
if ip6.isSome():
res.add(ip6.get())
return res
proc guessDialableAddr*(
self: ObservedAddrManager,
ma: MultiAddress): MultiAddress =
## Replaces the first protocol value of the listen address with the most observed value matching its proto code.
## If the most observed value is not available, the original MultiAddress is returned.
try:
let maFirst = ma[0]
let maRest = ma[1..^1]
if maRest.isErr():
return ma
let observedIP = self.getMostObservedProtocol(maFirst.get().protoCode().get())
return
if observedIP.isNone() or maFirst.get() == observedIP.get():
ma
else:
observedIP.get() & maRest.get()
except CatchableError as error:
debug "Error while handling manual port forwarding", msg = error.msg
return ma
proc `$`*(self: ObservedAddrManager): string =
## Returns a string representation of the ObservedAddrManager.
return "IPs and Ports: " & $self.observedIPsAndPorts
proc new*(
T: typedesc[ObservedAddrManager],
maxSize = 10,
minCount = 3): T =
## Creates a new ObservedAddrManager.
return T(
observedIPsAndPorts: newSeq[MultiAddress](),
maxSize: maxSize,
minCount: minCount)
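A minimal usage sketch of the manager defined above, with hypothetical addresses; once minCount matching observations accumulate, the address is reported:

let oam = ObservedAddrManager.new(maxSize = 10, minCount = 3)
for i in 0 ..< 3:
  discard oam.addObservation(MultiAddress.init("/ip4/1.2.3.4/tcp/4040").tryGet())
echo oam.getMostObservedProtosAndPorts()  # -> the ip4/port seen three times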

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -28,11 +28,16 @@ else:
import
std/[tables, sets, options, macros],
chronos, chronicles,
./crypto/crypto,
./protocols/identify,
./protocols/protocol,
./peerid, ./peerinfo,
./routing_record,
./multiaddress,
./stream/connection,
./multistream,
./muxers/muxer,
utility
type
@@ -70,11 +75,15 @@ type
PeerStore* {.public.} = ref object
books: Table[string, BasePeerBook]
identify: Identify
capacity*: int
toClean*: seq[PeerId]
proc new*(T: type PeerStore, capacity = 1000): PeerStore {.public.} =
T(capacity: capacity)
proc new*(T: type PeerStore, identify: Identify, capacity = 1000): PeerStore {.public.} =
T(
identify: identify,
capacity: capacity
)
#########################
# Generic Peer Book API #
@@ -186,3 +195,31 @@ proc cleanup*(
while peerStore.toClean.len > peerStore.capacity:
peerStore.del(peerStore.toClean[0])
peerStore.toClean.delete(0)
proc identify*(
peerStore: PeerStore,
muxer: Muxer) {.async.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
return
try:
if (await MultistreamSelect.select(stream, peerStore.identify.codec())):
let info = await peerStore.identify.identify(stream, stream.peerId)
when defined(libp2p_agents_metrics):
var knownAgent = "unknown"
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
knownAgent = shortAgent.get()
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
finally:
await stream.closeWithEOF()
proc guessDialableAddr*(self: PeerStore, ma: MultiAddress): MultiAddress =
return self.identify.observedAddrManager.guessDialableAddr(ma)
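
Taken together, a hedged sketch of how these pieces might be wired (inside an async proc; `peerInfo`, `muxer` and `listenAddr` are assumed to exist, and Identify construction follows the identify.nim hunk further down):

let peerStore = PeerStore.new(Identify.new(peerInfo), capacity = 1000)
# once a connection is upgraded and muxed:
await peerStore.identify(muxer)
# later, turn a local listen address into a likely-dialable public one:
let dialable = peerStore.guessDialableAddr(listenAddr)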

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,322 +0,0 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/results
import chronos, chronicles, stew/objects
import ../protocol,
../../switch,
../../multiaddress,
../../multicodec,
../../peerid,
../../utils/semaphore,
../../errors
logScope:
topics = "libp2p autonat"
const
AutonatCodec* = "/libp2p/autonat/1.0.0"
AddressLimit = 8
type
AutonatError* = object of LPError
AutonatUnreachableError* = object of LPError
MsgType* = enum
Dial = 0
DialResponse = 1
ResponseStatus* = enum
Ok = 0
DialError = 100
DialRefused = 101
BadRequest = 200
InternalError = 300
AutonatPeerInfo* = object
id: Option[PeerId]
addrs: seq[MultiAddress]
AutonatDial* = object
peerInfo: Option[AutonatPeerInfo]
AutonatDialResponse* = object
status*: ResponseStatus
text*: Option[string]
ma*: Option[MultiAddress]
AutonatMsg = object
msgType: MsgType
dial: Option[AutonatDial]
response: Option[AutonatDialResponse]
proc encode*(msg: AutonatMsg): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
if msg.dial.isSome():
var dial = initProtoBuffer()
if msg.dial.get().peerInfo.isSome():
var bufferPeerInfo = initProtoBuffer()
let peerInfo = msg.dial.get().peerInfo.get()
if peerInfo.id.isSome():
bufferPeerInfo.write(1, peerInfo.id.get())
for ma in peerInfo.addrs:
bufferPeerInfo.write(2, ma.data.buffer)
bufferPeerInfo.finish()
dial.write(1, bufferPeerInfo.buffer)
dial.finish()
result.write(2, dial.buffer)
if msg.response.isSome():
var bufferResponse = initProtoBuffer()
let response = msg.response.get()
bufferResponse.write(1, response.status.uint)
if response.text.isSome():
bufferResponse.write(2, response.text.get())
if response.ma.isSome():
bufferResponse.write(3, response.ma.get())
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()
proc encode*(d: AutonatDial): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.Dial.uint)
var dial = initProtoBuffer()
if d.peerInfo.isSome():
var bufferPeerInfo = initProtoBuffer()
let peerInfo = d.peerInfo.get()
if peerInfo.id.isSome():
bufferPeerInfo.write(1, peerInfo.id.get())
for ma in peerInfo.addrs:
bufferPeerInfo.write(2, ma.data.buffer)
bufferPeerInfo.finish()
dial.write(1, bufferPeerInfo.buffer)
dial.finish()
result.write(2, dial.buffer)
result.finish()
proc encode*(r: AutonatDialResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.DialResponse.uint)
var bufferResponse = initProtoBuffer()
bufferResponse.write(1, r.status.uint)
if r.text.isSome():
bufferResponse.write(2, r.text.get())
if r.ma.isSome():
bufferResponse.write(3, r.ma.get())
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()
proc decode(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
var
msgTypeOrd: uint32
pbDial: ProtoBuffer
pbResponse: ProtoBuffer
msg: AutonatMsg
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbDial)
r3 = pb.getField(3, pbResponse)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(AutonatMsg)
if r2.get():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
let
r4 = pbDial.getField(1, pbPeerInfo)
if r4.isErr(): return none(AutonatMsg)
var peerInfo: AutonatPeerInfo
if r4.get():
var pid: PeerId
let
r5 = pbPeerInfo.getField(1, pid)
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
if r5.get(): peerInfo.id = some(pid)
dial.peerInfo = some(peerInfo)
msg.dial = some(dial)
if r3.get():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse
let
r4 = pbResponse.getField(1, statusOrd)
r5 = pbResponse.getField(2, text)
r6 = pbResponse.getField(3, ma)
if r4.isErr() or r5.isErr() or r6.isErr() or
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
return none(AutonatMsg)
if r5.get(): response.text = some(text)
if r6.get(): response.ma = some(ma)
msg.response = some(response)
return some(msg)
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
let pb = AutonatDialResponse(
status: status,
text: if text == "": none(string) else: some(text),
ma: none(MultiAddress)
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
text: some("Ok"),
ma: some(ma)
).encode()
await conn.writeLp(pb.buffer)
type
Autonat* = ref object of LPProtocol
sem: AsyncSemaphore
switch*: Switch
method dialMe*(a: Autonat, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [UnpackError, AutonatError].} =
if autonatMsg.isNone() or
autonatMsg.get().msgType != DialResponse or
autonatMsg.get().response.isNone() or
autonatMsg.get().response.get().ma.isNone():
raise newException(AutonatError, "Unexpected response")
else:
autonatMsg.get().response.get()
let conn =
try:
if addrs.len == 0:
await a.switch.dial(pid, @[AutonatCodec])
else:
await a.switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
raise newException(AutonatError, "Unexpected error when dialling", err)
defer: await conn.close()
await conn.sendDial(a.switch.peerInfo.peerId, a.switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
response.ma.get()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back")
else:
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))
proc tryDial(a: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
try:
await a.sem.acquire()
let ma = await a.switch.dialer.tryDial(conn.peerId, addrs)
if ma.isSome:
await conn.sendResponseOk(ma.get())
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
raise exc
except CatchableError as exc:
await conn.sendResponseError(DialError, exc.msg)
finally:
a.sem.release()
proc handleDial(a: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
return conn.sendResponseError(BadRequest, "Missing Peer Info")
let peerInfo = msg.dial.get().peerInfo.get()
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
if conn.observedAddr.isNone:
return conn.sendResponseError(BadRequest, "Missing observed address")
let observedAddr = conn.observedAddr.get()
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = observedAddr[0]
if hostIp.isErr() or not IP.match(hostIp.get()):
trace "wrong observed address", address=observedAddr
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(observedAddr)
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
continue
let maFirst = ma[0]
if maFirst.isErr() or not IP.match(maFirst.get()):
continue
try:
addrs.incl(
if maFirst.get() == hostIp.get():
ma
else:
let maEnd = ma[1..^1]
if maEnd.isErr(): continue
hostIp.get() & maEnd.get()
)
except LPError as exc:
continue
if len(addrs) >= AddressLimit:
break
if len(addrs) == 0:
return conn.sendResponseError(DialRefused, "No dialable address")
return a.tryDial(conn, toSeq(addrs))
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize))
autonat.init()
autonat
method init*(a: Autonat) =
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
try:
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
raise newException(AutonatError, "Received malformed message")
let msg = msgOpt.get()
await a.handleDial(conn, msg)
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in autonat handler", exc = exc.msg, conn
finally:
trace "exiting autonat handler", conn
await conn.close()
a.handler = handleStream
a.codec = AutonatCodec

View File

@@ -0,0 +1,78 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import ../../../switch,
../../../multiaddress,
../../../peerid
import core
export core
logScope:
topics = "libp2p autonat"
type
AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [UnpackError, AutonatError].} =
if autonatMsg.isNone() or
autonatMsg.get().msgType != DialResponse or
autonatMsg.get().response.isNone() or
(autonatMsg.get().response.get().status == Ok and
autonatMsg.get().response.get().ma.isNone()):
raise newException(AutonatError, "Unexpected response")
else:
autonatMsg.get().response.get()
let conn =
try:
if addrs.len == 0:
await switch.dial(pid, @[AutonatCodec])
else:
await switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
raise newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
# To bypass maxConnectionsPerPeer
let incomingConnection = switch.connManager.expectConnection(pid, In)
if incomingConnection.failed() and incomingConnection.error of AlreadyExpectingConnectionError:
raise newException(AutonatError, incomingConnection.error.msg)
defer:
await conn.close()
incomingConnection.cancel() # Safer to always try to cancel because we aren't sure whether the peer dialled us or not
if incomingConnection.completed():
await (await incomingConnection).connection.close()
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
response.ma.get()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
else:
raise newException(AutonatError, "Bad status " & $response.status & " " & response.text.get(""))

View File

@@ -0,0 +1,152 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress,
../../../peerid,
../../../errors
logScope:
topics = "libp2p autonat"
const
AutonatCodec* = "/libp2p/autonat/1.0.0"
AddressLimit* = 8
type
AutonatError* = object of LPError
AutonatUnreachableError* = object of LPError
MsgType* = enum
Dial = 0
DialResponse = 1
ResponseStatus* = enum
Ok = 0
DialError = 100
DialRefused = 101
BadRequest = 200
InternalError = 300
AutonatPeerInfo* = object
id*: Option[PeerId]
addrs*: seq[MultiAddress]
AutonatDial* = object
peerInfo*: Option[AutonatPeerInfo]
AutonatDialResponse* = object
status*: ResponseStatus
text*: Option[string]
ma*: Option[MultiAddress]
AutonatMsg* = object
msgType*: MsgType
dial*: Option[AutonatDial]
response*: Option[AutonatDialResponse]
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
if p.id.isSome():
result.write(1, p.id.get())
for ma in p.addrs:
result.write(2, ma.data.buffer)
result.finish()
proc encode*(d: AutonatDial): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.Dial.uint)
var dial = initProtoBuffer()
if d.peerInfo.isSome():
dial.write(1, encode(d.peerInfo.get()))
dial.finish()
result.write(2, dial.buffer)
result.finish()
proc encode*(r: AutonatDialResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.DialResponse.uint)
var bufferResponse = initProtoBuffer()
bufferResponse.write(1, r.status.uint)
if r.text.isSome():
bufferResponse.write(2, r.text.get())
if r.ma.isSome():
bufferResponse.write(3, r.ma.get())
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()
proc encode*(msg: AutonatMsg): ProtoBuffer =
if msg.dial.isSome():
return encode(msg.dial.get())
if msg.response.isSome():
return encode(msg.response.get())
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
var
msgTypeOrd: uint32
pbDial: ProtoBuffer
pbResponse: ProtoBuffer
msg: AutonatMsg
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbDial)
r3 = pb.getField(3, pbResponse)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(AutonatMsg)
if r2.get():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
let
r4 = pbDial.getField(1, pbPeerInfo)
if r4.isErr(): return none(AutonatMsg)
var peerInfo: AutonatPeerInfo
if r4.get():
var pid: PeerId
let
r5 = pbPeerInfo.getField(1, pid)
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
if r5.get(): peerInfo.id = some(pid)
dial.peerInfo = some(peerInfo)
msg.dial = some(dial)
if r3.get():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse
let
r4 = pbResponse.getField(1, statusOrd)
r5 = pbResponse.getField(2, text)
r6 = pbResponse.getField(3, ma)
if r4.isErr() or r5.isErr() or r6.isErr() or
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
return none(AutonatMsg)
if r5.get(): response.text = some(text)
if r6.get(): response.ma = some(ma)
msg.response = some(response)
return some(msg)
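
An encode/decode round-trip sketch for the wire format above (address is hypothetical; the peer id is left empty to keep the example self-contained):

let
  dial = AutonatDial(peerInfo: some(AutonatPeerInfo(
    id: none(PeerId),
    addrs: @[MultiAddress.init("/ip4/127.0.0.1/tcp/4040").tryGet()])))
  decoded = AutonatMsg.decode(dial.encode().buffer)
# the encoder writes a full AutonatMsg with msgType = Dial
assert decoded.isSome()
assert decoded.get().msgType == MsgType.Dial
assert decoded.get().dial.get().peerInfo.get().addrs.len == 1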

View File

@@ -0,0 +1,165 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/results
import chronos, chronicles, stew/objects
import ../../protocol,
../../../switch,
../../../multiaddress,
../../../multicodec,
../../../peerid,
../../../utils/[semaphore, future],
../../../errors
import core
export core
logScope:
topics = "libp2p autonat"
type
Autonat* = ref object of LPProtocol
sem: AsyncSemaphore
switch*: Switch
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
let pb = AutonatDialResponse(
status: status,
text: if text == "": none(string) else: some(text),
ma: none(MultiAddress)
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
text: some("Ok"),
ma: some(ma)
).encode()
await conn.writeLp(pb.buffer)
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
await autonat.sem.acquire()
var futs: seq[Future[Opt[MultiAddress]]]
try:
# This is to bypass the per peer max connections limit
let outgoingConnection = autonat.switch.connManager.expectConnection(conn.peerId, Out)
if outgoingConnection.failed() and outgoingConnection.error of AlreadyExpectingConnectionError:
await conn.sendResponseError(DialRefused, outgoingConnection.error.msg)
return
# Safer to always try to cancel because we aren't sure whether the connection was established
defer: outgoingConnection.cancel()
# tryDial is to bypass the global max connections limit
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
let ma = await fut
if ma.isSome:
await conn.sendResponseOk(ma.get())
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
raise exc
except AllFuturesFailedError as exc:
debug "All dial attempts failed", addrs, exc = exc.msg
await conn.sendResponseError(DialError, "All dial attempts failed")
except AsyncTimeoutError as exc:
debug "Dial timeout", addrs, exc = exc.msg
await conn.sendResponseError(DialError, "Dial timeout")
except CatchableError as exc:
debug "Unexpected error", addrs, exc = exc.msg
await conn.sendResponseError(DialError, "Unexpected error")
finally:
autonat.sem.release()
for f in futs:
if not f.finished():
f.cancel()
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
return conn.sendResponseError(BadRequest, "Missing Peer Info")
let peerInfo = msg.dial.get().peerInfo.get()
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
if conn.observedAddr.isNone:
return conn.sendResponseError(BadRequest, "Missing observed address")
let observedAddr = conn.observedAddr.get()
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = observedAddr[0]
if hostIp.isErr() or not IP.match(hostIp.get()):
trace "wrong observed address", address=observedAddr
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(observedAddr)
trace "addrs received", addrs = peerInfo.addrs
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
continue
let maFirst = ma[0]
if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
continue
try:
addrs.incl(
if maFirst.get() == hostIp.get():
ma
else:
let maEnd = ma[1..^1]
if maEnd.isErr(): continue
hostIp.get() & maEnd.get()
)
except LPError as exc:
continue
if len(addrs) >= AddressLimit:
break
if len(addrs) == 0:
return conn.sendResponseError(DialRefused, "No dialable address")
let addrsSeq = toSeq(addrs)
trace "trying to dial", addrs = addrsSeq
return autonat.tryDial(conn, addrsSeq)
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
try:
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
raise newException(AutonatError, "Received malformed message")
let msg = msgOpt.get()
await autonat.handleDial(conn, msg)
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "exception in autonat handler", exc = exc.msg, conn
finally:
trace "exiting autonat handler", conn
await conn.close()
autonat.handler = handleStream
autonat.codec = AutonatCodec
autonat
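
Serving dial-back requests is then a sketch away (assuming `switch.mount`, the standard protocol-mounting call, on an already-built switch):

let autonat = Autonat.new(switch, semSize = 1, dialTimeout = 15.seconds)
switch.mount(autonat)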

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -14,21 +14,26 @@ else:
import std/[options, deques, sequtils]
import chronos, metrics
import ../switch
import ../protocols/[connectivity/autonat]
import ../utils/heartbeat
import ../crypto/crypto
import ../../../switch
import ../../../wire
import client
import ../../../utils/heartbeat
import ../../../crypto/crypto
logScope:
topics = "libp2p autonatservice"
declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability confidence", labels = ["reachability"])
type
AutonatService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
addressMapper: AddressMapper
scheduleHandle: Future[void]
networkReachability: NetworkReachability
confidence: Option[float]
answers: Deque[NetworkReachability]
autonat: Autonat
autonatClient: AutonatClient
statusAndConfidenceHandler: StatusAndConfidenceHandler
rng: ref HmacDrbgContext
scheduleInterval: Option[Duration]
@@ -37,6 +42,7 @@ type
maxQueueSize: int
minConfidence: float
dialTimeout: Duration
enableAddressMapper: bool
NetworkReachability* {.pure.} = enum
NotReachable, Reachable, Unknown
@@ -45,26 +51,28 @@ type
proc new*(
T: typedesc[AutonatService],
autonat: Autonat,
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
scheduleInterval: Option[Duration] = none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
dialTimeout = 5.seconds): T =
dialTimeout = 30.seconds,
enableAddressMapper = true): T =
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
confidence: none(float),
answers: initDeque[NetworkReachability](),
autonat: autonat,
autonatClient: autonatClient,
rng: rng,
askNewConnectedPeers: askNewConnectedPeers,
numPeersToAsk: numPeersToAsk,
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
dialTimeout: dialTimeout)
dialTimeout: dialTimeout,
enableAddressMapper: enableAddressMapper)
proc networkReachability*(self: AutonatService): NetworkReachability {.inline.} =
return self.networkReachability
@@ -73,11 +81,20 @@ proc callHandler(self: AutonatService) {.async.} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
proc hasEnoughIncomingSlots(switch: Switch): bool =
# we leave some margin instead of comparing to 0 as a peer could connect to us while we are asking for the dial back
return switch.connManager.slotsAvailable(In) >= 2
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
if ans == Unknown:
return
if self.answers.len == self.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
@@ -90,60 +107,102 @@ proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
self.networkReachability = reachability
self.confidence = some(confidence)
trace "Current status", currentStats = $self.networkReachability, confidence = $self.confidence
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
proc askPeer(self: AutonatService, s: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
trace "Asking for reachability", peerId = $peerId
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
logScope:
peerId = $peerId
if doesPeerHaveIncomingConn(switch, peerId):
return Unknown
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peer", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
return Unknown
trace "Asking peer for reachability"
let ans =
try:
discard await self.autonat.dialMe(peerId).wait(self.dialTimeout)
discard await self.autonatClient.dialMe(switch, peerId).wait(self.dialTimeout)
debug "dialMe answer is reachable"
Reachable
except AutonatUnreachableError:
trace "dialMe answer is not reachable", peerId = $peerId
except AutonatUnreachableError as error:
debug "dialMe answer is not reachable", msg = error.msg
NotReachable
except AsyncTimeoutError:
trace "dialMe timed out", peerId = $peerId
except AsyncTimeoutError as error:
debug "dialMe timed out", msg = error.msg
Unknown
except CatchableError as err:
trace "dialMe unexpected error", peerId = $peerId, errMsg = $err.msg
except CatchableError as error:
debug "dialMe unexpected error", msg = error.msg
Unknown
await self.handleAnswer(ans)
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
await switch.peerInfo.update()
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
var answersFromPeers = 0
for peer in peers:
if answersFromPeers >= self.numPeersToAsk:
break
elif (await askPeer(self, switch, peer)) != Unknown:
if not hasEnoughIncomingSlots(switch):
debug "No incoming slots available, not asking peers", incomingSlotsAvailable=switch.connManager.slotsAvailable(In)
break
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
heartbeat "Schedule AutonatService run", interval:
heartbeat "Scheduling AutonatService run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
var addrs = newSeq[MultiAddress]()
for listenAddr in listenAddrs:
var processedMA = listenAddr
try:
let hostIP = initTAddress(listenAddr).get()
if not hostIP.isGlobal() and self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
except CatchableError as exc:
debug "Error while handling address mapper", msg = exc.msg
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
await self.callHandler()
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.scheduleInterval.isSome():
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
await self.callHandler()
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
info "Stopping AutonatService"
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
if not isNil(self.scheduleHandle):
@@ -151,6 +210,9 @@ method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public
self.scheduleHandle = nil
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
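
A wiring sketch for the service (handler signature inferred from the calls above; `newRng` comes from the crypto module imported by this file; passing the service to the switch, e.g. via SwitchBuilder's withServices, is assumed):

let service = AutonatService.new(
  AutonatClient(), newRng(),
  scheduleInterval = some(10.minutes))
service.statusAndConfidenceHandler(
  proc(reachability: NetworkReachability, confidence: Option[float]) {.async.} =
    debug "reachability changed", reachability = $reachability, confidence = $confidence)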

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -92,15 +92,15 @@ proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async,
method dial*(
self: RelayTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
result = await self.dial(address)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
if ma.protocols.isOk():
let sma = toSeq(ma.items())
if sma.len >= 3:
result = CircuitRelay.match(sma[^2].get()) and
P2PPattern.match(sma[^1].get())
result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
trace "Handles return", ma, result
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -26,7 +26,10 @@ import ../protobuf/minprotobuf,
../multiaddress,
../protocols/protocol,
../utility,
../errors
../errors,
../observedaddrmanager
export observedaddrmanager
logScope:
topics = "libp2p identify"
@@ -56,6 +59,7 @@ type
Identify* = ref object of LPProtocol
peerInfo*: PeerInfo
sendSignedPeerRecord*: bool
observedAddrManager*: ObservedAddrManager
IdentifyPushHandler* = proc (
peer: PeerId,
@@ -160,7 +164,8 @@ proc new*(
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
)
identify.init()
identify
@@ -182,7 +187,7 @@ method init*(p: Identify) =
p.handler = handle
p.codec = IdentifyCodec
proc identify*(p: Identify,
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
trace "initiating identify", conn
@@ -194,23 +199,25 @@ proc identify*(p: Identify,
let infoOpt = decodeMsg(message)
if infoOpt.isNone():
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
result = infoOpt.get()
if result.pubkey.isSome:
let peer = PeerId.init(result.pubkey.get())
if peer.isErr:
raise newException(IdentityInvalidMsgError, $peer.error)
else:
result.peerId = peer.get()
if peer.get() != remotePeerId:
trace "Peer ids don't match",
remote = peer,
local = remotePeerId
raise newException(IdentityNoMatchError, "Peer ids don't match")
else:
var info = infoOpt.get()
if info.pubkey.isNone():
raise newException(IdentityInvalidMsgError, "No pubkey in identify")
let peer = PeerId.init(info.pubkey.get())
if peer.isErr:
raise newException(IdentityInvalidMsgError, $peer.error)
if peer.get() != remotePeerId:
trace "Peer ids don't match", remote = peer, local = remotePeerId
raise newException(IdentityNoMatchError, "Peer ids don't match")
info.peerId = peer.get()
if info.observedAddr.isSome:
if not self.observedAddrManager.addObservation(info.observedAddr.get()):
debug "Observed address is not valid", observedAddr = info.observedAddr.get()
return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
## Create a IdentifyPush protocol. `handler` will be called every time
## a peer sends us new `PeerInfo`

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[sequtils, sets, hashes, tables]
import std/[sets, hashes, tables]
import chronos, chronicles, metrics
import ./pubsub,
./pubsubpeer,

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -14,7 +14,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[tables, sets, options, sequtils]
import std/[sets, sequtils]
import chronos, chronicles, metrics
import ./pubsub,
./floodsub,
@@ -158,8 +158,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
peer.appScore = stats.appScore
peer.behaviourPenalty = stats.behaviourPenalty
peer.iWantBudget = IWantPeerBudget
peer.iHaveBudget = IHavePeerBudget
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
case event.kind
@@ -340,7 +339,7 @@ proc validateAndRelay(g: GossipSub,
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
trace "forwared message to peers", peers = toSendPeers.len, msgId, peer
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
for topic in msg.topicIds:
if topic notin g.topics: continue

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[tables, sequtils, sets, algorithm]
import std/[tables, sequtils, sets, algorithm, deques]
import chronos, chronicles, metrics
import "."/[types, scoring]
import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]
@@ -31,7 +31,7 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no
declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups")
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
@@ -130,6 +130,10 @@ proc handleGraft*(g: GossipSub,
continue
if g.mesh.hasPeer(topic, peer):
trace "peer already in mesh", peer, topic
continue
# Check backingOff
# Ignore BackoffSlackTime here, since this is only for outbound activity
# and subtract a second time to avoid race conditions
@@ -176,6 +180,10 @@ proc handleGraft*(g: GossipSub,
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64))
let backoff = Moment.fromNow(g.parameters.pruneBackoff)
g.backingOff
.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] = backoff
else:
trace "peer grafting topic we're not interested in", peer, topic
# gossip 1.1, we do not send a control message prune anymore
@@ -272,28 +280,30 @@ proc handleIHave*(g: GossipSub,
proc handleIWant*(g: GossipSub,
peer: PubSubPeer,
iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
var messages: seq[Message]
var
messages: seq[Message]
invalidRequests = 0
if peer.score < g.parameters.gossipThreshold:
trace "iwant: ignoring low score peer", peer, score = peer.score
elif peer.iWantBudget <= 0:
trace "iwant: ignoring out of budget peer", peer, score = peer.score
else:
let deIwants = iwants.deduplicate()
for iwant in deIwants:
let deIwantsMsgs = iwant.messageIds.deduplicate()
for mid in deIwantsMsgs:
for iwant in iwants:
for mid in iwant.messageIds:
trace "peer sent iwant", peer, messageID = mid
# canAskIWant will only return true once for a specific message
if not peer.canAskIWant(mid):
libp2p_gossipsub_received_iwants.inc(1, labelValues=["notsent"])
invalidRequests.inc()
if invalidRequests > 20:
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
return messages
continue
let msg = g.mcache.get(mid)
if msg.isSome:
libp2p_gossipsub_mcache_hit.observe(1)
# avoid spam
if peer.iWantBudget > 0:
messages.add(msg.get())
dec peer.iWantBudget
else:
break
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
messages.add(msg.get())
else:
libp2p_gossipsub_mcache_hit.observe(0)
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
return messages
proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
@@ -616,8 +626,11 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
g.rng.shuffle(allPeers)
allPeers.setLen(target)
let msgIdsAsSet = ihave.messageIds.toHashSet()
for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
peer.sentIHaves[^1].incl(msgIdsAsSet)
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
@@ -628,7 +641,9 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
# reset IHAVE cap
block:
for peer in g.peers.values:
peer.iWantBudget = IWantPeerBudget
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
if peer.sentIHaves.len > g.parameters.historyLength:
discard peer.sentIHaves.popLast()
peer.iHaveBudget = IHavePeerBudget
var meshMetrics = MeshMetrics()

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -295,11 +295,11 @@ proc rewardDelivered*(
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
stats.topicInfos.withValue(tt, tstats):
if tstats[].inMesh:
if first:
tstats[].firstMessageDeliveries.addCapped(
1, topicParams.firstMessageDeliveriesCap)
if first:
tstats[].firstMessageDeliveries.addCapped(
1, topicParams.firstMessageDeliveriesCap)
if tstats[].inMesh:
tstats[].meshMessageDeliveries.addCapped(
1, topicParams.meshMessageDeliveriesCap)
do: # make sure we don't lose this information

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -13,11 +13,13 @@ else:
{.push raises: [].}
import chronos
import std/[tables, sets]
import std/[options, tables, sets]
import ".."/[floodsub, peertable, mcache, pubsubpeer]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress, utility]
export options, tables, sets
const
GossipSubCodec* = "/meshsub/1.1.0"
GossipSubCodec_10* = "/meshsub/1.0.0"
@@ -46,7 +48,6 @@ const
const
BackoffSlackTime* = 2 # seconds
IWantPeerBudget* = 25 # 25 messages per second ( reset every heartbeat )
IHavePeerBudget* = 10
# the max amount of IHave to expose, not by spec, but go as example
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -15,6 +15,8 @@ else:
import std/[tables, sets]
import ./pubsubpeer, ../../peerid
export tables, sets
type
PeerTable* = Table[string, HashSet[PubSubPeer]] # topic string to peer map

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -36,6 +36,7 @@ import metrics
import stew/results
export results
export tables, sets
export PubSubPeer
export PubSubObserver
export protocol
@@ -118,7 +119,7 @@ type
anonymize*: bool ## if we omit fromPeer and seqno from RPC messages we send
subscriptionValidator*: SubscriptionValidator # callback used to validate subscriptions
topicsHigh*: int ## the maximum number of topics a peer is allowed to subscribe to
maxMessageSize*: int ##\
## the maximum raw message size we'll globally allow
## for finer tuning, check message size on topic validator
##
@@ -202,7 +203,7 @@ proc broadcast*(
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded)
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
@@ -307,8 +308,6 @@ proc getOrCreatePeer*(
# metrics
libp2p_pubsub_peers.set(p.peers.len.int64)
pubSubPeer.connect()
return pubSubPeer
proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
@@ -382,7 +381,8 @@ method subscribePeer*(p: PubSub, peer: PeerId) {.base, gcsafe.} =
## messages
##
discard p.getOrCreatePeer(peer, p.codecs)
let pubSubPeer = p.getOrCreatePeer(peer, p.codecs)
pubSubPeer.connect()
proc updateTopicMetrics(p: PubSub, topic: string) =
# metrics
@@ -406,7 +406,11 @@ method onTopicSubscription*(p: PubSub, topic: string, subscribed: bool) {.base,
# Notify others that we are no longer interested in the topic
for _, peer in p.peers:
p.sendSubs(peer, [topic], subscribed)
# If we don't have a sendConn yet, we will
# send the full sub list when we get the sendConn,
# so no need to send it here
if peer.hasSendConn:
p.sendSubs(peer, [topic], subscribed)
if subscribed:
libp2p_pubsub_subscriptions.inc()

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -12,7 +12,7 @@ when (NimMajor, NimMinor) < (1, 4):
else:
{.push raises: [].}
import std/[sequtils, strutils, tables, hashes, options]
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import rpc/[messages, message, protobuf],
@@ -55,24 +55,31 @@ type
onEvent*: OnEvent # Connectivity updates for peer
codec*: string # the protocol that this peer joined from
sendConn*: Connection # cached send connection
connectedFut: Future[void]
address*: Option[MultiAddress]
peerId*: PeerId
handler*: RPCHandler
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
score*: float64
iWantBudget*: int
sentIHaves*: Deque[HashSet[MessageId]]
iHaveBudget*: int
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
when defined(libp2p_agents_metrics):
shortAgent*: string
RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
{.gcsafe, raises: [Defect].}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
if p.sendConn.isNil or p.sendConn.getWrapped().isNil:
"unknown"
else:
# TODO: the sendConn is set up before identify,
# so we have to read the parent's short agent
p.sendConn.getWrapped().shortAgent
func hash*(p: PubSubPeer): Hash =
p.peerId.hash
@@ -165,6 +172,8 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn()
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")
@@ -174,6 +183,11 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# stop working so we make an effort to only keep a single channel alive
trace "Get new send connection", p, newConn
# Careful of race conditions here.
# Topic subscription relies on either connectedFut
# to be completed, or onEvent to be called later
p.connectedFut.complete()
p.sendConn = newConn
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)
@@ -208,27 +222,13 @@ proc connectImpl(p: PubSubPeer) {.async.} =
debug "Could not establish send connection", msg = exc.msg
proc connect*(p: PubSubPeer) =
if p.connected:
return
asyncSpawn connectImpl(p)
proc sendImpl(conn: Connection, encoded: seq[byte]): Future[void] {.raises: [Defect].} =
trace "sending encoded msgs to peer", conn, encoded = shortLog(encoded)
let fut = conn.writeLp(encoded) # Avoid copying `encoded` into future
proc sendWaiter(): Future[void] {.async.} =
try:
await fut
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
trace "Unable to send to remote", conn, msg = exc.msg
# Next time sendConn is used, it will have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
return sendWaiter()
proc hasSendConn*(p: PubSubPeer): bool =
p.sendConn != nil
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
@@ -237,7 +237,7 @@ template sendMetrics(msg: RPCMsg): untyped =
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} =
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:
@@ -248,14 +248,27 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect].} =
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
let conn = p.sendConn
if p.sendConn == nil:
discard await p.connectedFut.withTimeout(1.seconds)
var conn = p.sendConn
if conn == nil or conn.closed():
trace "No send connection, skipping message", p, msg = shortLog(msg)
debug "No send connection, skipping message", p, msg = shortLog(msg)
return
# To limit the size of the closure, we only pass the encoded message and
# connection to the spawned send task
asyncSpawn sendImpl(conn, msg)
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
try:
await conn.writeLp(msg)
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
trace "Unable to send to remote", conn, msg = exc.msg
# Next time sendConn is used, it will have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
@@ -277,7 +290,14 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
p.sendEncoded(encoded)
asyncSpawn p.sendEncoded(encoded)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
if msgId in sentIHave:
sentIHave.excl(msgId)
return true
return false
proc new*(
T: typedesc[PubSubPeer],
@@ -287,10 +307,12 @@ proc new*(
codec: string,
maxMessageSize: int): T =
T(
result = T(
getConn: getConn,
onEvent: onEvent,
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
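
The one-shot IWANT permission can be illustrated standalone; this mirrors what canAskIWant does over the sentIHaves deque (MessageId is a byte sequence in this codebase, so plain seq[byte] stands in for it here):

import std/[deques, sets]

var sentIHaves = initDeque[HashSet[seq[byte]]]()
sentIHaves.addFirst(default(HashSet[seq[byte]]))
let mid = @[1'u8, 2, 3]
sentIHaves[0].incl(mid)                  # we advertised mid in an IHAVE
# the first matching IWANT consumes the permission:
assert sentIHaves[0].contains(mid)
sentIHaves[0].excl(mid)
assert not sentIHaves[0].contains(mid)   # a repeat IWANT for mid is now rejected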

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -14,7 +14,7 @@ else:
import std/[tables]
import chronos/timer
import chronos/timer, stew/results
const Timeout* = 10.seconds # default timeout in ms
@@ -22,6 +22,7 @@ type
TimedEntry*[K] = ref object of RootObj
key: K
addedAt: Moment
expiresAt: Moment
next, prev: TimedEntry[K]
TimedCache*[K] = object of RootObj
@@ -30,15 +31,14 @@ type
timeout: Duration
func expire*(t: var TimedCache, now: Moment = Moment.now()) =
let expirationLimit = now - t.timeout
while t.head != nil and t.head.addedAt < expirationLimit:
while t.head != nil and t.head.expiresAt < now:
t.entries.del(t.head.key)
t.head.prev = nil
t.head = t.head.next
if t.head == nil: t.tail = nil
func del*[K](t: var TimedCache[K], key: K): bool =
# Removes existing key from cache, returning false if it was not present
func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
# Removes existing key from cache, returning the previous value if present
var item: TimedEntry[K]
if t.entries.pop(key, item):
if t.head == item: t.head = item.next
@@ -46,9 +46,9 @@ func del*[K](t: var TimedCache[K], key: K): bool =
if item.next != nil: item.next.prev = item.prev
if item.prev != nil: item.prev.next = item.next
true
Opt.some(item)
else:
false
Opt.none(TimedEntry[K])
func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# Puts k in cache, returning true if the item was already present and false
@@ -56,9 +56,13 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# refreshed.
t.expire(now)
var res = t.del(k) # Refresh existing item
var previous = t.del(k) # Refresh existing item
let node = TimedEntry[K](key: k, addedAt: now)
let addedAt =
if previous.isSome: previous.get().addedAt
else: now
let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)
if t.head == nil:
t.tail = node
@@ -66,7 +70,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
else:
# search from tail because typically that's where we add when now grows
var cur = t.tail
while cur != nil and node.addedAt < cur.addedAt:
while cur != nil and node.expiresAt < cur.expiresAt:
cur = cur.prev
if cur == nil:
@@ -82,7 +86,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
t.entries[k] = node
res
previous.isSome()
func contains*[K](t: TimedCache[K], k: K): bool =
k in t.entries
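
A behaviour sketch for the new expiry logic (the module's init helper is assumed; Moment arithmetic from chronos). Re-putting a key refreshes expiresAt while, per the change above, preserving the original addedAt:

var cache = TimedCache[string].init(5.seconds)
let t0 = Moment.now()
assert cache.put("a", t0) == false        # first insert: key was not present
assert cache.put("a", t0 + 1.seconds)     # refresh: returns true, expiry moves to t0 + 6s
assert "a" in cache
cache.expire(t0 + 7.seconds)              # past the refreshed expiry
assert "a" notin cache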

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -56,7 +56,6 @@ proc new*(T: type SecureConn,
peerId: peerId,
observedAddr: observedAddr,
closeEvent: conn.closeEvent,
upgraded: conn.upgraded,
timeout: timeout,
dir: conn.dir)
result.initStream()

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -0,0 +1,146 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import chronos, chronicles, times, tables, sequtils, options
import ../switch,
../protocols/connectivity/relay/[client, utils]
logScope:
topics = "libp2p autorelay"
type
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}
AutoRelayService* = ref object of Service
running: bool
runner: Future[void]
client: RelayClient
numRelays: int
relayPeers: Table[PeerId, Future[void]]
relayAddresses: Table[PeerId, seq[MultiAddress]]
backingOff: seq[PeerId]
peerAvailable: AsyncEvent
onReservation: OnReservationHandler
addressMapper: AddressMapper
rng: ref HmacDrbgContext
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
while self.running:
let
rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
relayedAddr = rsvp.addrs.mapIt(
MultiAddress.init($it & "/p2p-circuit").tryGet())
ttl = rsvp.expire.int64 - times.now().utc.toTime.toUnix
if ttl <= 60:
# A reservation under a minute is basically useless
break
if relayPid notin self.relayAddresses or self.relayAddresses[relayPid] != relayedAddr:
self.relayAddresses[relayPid] = relayedAddr
await switch.peerInfo.update()
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
if hasBeenSetUp:
proc handlePeerJoined(peerId: PeerId, event: PeerEvent) {.async.} =
trace "Peer Joined", peerId
if self.relayPeers.len < self.numRelays:
self.peerAvailable.fire()
proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
trace "Peer Left", peerId
self.relayPeers.withValue(peerId, future):
future[].cancel()
switch.addPeerEventHandler(handlePeerJoined, Joined)
switch.addPeerEventHandler(handlePeerLeft, Left)
switch.peerInfo.addressMappers.add(self.addressMapper)
await self.run(switch)
return hasBeenSetUp
proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
await sleepAsync(chronos.seconds(5))
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
for k in peers:
if self.relayPeers[k].finished():
self.relayPeers.del(k)
self.relayAddresses.del(k)
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
# Back off to avoid DDoSing our peers in certain conditions
self.backingOff.add(k)
asyncSpawn self.manageBackedOff(k)
# Get all connected relayPeers
self.peerAvailable.clear()
var connectedPeers = switch.connectedPeers(Direction.Out)
connectedPeers.keepItIf(RelayV2HopCodec in switch.peerStore[ProtoBook][it] and
it notin self.relayPeers and
it notin self.backingOff)
self.rng.shuffle(connectedPeers)
for relayPid in connectedPeers:
if self.relayPeers.len() >= self.numRelays:
break
self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)
if self.relayPeers.len() > 0:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
else:
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false
self.runner.cancel()
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc getAddresses*(self: AutoRelayService): seq[MultiAddress] =
result = concat(toSeq(self.relayAddresses.values))
proc new*(T: typedesc[AutoRelayService],
numRelays: int,
client: RelayClient,
onReservation: OnReservationHandler,
rng: ref HmacDrbgContext): T =
T(numRelays: numRelays,
client: client,
onReservation: onReservation,
peerAvailable: newAsyncEvent(),
rng: rng)
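
Wiring the service up is a matter of constructing it with a relay client and passing it through the services parameter that newSwitch gains in the switch.nim diff further down. A hedged sketch; RelayClient.new's defaults and the autorelayservice module path are assumptions:

import chronos
import libp2p
import libp2p/protocols/connectivity/relay/client
import libp2p/services/autorelayservice  # path assumed

proc reservationChanged(addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].} =
  echo "relayed addresses: ", addresses

let
  rng = newRng()
  relayClient = RelayClient.new()  # assumed default construction
  autorelay = AutoRelayService.new(
    numRelays = 2,                 # keep reservations with up to two relays
    client = relayClient,
    onReservation = reservationChanged,
    rng = rng)
# then: newSwitch(..., services = @[Service(autorelay)])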

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -103,11 +103,11 @@ method readOnce*(s: ChronosStream, pbytes: pointer, nbytes: int): Future[int] {.
withExceptions:
result = await s.client.readOnce(pbytes, nbytes)
s.activity = true # reset activity flag
libp2p_network_bytes.inc(nbytes.int64, labelValues = ["in"])
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
when defined(libp2p_agents_metrics):
s.trackPeerIdentity()
if s.tracked:
libp2p_peers_traffic_read.inc(nbytes.int64, labelValues = [s.shortAgent])
libp2p_peers_traffic_read.inc(result.int64, labelValues = [s.shortAgent])
proc completeWrite(
s: ChronosStream, fut: Future[int], msgLen: int): Future[void] {.async.} =

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -39,7 +39,6 @@ type
timeoutHandler*: TimeoutHandler # timeout handler
peerId*: PeerId
observedAddr*: Opt[MultiAddress]
upgraded*: Future[void]
protocol*: string # protocol used by the connection, used as tag for metrics
transportDir*: Direction # The bottom level transport (generally the socket) direction
when defined(libp2p_agents_metrics):
@@ -47,22 +46,6 @@ type
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc isUpgraded*(s: Connection): bool =
if not isNil(s.upgraded):
return s.upgraded.finished
proc upgrade*(s: Connection, failed: ref CatchableError = nil) =
if not isNil(s.upgraded):
if not isNil(failed):
s.upgraded.fail(failed)
return
s.upgraded.complete()
proc onUpgrade*(s: Connection) {.async.} =
if not isNil(s.upgraded):
await s.upgraded
func shortLog*(conn: Connection): string =
try:
if conn.isNil: "Connection(nil)"
@@ -80,9 +63,6 @@ method initStream*(s: Connection) =
doAssert(isNil(s.timerTaskFut))
if isNil(s.upgraded):
s.upgraded = newFuture[void]()
if s.timeout > 0.millis:
trace "Monitoring for timeout", s, timeout = s.timeout
@@ -100,10 +80,6 @@ method closeImpl*(s: Connection): Future[void] =
s.timerTaskFut.cancel()
s.timerTaskFut = nil
if not isNil(s.upgraded) and not s.upgraded.finished:
s.upgraded.cancel()
s.upgraded = nil
trace "Closed connection", s
procCall LPStream(s).closeImpl()
@@ -158,6 +134,13 @@ proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
method getWrapped*(s: Connection): Connection {.base.} =
doAssert(false, "not implemented!")
when defined(libp2p_agents_metrics):
proc setShortAgent*(s: Connection, shortAgent: string) =
var conn = s
while not isNil(conn):
conn.shortAgent = shortAgent
conn = conn.getWrapped()
proc new*(C: type Connection,
peerId: PeerId,
dir: Direction,

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -148,10 +148,11 @@ method connect*(
s: Switch,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false): Future[void] {.public.} =
forceDial = false,
reuseConnection = true): Future[void] {.public.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial)
s.dialer.connect(peerId, addrs, forceDial, reuseConnection)
method connect*(
s: Switch,
@@ -219,24 +220,27 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
s.ms.addHandler(proto.codecs, proto, matcher)
s.peerInfo.protocols.add(proto.codec)
proc upgradeMonitor(conn: Connection, upgrades: AsyncSemaphore) {.async.} =
## monitor connection for upgrades
##
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgradeIncoming(conn)
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
trace "Connection upgrade succeeded"
proc upgradeMonitor(
switch: Switch,
trans: Transport,
conn: Connection,
upgrades: AsyncSemaphore) {.async.} =
try:
# Since we don't control the flow of the
# upgrade, this timeout guarantees that a
# hung remote doesn't hold the upgrade
# forever
await conn.onUpgrade.wait(30.seconds) # wait for connection to be upgraded
trace "Connection upgrade succeeded"
await switch.upgrader(trans, conn).wait(30.seconds)
except CatchableError as exc:
libp2p_failed_upgrades_incoming.inc()
if exc isnot CancelledError:
libp2p_failed_upgrades_incoming.inc()
if not isNil(conn):
await conn.close()
trace "Exception awaiting connection upgrade", exc = exc.msg, conn
finally:
upgrades.release() # don't forget to release the slot!
upgrades.release()
proc accept(s: Switch, transport: Transport) {.async.} = # noraises
## switch accept loop, ran for every transport
@@ -277,8 +281,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
conn.transportDir = Direction.In
debug "Accepted an incoming connection", conn
asyncSpawn upgradeMonitor(conn, upgrades)
asyncSpawn transport.upgradeIncoming(conn)
asyncSpawn s.upgradeMonitor(transport, conn, upgrades)
except CancelledError as exc:
trace "releasing semaphore on cancellation"
upgrades.release() # always release the slot
@@ -296,6 +299,10 @@ proc stop*(s: Switch) {.async, public.} =
trace "Stopping switch"
s.started = false
for service in s.services:
discard await service.stop(s)
# close and cleanup all connections
await s.connManager.close()
@@ -372,14 +379,13 @@ proc start*(s: Switch) {.async, gcsafe, public.} =
proc newSwitch*(peerInfo: PeerInfo,
transports: seq[Transport],
identity: Identify,
secureManagers: openArray[Secure] = [],
connManager: ConnManager,
ms: MultistreamSelect,
peerStore: PeerStore,
nameResolver: NameResolver = nil,
peerStore = PeerStore.new(),
services = newSeq[Service]()): Switch
{.raises: [Defect, LPError], public.} =
{.raises: [Defect, LPError].} =
if secureManagers.len == 0:
raise newException(LPError, "Provide at least one secure manager")
@@ -389,11 +395,9 @@ proc newSwitch*(peerInfo: PeerInfo,
transports: transports,
connManager: connManager,
peerStore: peerStore,
dialer: Dialer.new(peerInfo.peerId, connManager, transports, ms, nameResolver),
dialer: Dialer.new(peerInfo.peerId, connManager, peerStore, transports, nameResolver),
nameResolver: nameResolver,
services: services)
switch.connManager.peerStore = peerStore
switch.mount(identity)
return switch
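
For callers, the new reuseConnection flag makes the dial-or-reuse choice explicit; a short sketch with switch, remotePeerId and remoteAddrs assumed in scope:

await switch.connect(remotePeerId, remoteAddrs)                           # default: reuse an existing connection
await switch.connect(remotePeerId, remoteAddrs, reuseConnection = false)  # force a fresh connection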

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -42,6 +42,7 @@ type
servers*: seq[StreamServer]
clients: array[Direction, seq[StreamTransport]]
flags: set[ServerFlags]
clientFlags: set[TransportFlags]
acceptFuts: seq[Future[StreamTransport]]
TcpTransportTracker* = ref object of TrackerBase
@@ -129,8 +130,18 @@ proc new*(
flags: set[ServerFlags] = {},
upgrade: Upgrade): T {.public.} =
let transport = T(
flags: flags,
let
transport = T(
flags: flags,
clientFlags:
if ServerFlags.TcpNoDelay in flags:
compilesOr:
{TransportFlags.TcpNoDelay}
do:
doAssert(false)
default(set[TransportFlags])
else:
default(set[TransportFlags]),
upgrader: upgrade)
return transport
@@ -246,13 +257,14 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
## dial a peer
##
trace "Dialing remote peer", address = $address
let transp = await connect(address)
let transp = await connect(address, flags = self.clientFlags)
try:
let observedAddr = await getObservedAddr(transp)
return await self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
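
With this change, passing TcpNoDelay once at construction covers both directions: the server flag applies to accepted sockets, and the derived clientFlags are handed to outgoing dials. A sketch, assuming an Upgrade instance is in scope:

import chronos  # ServerFlags

let transport = TcpTransport.new(
  flags = {ServerFlags.TcpNoDelay},  # mirrored onto outgoing sockets via clientFlags
  upgrade = upgrade)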

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -188,7 +188,8 @@ proc dialPeer(
method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
## dial a peer
##
if not handlesDial(address):
@@ -268,7 +269,7 @@ proc new*(
transports: switch.transports,
connManager: switch.connManager,
peerStore: switch.peerStore,
dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.transports, switch.ms, nil),
dialer: Dialer.new(switch.peerInfo.peerId, switch.connManager, switch.peerStore, switch.transports, nil),
nameResolver: nil)
torSwitch.connManager.peerStore = switch.peerStore

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -18,6 +18,7 @@ import chronos, chronicles
import ../stream/connection,
../multiaddress,
../multicodec,
../muxers/muxer,
../upgrademngrs/upgrade
logScope:
@@ -65,7 +66,8 @@ method accept*(self: Transport): Future[Connection]
method dial*(
self: Transport,
hostname: string,
address: MultiAddress): Future[Connection] {.base, gcsafe.} =
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.base, gcsafe.} =
## dial a peer
##
@@ -73,12 +75,13 @@ method dial*(
proc dial*(
self: Transport,
address: MultiAddress): Future[Connection] {.gcsafe.} =
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.gcsafe.} =
self.dial("", address)
method upgradeIncoming*(
self: Transport,
conn: Connection): Future[void] {.base, gcsafe.} =
conn: Connection): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
@@ -88,7 +91,7 @@ method upgradeIncoming*(
method upgradeOutgoing*(
self: Transport,
conn: Connection,
peerId: Opt[PeerId]): Future[Connection] {.base, gcsafe.} =
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
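
The extra parameter lets transports that can authenticate the remote while dialing make use of the expected PeerId; transports like TCP simply ignore it, and existing call sites keep working unchanged. For illustration (transport, ma and remotePeerId assumed in scope; Opt comes from stew/results):

let conn = await transport.dial("", ma)                           # as before
let pinned = await transport.dial("", ma, Opt.some(remotePeerId)) # pin the expected peer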

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -292,7 +292,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress): Future[Connection] {.async, gcsafe.} =
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
## dial a peer
##

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -30,35 +30,24 @@ type
proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
for m in self.muxers:
if muxerName in m.codecs:
if muxerName == m.codec:
return m
proc identify*(
self: MuxedUpgrade,
muxer: Muxer) {.async, gcsafe.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
return
try:
await self.identify(stream)
when defined(libp2p_agents_metrics):
muxer.connection.shortAgent = stream.shortAgent
finally:
await stream.closeWithEOF()
proc mux*(
self: MuxedUpgrade,
conn: Connection): Future[Muxer] {.async, gcsafe.} =
## mux outgoing connection
conn: Connection,
direction: Direction): Future[Muxer] {.async, gcsafe.} =
## mux connection
trace "Muxing connection", conn
if self.muxers.len == 0:
warn "no muxers registered, skipping upgrade flow", conn
return
let muxerName = await self.ms.select(conn, self.muxers.mapIt(it.codec))
let muxerName =
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
if muxerName.len == 0 or muxerName == "na":
debug "no muxer available, early exit", conn
return
@@ -70,36 +59,23 @@ proc mux*(
# install stream handler
muxer.streamHandler = self.streamHandler
self.connManager.storeConn(conn)
# store it in muxed connections if we have a peer for it
self.connManager.storeMuxer(muxer, muxer.handle()) # store muxer and start read loop
try:
await self.identify(muxer)
except CatchableError as exc:
# Identify is non-essential, though if it fails, it might indicate that
# the connection was closed already - this will be picked up by the read
# loop
debug "Could not identify connection", conn, msg = exc.msg
muxer.handler = muxer.handle()
return muxer
method upgradeOutgoing*(
proc upgrade(
self: MuxedUpgrade,
conn: Connection,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
trace "Upgrading outgoing connection", conn
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction
let sconn = await self.secure(conn, peerId) # secure the connection
let sconn = await self.secure(conn, direction, peerId) # secure the connection
if isNil(sconn):
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
let muxer = await self.mux(sconn) # mux it if possible
let muxer = await self.mux(sconn, direction) # mux it if possible
if muxer == nil:
# TODO this might be relaxed in the future
raise newException(UpgradeFailedError,
"a muxer is required for outgoing connections")
@@ -111,108 +87,28 @@ method upgradeOutgoing*(
raise newException(UpgradeFailedError,
"Connection closed or missing peer info, stopping upgrade")
trace "Upgraded outgoing connection", conn, sconn
trace "Upgraded connection", conn, sconn, direction
return muxer
return sconn
method upgradeOutgoing*(
self: MuxedUpgrade,
conn: Connection,
peerId: Opt[PeerId]): Future[Muxer] {.async, gcsafe.} =
return await self.upgrade(conn, Out, peerId)
method upgradeIncoming*(
self: MuxedUpgrade,
incomingConn: Connection) {.async, gcsafe.} = # noraises
trace "Upgrading incoming connection", incomingConn
let ms = MultistreamSelect.new()
# secure incoming connections
proc securedHandler(conn: Connection,
proto: string)
{.async, gcsafe, closure.} =
trace "Starting secure handler", conn
let secure = self.secureManagers.filterIt(it.codec == proto)[0]
var cconn = conn
try:
var sconn = await secure.secure(cconn, false, Opt.none(PeerId))
if isNil(sconn):
return
cconn = sconn
# add the muxer
for muxer in self.muxers:
ms.addHandler(muxer.codecs, muxer)
# handle subsequent secure requests
await ms.handle(cconn)
except CatchableError as exc:
debug "Exception in secure handler during incoming upgrade", msg = exc.msg, conn
if not cconn.isUpgraded:
cconn.upgrade(exc)
finally:
if not isNil(cconn):
await cconn.close()
trace "Stopped secure handler", conn
try:
if (await ms.select(incomingConn)): # just handshake
# add the secure handlers
for k in self.secureManagers:
ms.addHandler(k.codec, securedHandler)
# handle un-secured connections
# we handshaked above, set this ms handler as active
await ms.handle(incomingConn, active = true)
except CatchableError as exc:
debug "Exception upgrading incoming", exc = exc.msg
if not incomingConn.isUpgraded:
incomingConn.upgrade(exc)
finally:
if not isNil(incomingConn):
await incomingConn.close()
proc muxerHandler(
self: MuxedUpgrade,
muxer: Muxer) {.async, gcsafe.} =
let
conn = muxer.connection
# store incoming connection
self.connManager.storeConn(conn)
# store muxer and muxed connection
self.connManager.storeMuxer(muxer)
try:
await self.identify(muxer)
when defined(libp2p_agents_metrics):
#TODO Passing data between layers is a pain
if muxer.connection of SecureConn:
let secureConn = (SecureConn)muxer.connection
secureConn.stream.shortAgent = muxer.connection.shortAgent
except IdentifyError as exc:
# Identify is non-essential, though if it fails, it might indicate that
# the connection was closed already - this will be picked up by the read
# loop
debug "Could not identify connection", conn, msg = exc.msg
except LPStreamClosedError as exc:
debug "Identify stream closed", conn, msg = exc.msg
except LPStreamEOFError as exc:
debug "Identify stream EOF", conn, msg = exc.msg
except CancelledError as exc:
await muxer.close()
raise exc
except CatchableError as exc:
await muxer.close()
trace "Exception in muxer handler", conn, msg = exc.msg
conn: Connection): Future[Muxer] {.async, gcsafe.} =
return await self.upgrade(conn, In, Opt.none(PeerId))
proc new*(
T: type MuxedUpgrade,
identity: Identify,
muxers: seq[MuxerProvider],
secureManagers: openArray[Secure] = [],
connManager: ConnManager,
ms: MultistreamSelect): T =
let upgrader = T(
identity: identity,
muxers: muxers,
secureManagers: @secureManagers,
connManager: connManager,
@@ -231,10 +127,4 @@ proc new*(
await conn.closeWithEOF()
trace "Stream handler done", conn
for _, val in muxers:
val.streamHandler = upgrader.streamHandler
val.muxerHandler = proc(muxer: Muxer): Future[void]
{.raises: [Defect].} =
upgrader.muxerHandler(muxer)
return upgrader
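
Taken together with the switch.nim changes above, the upgrade path is now symmetric: both directions yield a Muxer, and the caller, not the upgrader, stores it and runs identify. A sketch of the resulting flow, mirroring the upgrader proc from the switch.nim diff:

let muxer = await transport.upgradeOutgoing(conn, Opt.some(remotePeerId))
switch.connManager.storeMuxer(muxer)
await switch.peerStore.identify(muxer)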

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -19,6 +19,7 @@ import pkg/[chronos, chronicles, metrics]
import ../stream/connection,
../protocols/secure/secure,
../protocols/identify,
../muxers/muxer,
../multistream,
../peerstore,
../connmanager,
@@ -37,29 +38,31 @@ type
Upgrade* = ref object of RootObj
ms*: MultistreamSelect
identity*: Identify
connManager*: ConnManager
secureManagers*: seq[Secure]
method upgradeIncoming*(
self: Upgrade,
conn: Connection): Future[void] {.base.} =
conn: Connection): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
method upgradeOutgoing*(
self: Upgrade,
conn: Connection,
peerId: Opt[PeerId]): Future[Connection] {.base.} =
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
proc secure*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
if self.secureManagers.len <= 0:
raise newException(UpgradeFailedError, "No secure managers registered!")
let codec = await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
let codec =
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
if codec.len == 0:
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@@ -70,30 +73,4 @@ proc secure*(
# let's avoid duplicating checks but detect if it fails to do it properly
doAssert(secureProtocol.len > 0)
return await secureProtocol[0].secure(conn, true, peerId)
proc identify*(
self: Upgrade,
conn: Connection) {.async, gcsafe.} =
## identify the connection
if (await self.ms.select(conn, self.identity.codec)):
let
info = await self.identity.identify(conn, conn.peerId)
peerStore = self.connManager.peerStore
if info.pubkey.isNone and isNil(conn):
raise newException(UpgradeFailedError,
"no public key provided and no existing peer identity found")
conn.peerId = info.peerId
when defined(libp2p_agents_metrics):
conn.shortAgent = "unknown"
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
conn.shortAgent = shortAgent.get()
peerStore.updatePeerInfo(info)
trace "identified remote peer", conn, peerId = shortLog(conn.peerId)
return await secureProtocol[0].secure(conn, direction == Out, peerId)

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -19,6 +19,12 @@ template public* {.pragma.}
const
ShortDumpMax = 12
template compilesOr*(a, b: untyped): untyped =
when compiles(a):
a
else:
b
func shortLog*(item: openArray[byte]): string =
if item.len <= ShortDumpMax:
result = item.toHex()
@@ -57,5 +63,5 @@ when defined(libp2p_agents_metrics):
err("toLowerAscii failed")
const
KnownLibP2PAgents* {.strdefine.} = ""
KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"
KnownLibP2PAgentsSeq* = KnownLibP2PAgents.safeToLowerAscii().tryGet().split(",")
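
compilesOr is a small compile-time feature probe: the first branch is used when it typechecks, the fallback otherwise. A hypothetical wrapper mirroring the wire.nim hunk further down, where newer chronos releases accept a flags parameter that older ones lack:

import chronos

proc connectCompat(address: TransportAddress): Future[StreamTransport] =
  compilesOr:
    connect(address, flags = {TransportFlags.TcpNoDelay})
  do:
    connect(address)  # older chronos: connect has no flags parameter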

35 libp2p/utils/future.nim Normal file
View File

@@ -0,0 +1,35 @@
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import chronos
type
AllFuturesFailedError* = object of CatchableError
proc anyCompleted*[T](futs: seq[Future[T]]): Future[Future[T]] {.async.} =
## Returns a future that resolves to the first future in `futs` to complete
## successfully. If all futures fail, or `futs` is empty, the returned future
## fails with AllFuturesFailedError.
var requests = futs
while true:
if requests.len == 0:
raise newException(AllFuturesFailedError, "None of the futures completed successfully")
var raceFut = await one(requests)
if raceFut.completed:
return raceFut
let index = requests.find(raceFut)
requests.del(index)
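
In short, one() supplies "first to finish" and anyCompleted narrows that to "first to finish successfully", failing only once no candidates remain. A small self-contained sketch, with the module path taken from the header above:

import chronos
import libp2p/utils/future

proc work(ms: int): Future[int] {.async.} =
  await sleepAsync(ms.millis)
  if ms mod 2 == 0:
    raise newException(ValueError, "even workers fail")
  return ms

proc demo() {.async.} =
  let winner = await anyCompleted(@[work(10), work(15), work(50)])
  echo winner.read()  # -> 15: work(10) fails first and is skipped

waitFor demo()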

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

View File

@@ -1,5 +1,5 @@
# Nim-Libp2p
# Copyright (c) 2022 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -14,7 +14,7 @@ else:
## This module implements wire network connection procedures.
import chronos, stew/endians2
import multiaddress, multicodec, errors
import multiaddress, multicodec, errors, utility
when defined(windows):
import winlean
@@ -76,7 +76,8 @@ proc initTAddress*(ma: MultiAddress): MaResult[TransportAddress] =
proc connect*(
ma: MultiAddress,
bufferSize = DefaultStreamBufferSize,
child: StreamTransport = nil): Future[StreamTransport]
child: StreamTransport = nil,
flags = default(set[TransportFlags])): Future[StreamTransport]
{.raises: [Defect, LPError, MaInvalidAddress].} =
## Open new connection to remote peer with address ``ma`` and create
## new transport object ``StreamTransport`` for established connection.
@@ -86,7 +87,13 @@ proc connect*(
if not(RTRANSPMA.match(ma)):
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
return connect(initTAddress(ma).tryGet(), bufferSize, child)
let transportAddress = initTAddress(ma).tryGet()
compilesOr:
return connect(transportAddress, bufferSize, child, flags)
do:
# support for older chronos versions
return connect(transportAddress, bufferSize, child)
proc createStreamServer*[T](ma: MultiAddress,
cbproc: StreamCallback,
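
At the call site this is transparent, and users on a new enough chronos can request socket options per dial; for example, with a TCP MultiAddress ma in scope:

let transp = await connect(ma, flags = {TransportFlags.TcpNoDelay})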

View File

@@ -1,208 +0,0 @@
{
"version": 1,
"packages": {
"unittest2": {
"version": "0.0.5",
"vcsRevision": "da8398c45cafd5bd7772da1fc96e3924a18d3823",
"url": "https://github.com/status-im/nim-unittest2",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "b3f8493a4948989ef3e645a38b23aad77e851e26"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "7184d2424dc3945657884646a72715d494917aad",
"url": "https://github.com/status-im/nim-stew",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "f3125ed2fd126dfd3edbaea14275abd9fa57d703"
}
},
"bearssl": {
"version": "0.2.0",
"vcsRevision": "a647994910904b0103a05db3a5ec1ecfc4d91a88",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "d634751df2716ea9975912a2d5d0a090bb6bcfa9"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "a85bd52ae0a956983ca6b3267c72961d2ec0245f",
"url": "https://github.com/status-im/nim-http-utils",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "92933b21bcd29335f68e377e2b2193fa331e28b3"
}
},
"chronos": {
"version": "3.0.11",
"vcsRevision": "75d030ff71264513fb9701c75a326cd36fcb4692",
"url": "https://github.com/status-im/nim-chronos",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "57a674ba3c1a57a694fa7810d93ceb68f338a861"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "b42daf41d8eb4fbce40add6836bed838f8d85b6f",
"url": "https://github.com/status-im/nim-faststreams",
"downloadMethod": "git",
"dependencies": [
"stew",
"chronos",
"unittest2"
],
"checksums": {
"sha1": "62f7ac8fb200a8ecb9e6c63f5553a7dad66ae613"
}
},
"serialization": {
"version": "0.1.0",
"vcsRevision": "d77417cba6896c26287a68e6a95762e45a1b87e5",
"url": "https://github.com/status-im/nim-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "e17244c6654de22254acb9bcf71d8ddbeca8b2aa"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "21e99a2e9d9f80e68bef65c80ef781613005fccb",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "ab1c994bbcd6b04f2500f05d8ea4e463f33dd310"
}
},
"nimcrypto": {
"version": "0.5.4",
"vcsRevision": "24e006df85927f64916e60511620583b11403178",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "a4db2105de265930f1578bb7957f49fa39b10d9b"
}
},
"json_serialization": {
"version": "0.1.0",
"vcsRevision": "a7d815ed92f200f490c95d3cfd722089cc923ce6",
"url": "https://github.com/status-im/nim-json-serialization",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "50fc34a992ef3df68a7bee88af096bb8ed42572f"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
"url": "https://github.com/status-im/nim-chronicles",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "6a6670afba6b97b29b920340e2641978c05ab4d8",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "2621e46369be2a6846713e8c3d681a5bba3e0325"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "691f069b209d372b1240d5ae1f57fb7bbafeaba7",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "c71edfce064e7c0cadde0e687c6edc0caaf9ec07"
}
},
"dnsclient": {
"version": "0.3.2",
"vcsRevision": "fcd7443634b950eaea574e5eaa00a628ae029823",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "146aa4a8d512a3a786c5bf54311b79900166d9d7"
}
},
"secp256k1": {
"version": "0.5.2",
"vcsRevision": "fd173fdff863ce2e211cf64c9a03bc7539fe40b0",
"url": "https://github.com/status-im/nim-secp256k1",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "657c79f6f2b1b6da92a9cda81ffc9f95d26443cb"
}
}
}
}

Some files were not shown because too many files have changed in this diff.