Compare commits

...

33 Commits

Author SHA1 Message Date
Jacek Sieka
a9b5f504e9 debug logging 2024-03-02 10:19:16 +01:00
diegomrsantos
fe4ff79885 feat: message prioritization with immediate peer-published dispatch and queuing for other msgs (#1015) 2024-02-16 10:54:16 +01:00
Álex Cabeza Romero
aa4ebb0b3c docs(general): Improve docs (#1021) 2024-02-15 16:14:26 +01:00
diegomrsantos
e0f70b7177 improvement: enhanced checkExpiring macro with custom timeout (#1023) 2024-02-09 11:51:27 +01:00
Ludovic Chenut
c1dfd58772 fix: yamux metrics (#1022) 2024-02-08 12:36:58 +01:00
Álex Cabeza Romero
04af0c4323 test(flaky): Log checkExpiring failure (#1018)
Add simple logging mechanism on checkExpiring failure.
2024-02-06 17:47:13 +01:00
Ludovic Chenut
eb0890cd6f docs: add comments and improve yamux readability (#1006) 2024-02-02 15:14:02 +01:00
Álex Cabeza Romero
9bc5ec1566 tests(flaky): Increase check timeouts (#995)
Increase checkExpiring timeouts to verify impact on flaky tests.
2024-01-31 23:46:12 +00:00
diegomrsantos
5594bcb33e fix: more metrics issues when libp2p_expensive_metrics is enabled (#1016) 2024-01-30 16:55:55 +01:00
diegomrsantos
d46bcdb6ac fix: compilation issue when libp2p_expensive_metrics is enabled. (#1014) 2024-01-29 11:31:11 +01:00
diegomrsantos
9468bb6b4d fix(hole-punching-interop): update nim to 1.6.16 (#1012) 2024-01-26 11:15:40 +01:00
diegomrsantos
2725be64ba fix: use a temp var in withValue (#1010) 2024-01-18 16:25:56 +01:00
diegomrsantos
e3c967ad19 improvement(ci): improve ci daily workflows (#1002) 2023-12-18 20:14:33 +01:00
Ludovic Chenut
d2c98bd87d improvement(yamux): make the window size configurable (#987)
Co-authored-by: Diego <diego@status.im>
2023-12-15 16:30:50 +01:00
Ivan FB
3011ba4326 libp2p/multiaddress.nim: use of IpAddress instead of ValidIpAddress (#1001) 2023-12-12 12:53:36 +01:00
Etan Kissling
c6566707fa include connection info when logging identify message (#991) 2023-12-05 18:44:16 +01:00
diegomrsantos
3be681ec4d feat: add hole-punching interop tests (#998) 2023-12-05 18:37:33 +01:00
Jacek Sieka
2ede0fa40c remove redundant gcsafe annotations (#999) 2023-12-05 08:05:32 +01:00
Roman Zajic
7c195ab927 fix: remove forgotten "matrix-prep" job (#997) 2023-12-02 09:56:50 +08:00
Roman Zajic
3230407ffe fix: move workflows for Nim Devel and legacy i386 from "Daily" (#968) 2023-12-01 17:47:47 +08:00
diegomrsantos
deb72c8580 fix(dcutr): update the DCUtR initiator transport direction to Inbound (#994) 2023-11-29 17:38:47 +01:00
diegomrsantos
ce0685c272 fix(identify): do not add p2p and relayed addrs to observed addr manager (#990) 2023-11-21 18:24:35 +01:00
diegomrsantos
1f4b090227 fix(yamux): doesn't work in a Relayv2 connection (#979)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-11-21 16:03:29 +01:00
diegomrsantos
fb05f5ae22 fix(dcutr): handle tcp/p2p addresses (#989) 2023-11-20 17:06:17 +01:00
diegomrsantos
e12f65f193 fix(multiaddress): add quic-v1 multiaddress support (#988) 2023-11-20 11:09:56 +01:00
diegomrsantos
4b3bc4f819 Make ObservedAddrManager injectable (#970) 2023-11-20 11:06:02 +01:00
diegomrsantos
6791f5e7bb fix(dcutr): make the dcutr client inbound and the server outbound (#983) 2023-11-17 10:46:35 +01:00
diegomrsantos
08d9c84aca Remove unittest2 range (#986) 2023-11-17 08:20:02 +01:00
Jacek Sieka
4e7eaba67a fix chronos v4 compat (#982) 2023-11-16 16:54:34 +01:00
diegomrsantos
5f7a3ab829 fix: doc workflow (#985) 2023-11-16 15:58:05 +01:00
diegomrsantos
ebef85c9d7 Rate limit fixes (#965) 2023-11-09 14:20:28 +01:00
diegomrsantos
3fc1236659 Revert "Prevent concurrent IWANT of the same message (#943)" (#977) 2023-11-03 15:24:27 +01:00
Ludovic Chenut
fc4e9a8bb8 Fix WS transport when the connection aborts (#967) 2023-10-23 17:12:20 +02:00
102 changed files with 1680 additions and 1143 deletions

.github/workflows/daily.yml (new file, +12 lines)

@@ -0,0 +1,12 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0']"
cpu: "['amd64']"

.github/workflows/daily_common.yml (new file, +84 lines)

@@ -0,0 +1,84 @@
name: daily-common
on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
cpu:
description: 'CPU'
required: true
type: string
exclude:
description: 'Exclude matrix configurations'
required: false
type: string
default: "[]"
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
platform:
- os: linux
builder: ubuntu-20
shell: bash
- os: macos
builder: macos-12
shell: bash
- os: windows
builder: windows-2019
shell: msys2 {0}
branch: ${{ fromJSON(inputs.nim-branch) }}
cpu: ${{ fromJSON(inputs.cpu) }}
exclude: ${{ fromJSON(inputs.exclude) }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi

.github/workflows/daily_i386.yml (new file, +13 lines)

@@ -0,0 +1,13 @@
name: Daily i386
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0', 'devel']"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"

.github/workflows/daily_nim_devel.yml (new file, +12 lines)

@@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['devel']"
cpu: "['amd64']"


@@ -19,13 +19,13 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
nim-version: '1.6.x'
- name: Generate doc
run: |
nim --version
nimble --version
nimble install_pinned -y
nimble install_pinned
# nim doc can "fail", but the doc is still generated
nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true


@@ -52,3 +52,17 @@ jobs:
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json


@@ -1,82 +0,0 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6, version-2-0, devel]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '~1.15.5'
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi

.gitignore (+1 line)

@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/


@@ -20,6 +20,7 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
@@ -40,6 +41,8 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.
```
nimble install libp2p
```
@@ -47,11 +50,11 @@ nimble install libp2p
## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
## Modules
List of modules implemented in nim-libp2p:
| Name | Description |


@@ -1,6 +1,8 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
@@ -8,26 +10,29 @@
# Introduction
This is a libp2p-backed daemon wrapping the functionality of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go version `1.15.15`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script with the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install
# perform unit tests
nimble test
# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head to our Discord and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage


@@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port


@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()


@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straightforward: we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()


@@ -108,7 +108,7 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16


@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()


@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()


@@ -17,7 +17,7 @@ requires "nim >= 1.6.0",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & <= 0.1.0"
"unittest2"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)


@@ -25,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility
@@ -59,6 +59,7 @@ type
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@@ -121,8 +122,8 @@ proc withMplex*(
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(b: SwitchBuilder): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@@ -201,6 +202,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [LPError], public.} =
@@ -223,8 +228,13 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
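The two builder additions above combine as follows — a minimal sketch, assuming the usual nim-libp2p SwitchBuilder helpers (withRng, withAddress, withTcpTransport, withNoise) behave as in the examples elsewhere in the repo:

import libp2p

let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withNoise()
  .withYamux(windowSize = 512000)                      # new optional window size (#987)
  .withObservedAddrManager(ObservedAddrManager.new())  # injectable manager (#970)
  .build()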


@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
@@ -379,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
@@ -387,7 +387,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
@@ -395,7 +395,7 @@ proc getStream*(c: ConnManager,
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##


@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
# Waiting until daemon will not be bound to control socket.
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
var value: seq[byte]
if pbDhtResponse.getRequiredField(3, value).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
return initProtoBuffer(value)
else:
raise newException(DaemonLocalError, "Wrong message type!")


@@ -26,7 +26,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async, base.} =
dir = Direction.Out) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##


@@ -53,7 +53,7 @@ proc dialAndUpgrade(
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
@@ -75,15 +75,19 @@ proc dialAndUpgrade(
let mux =
try:
dialed.transportDir = upgradeDir
await transport.upgrade(dialed, upgradeDir, peerId)
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if upgradeDir == Direction.Out:
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
@@ -128,7 +132,7 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId))
@@ -146,7 +150,7 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):
return result
@@ -164,7 +168,7 @@ proc internalConnect(
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -182,7 +186,7 @@ proc internalConnect(
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
@@ -209,7 +213,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -217,7 +221,7 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,


@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro; it's based on the proc/template version
# and uses quote do so it's quite readable
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:


@@ -398,6 +398,9 @@ const
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone
@@ -955,7 +958,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
## Initialize empty MultiAddress.
result.data = initVBuffer()
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
protocol: IpTransportProtocol, port: Port): MultiAddress =
var res: MultiAddress
res.data = initVBuffer()


@@ -193,6 +193,7 @@ const MultiCodecList = [
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list


@@ -131,7 +131,7 @@ proc handle*(
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
): Future[string] {.async.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -172,10 +172,9 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:


@@ -42,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn


@@ -73,8 +73,8 @@ func shortLog*(s: LPChannel): auto =
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async, gcsafe.} =
trace "Opening channel", s, conn = s.conn
proc open*(s: LPChannel) {.async.} =
debug "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
try:
@@ -95,44 +95,44 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.deprecated, async.} =
if s.isClosed:
trace "Already closed", s
debug "Already closed", s
return
s.isClosed = true
s.closedLocal = true
s.localReset = not s.remoteReset
trace "Resetting channel", s, len = s.len
debug "Resetting channel", s, len = s.len
if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
try:
trace "sending reset message", s, conn = s.conn
debug "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
debug "Can't send reset message", s, conn = s.conn, msg = exc.msg
asyncSpawn resetMessage()
await s.closeImpl() # noraises, nocancels
trace "Channel reset", s
debug "Channel reset", s
method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
if s.closedLocal:
trace "Already closed", s
debug "Already closed", s
return
s.closedLocal = true
trace "Closing channel", s, conn = s.conn, len = s.len
debug "Closing channel", s, conn = s.conn, len = s.len
if s.isOpen and not s.conn.isClosed:
try:
@@ -144,18 +144,18 @@ method close*(s: LPChannel) {.async, gcsafe.} =
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
trace "Cannot send close message", s, id = s.id, msg = exc.msg
debug "Cannot send close message", s, id = s.id, msg = exc.msg
await s.closeUnderlying() # maybe already eofed
trace "Closed channel", s, len = s.len
debug "Closed channel", s, len = s.len
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName
s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting LPChannel", s
debug "Idle timeout expired, resetting LPChannel", s
s.reset()
procCall BufferStream(s).initStream()
@@ -182,7 +182,7 @@ method readOnce*(s: LPChannel,
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])
trace "readOnce", s, bytes
debug "readOnce", s, bytes
if bytes == 0:
await s.closeUnderlying()
return bytes
@@ -217,9 +217,13 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
return
if not s.isOpen:
debug "Opening channel for writing", s
await s.open()
debug "Opened channel", s
debug "Writing msg (prep)", s, msg = msg.len
await s.conn.writeMsg(s.id, s.msgCode, msg)
debug "Wrote msg (prep)", s, msg = msg.len
proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
@@ -231,7 +235,9 @@ proc completeWrite(
libp2p_mplex_qtime.time:
await fut
else:
debug "Waiting for complete", s, msg = msgLen
await fut
debug "Completed", s, msg = msgLen
when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
@@ -248,7 +254,7 @@ proc completeWrite(
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
debug "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
raise newLPStreamConnDownError(exc)
@@ -267,6 +273,7 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
# Fast path: Avoid a copy of msg being kept in the closure created by
# `{.async.}` as this drives up memory usage - the conditions are laid out
# in prepareWrite
debug "Writing fast path", s, msg = msg.len
s.conn.writeMsg(s.id, s.msgCode, msg)
else:
prepareWrite(s, msg)
@@ -300,6 +307,6 @@ proc init*(
when chronicles.enabledLogLevel == LogLevel.TRACE:
chann.name = if chann.name.len > 0: chann.name else: $chann.oid
trace "Created new lpchannel", s = chann, id, initiator
debug "Created new lpchannel", s = chann, id, initiator
return chann


@@ -54,15 +54,15 @@ proc newTooManyChannels(): ref TooManyChannels =
newException(TooManyChannels, "max allowed channel count exceeded")
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")
newException(InvalidChannelIdError, "Channel id already taken")
proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async.} =
## remove the local channel from the internal tables
##
try:
await chann.join()
m.channels[chann.initiator].del(chann.id)
trace "cleaned up channel", m, chann
debug "cleaned up channel", m, chann
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
@@ -99,7 +99,7 @@ proc newStreamInternal*(m: Mplex,
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
trace "Creating new channel", m, channel = result, id, initiator, name
debug "Creating new channel", m, channel = result, id, initiator, name
m.channels[initiator][id] = result
@@ -116,17 +116,17 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
debug "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
debug "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async, gcsafe.} =
trace "Starting mplex handler", m
method handle*(m: Mplex) {.async.} =
debug "Starting mplex handler", m
try:
while not m.connection.atEof:
trace "waiting for data", m
debug "waiting for data", m
let
(id, msgType, data) = await m.connection.readMsg()
initiator = bool(ord(msgType) and 1)
@@ -137,13 +137,13 @@ method handle*(m: Mplex) {.async, gcsafe.} =
msgType = msgType
size = data.len
trace "read message from connection", m, data = data.shortLog
debug "read message from connection", m, data = data.shortLog
var channel =
if MessageType(msgType) != MessageType.New:
let tmp = m.channels[initiator].getOrDefault(id, nil)
if tmp == nil:
trace "Channel not found, skipping", m
debug "Channel not found, skipping", m
continue
tmp
@@ -156,11 +156,11 @@ method handle*(m: Mplex) {.async, gcsafe.} =
let name = string.fromBytes(data)
m.newStreamInternal(false, id, name, timeout = m.outChannTimeout)
trace "Processing channel message", m, channel, data = data.shortLog
debug "Processing channel message", m, channel, data = data.shortLog
case msgType:
of MessageType.New:
trace "created channel", m, channel
debug "created channel", m, channel
if not isNil(m.streamHandler):
# Launch handler task
@@ -173,13 +173,13 @@ method handle*(m: Mplex) {.async, gcsafe.} =
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
debug "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
debug "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
debug "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
@@ -191,12 +191,12 @@ method handle*(m: Mplex) {.async, gcsafe.} =
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
debug "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m
debug "Stopped mplex handler", m
proc new*(M: type Mplex,
conn: Connection,
@@ -211,7 +211,7 @@ proc new*(M: type Mplex,
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -219,13 +219,13 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
if m.isClosed:
trace "Already closed", m
debug "Already closed", m
return
m.isClosed = true
trace "Closing mplex", m
debug "Closing mplex", m
var channs = toSeq(m.channels[false].values) & toSeq(m.channels[true].values)
@@ -245,7 +245,7 @@ method close*(m: Mplex) {.async, gcsafe.} =
m.channels[false].clear()
m.channels[true].clear()
trace "Closed mplex", m
debug "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
for c in m.channels[false].values: result.add(c)


@@ -46,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard
proc new*(
T: typedesc[MuxerProvider],


@@ -22,15 +22,16 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
DefaultWindowSize = 256000
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
type
YamuxError* = object of CatchableError
@@ -59,7 +60,7 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@@ -143,6 +144,7 @@ type
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
@@ -169,9 +171,18 @@ proc `$`(channel: YamuxChannel): string =
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
##
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
## Returns the length of what remains to be sent, but limit the size of big messages.
##
# For leniency, limit big message sizes to a third of maxSendQueueSize.
# This value is arbitrary and not in the spec; it permits storing up to
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
proc actuallyClose(channel: YamuxChannel) {.async.} =
if channel.closedLocally and channel.sendQueue.len == 0 and
@@ -183,15 +194,19 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
await channel.actuallyClose()
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
if channel.isReset:
return
trace "Reset channel"
@@ -212,11 +227,14 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. We use the recvWindow in the proc cleanupChann.
# If the reset is remote, there's no reason to flush anything.
channel.recvWindow = 0
proc updateRecvWindow(channel: YamuxChannel) {.async.} =
## Send to the peer a window update when the recvWindow is empty enough
##
# In order to avoid spamming a window update every time a byte is read,
# we send it every time half of the maxRecvWindow is read.
let inWindow = channel.recvWindow + channel.recvQueue.len
if inWindow > channel.maxRecvWindow div 2:
return
@@ -234,6 +252,7 @@ method readOnce*(
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
## Read from a yamux channel
if channel.isReset:
raise if channel.remoteReset:
@@ -249,6 +268,7 @@ method readOnce*(
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
let toRead = min(channel.recvQueue.len, nbytes)
@@ -278,21 +298,22 @@ proc trySend(channel: YamuxChannel) {.async.} =
return
channel.isSending = true
defer: channel.isSending = false
while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
try:
await channel.reset(true)
except CatchableError as exc:
debug "failed to reset", msg=exc.msg
warn "failed to reset", msg=exc.msg
break
let
bytesAvailable = channel.sendQueueBytes()
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -307,20 +328,24 @@ proc trySend(channel: YamuxChannel) {.async.} =
var futures: seq[Future[void]]
while inBuffer < toSend:
# concatenate the different message we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)
trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
trace "failed to send the buffer"
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
fut.fail(connDown)
@@ -331,6 +356,8 @@ proc trySend(channel: YamuxChannel) {.async.} =
channel.activity = true
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
result.fail(newLPStreamResetError())
@@ -343,15 +370,20 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open(channel: YamuxChannel) {.async.} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
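A worked example of the window update sent by open above: with windowSize = 512000, the Syn carries max(512000 - 256000, 0) = 256000, so the peer's assumed 256k default plus the update equals the configured window; for windowSize <= 256000 the update is 0 and only the default applies.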
@@ -362,12 +394,14 @@ type
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1
proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
@@ -375,12 +409,19 @@ proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
proc createStream(m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# As you can see, during initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To solve this contradiction, no updateWindow will be sent until recvWindow is less
# than maxRecvWindow
result = YamuxChannel(
id: id,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
@@ -398,7 +439,7 @@ proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChann(result)
asyncSpawn m.cleanupChannel(result)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
@@ -419,7 +460,7 @@ method close*(m: Yamux) {.async.} =
trace "Closed yamux"
proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
## call the muxer stream handler for this channel
## Call the muxer stream handler for this channel
##
try:
await m.streamHandler(channel)
@@ -429,7 +470,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -453,9 +494,11 @@ method handle*(m: Yamux) {.async, gcsafe.} =
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
@@ -511,19 +554,24 @@ method getStreams*(m: Yamux): seq[Connection] =
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true)
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
m.currentId += 2
if not lazy:
await stream.open()
return stream
proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize
)
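For code constructing the muxer directly rather than via SwitchBuilder, the extended constructor reads as below — a sketch, with conn assumed to be an already-established Connection:

let muxer = Yamux.new(
  conn,
  maxChannCount = 200,        # default MaxChannelCount
  windowSize = 512000,        # default is YamuxDefaultWindowSize (256000)
  maxSendQueueSize = 256000)  # default MaxSendQueueSize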


@@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]


@@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")


@@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -179,7 +179,7 @@ proc addressMapper(
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"


@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."


@@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError].} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100.1/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
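To illustrate the widened filter — a hedged example reusing the address from the comment above; plain TCP addrs pass through, TCP+p2p addrs are kept but trimmed to their first two components, and non-TCP addrs are dropped:

let addrs = @[
  MultiAddress.init("/ip4/198.51.100.1/tcp/1234").tryGet(),
  MultiAddress.init("/ip4/198.51.100.1/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
  MultiAddress.init("/ip4/198.51.100.1/udp/4242/quic-v1").tryGet()]
echo getHolePunchableAddrs(addrs)
# prints two entries, both /ip4/198.51.100.1/tcp/1234; the quic-v1 addr is filtered out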


@@ -29,7 +29,7 @@ logScope:
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."


@@ -189,7 +189,7 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)


@@ -47,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =


@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)


@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async, gcsafe, raises: [].} =
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)


@@ -21,14 +21,14 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))


@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
signedPeerRecord =
# The SPR contains the same data as the identify message
# would be cumbersome to log
if iinfo.signedPeerRecord.isSome(): "Some"
if it.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
@@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
if ? pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)
debug "decodeMsg: decoded identify", iinfo
Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
observedAddrManager: observedAddrManager,
)
identify.init()
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -168,7 +169,7 @@ method init*(p: Identify) =
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
raise newException(IdentityInvalidMsgError, "Empty message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
info.peerId = peer
info.observedAddr.withValue(observed):
if not self.observedAddrManager.addObservation(observed):
debug "Observed address is not valid", observedAddr = observed
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like ".../p2p-circuit/p2p/..." and "/p2p/..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
return info
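
A hedged usage sketch of the injectable manager introduced above (the constructor parameters match this diff; `peerInfo` and the shared-manager scenario are assumed for illustration):

```nim
# Sharing one ObservedAddrManager between a custom component and Identify,
# now that the constructor accepts it as a parameter.
let
  oam = ObservedAddrManager.new()
  identify = Identify.new(
    peerInfo = peerInfo,           # assumed: provided by the application
    sendSignedPeerRecord = false,
    observedAddrManager = oam)     # injected instead of the internal default
```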
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify push: decoded message", conn, identInfo
identInfo.pubkey.withValue(pubkey):
let receivedPeerId = PeerId.init(pubkey).tryGet()


@@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn


@@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -71,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, gcsafe, public.} =
): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn


@@ -157,7 +157,7 @@ method rpcHandler*(f: FloodSub,
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
trace "Forwared message to peers", peers = toSendPeers.len
f.updateMetrics(rpcMsg)
@@ -219,7 +219,7 @@ method publish*(f: FloodSub,
return 0
# Try to send to all peers that are known to be interested
f.broadcast(peers, RPCMsg(messages: @[msg]))
f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
when defined(libp2p_expensive_metrics):
libp2p_pubsub_messages_published.inc(labelValues = [topic])


@@ -46,6 +46,9 @@ declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizat
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
proc init*(_: type[GossipSubParams]): GossipSubParams =
GossipSubParams(
explicit: true,
@@ -79,7 +82,6 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
disconnectBadPeers: false,
enablePX: false,
bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
iwantTimeout: 3 * GossipSubHeartbeatInterval,
overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit: false
)
@@ -218,6 +220,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
for topic, info in stats[].topicInfos.mpairs:
info.firstMessageDeliveries = 0
pubSubPeer.stopSendNonPriorityTask()
procCall FloodSub(g).unsubscribePeer(peer)
proc handleSubscribe*(g: GossipSub,
@@ -277,12 +281,28 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
respControl.prune.add(g.handleGraft(peer, control.graft))
let messages = g.handleIWant(peer, control.iwant)
if
respControl.prune.len > 0 or
respControl.iwant.len > 0 or
messages.len > 0:
# iwant and prunes from here, also messages
let
isPruneNotEmpty = respControl.prune.len > 0
isIWantNotEmpty = respControl.iwant.len > 0
if isPruneNotEmpty or isIWantNotEmpty:
if isIWantNotEmpty:
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
if isPruneNotEmpty:
for prune in respControl.prune:
if g.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
else:
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
trace "sending control message", msg = shortLog(respControl), peer
g.send(
peer,
RPCMsg(control: some(respControl)), isHighPriority = true)
if messages.len > 0:
for smsg in messages:
for topic in smsg.topicIds:
if g.knownTopics.contains(topic):
@@ -290,18 +310,11 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
else:
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
for prune in respControl.prune:
if g.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
else:
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
trace "sending control message", msg = shortLog(respControl), peer
# iwant replies have lower priority
trace "sending iwant reply messages", peer
g.send(
peer,
RPCMsg(control: some(respControl), messages: messages))
RPCMsg(messages: messages), isHighPriority = false)
proc validateAndRelay(g: GossipSub,
msg: Message,
@@ -319,7 +332,7 @@ proc validateAndRelay(g: GossipSub,
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -354,7 +367,7 @@ proc validateAndRelay(g: GossipSub,
if msg.data.len > msgId.len * 10:
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
idontwant: @[ControlIWant(messageIds: @[msgId])]
))))
))), isHighPriority = true)
for peer in toSendPeers:
for heDontWant in peer.heDontWants:
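
For scale, an illustrative calculation (numbers not taken from the diff): with a 20-byte message id, the `msg.data.len > msgId.len * 10` guard above broadcasts IDONTWANT only for payloads larger than 200 bytes, so the extra control traffic stays small relative to the duplicate data it can suppress.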
@@ -368,7 +381,7 @@ proc validateAndRelay(g: GossipSub,
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
for topic in msg.topicIds:
if topic notin g.topics: continue
@@ -385,7 +398,7 @@ proc validateAndRelay(g: GossipSub,
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
# In this way we count even ignored fields by protobuf
var rmsg = rpcMsgOpt.valueOr:
@@ -427,6 +440,11 @@ method rpcHandler*(g: GossipSub,
await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
return
when defined(libp2p_expensive_metrics):
for m in rpcMsg.messages:
for t in m.topicIds:
libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, t])
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
@@ -434,7 +452,7 @@ method rpcHandler*(g: GossipSub,
peer.recvObservers(rpcMsg)
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
g.send(peer, RPCMsg(pong: rpcMsg.ping))
g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
peer.pingBudget.dec
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
@@ -461,9 +479,6 @@ method rpcHandler*(g: GossipSub,
let
msgId = msgIdResult.get
msgIdSalted = msgId & g.seenSalt
g.outstandingIWANTs.withValue(msgId, iwantRequest):
if iwantRequest.peer.peerId == peer.peerId:
g.outstandingIWANTs.del(msgId)
# addSeen adds salt to msgId to avoid
# remote attacking the hash function
@@ -496,14 +511,14 @@ method rpcHandler*(g: GossipSub,
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
continue
if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
continue
# g.anonymize needs no evaluation when receiving messages
@@ -547,7 +562,7 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
g.broadcast(mpeers, msg)
g.broadcast(mpeers, msg, isHighPriority = true)
for peer in mpeers:
g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
@@ -651,7 +666,7 @@ method publish*(g: GossipSub,
g.mcache.put(msgId, msg)
g.broadcast(peers, RPCMsg(messages: @[msg]))
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])


@@ -254,8 +254,7 @@ proc handleIHave*(g: GossipSub,
if not g.hasSeen(msgId):
if peer.iHaveBudget <= 0:
break
elif msgId notin res.messageIds and msgId notin g.outstandingIWANTs:
g.outstandingIWANTs[msgId] = IWANTRequest(messageId: msgId, peer: peer, timestamp: Moment.now())
elif msgId notin res.messageIds:
res.messageIds.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
@@ -301,17 +300,6 @@ proc handleIWant*(g: GossipSub,
messages.add(msg)
return messages
proc checkIWANTTimeouts(g: GossipSub, timeoutDuration: Duration) {.raises: [].} =
let currentTime = Moment.now()
var idsToRemove = newSeq[MessageId]()
for msgId, request in g.outstandingIWANTs.pairs():
if currentTime - request.timestamp > timeoutDuration:
trace "IWANT request timed out", messageID=msgId, peer=request.peer
request.peer.behaviourPenalty += 0.1
idsToRemove.add(msgId)
for msgId in idsToRemove:
g.outstandingIWANTs.del(msgId)
proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
@@ -542,14 +530,14 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# Send changes to peers after table updates to avoid stale state
if grafts.len > 0:
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
g.broadcast(grafts, graft)
g.broadcast(grafts, graft, isHighPriority = true)
if prunes.len > 0:
let prune = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)
g.broadcast(prunes, prune, isHighPriority = true)
proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
# drop peers that we haven't published to in
@@ -681,7 +669,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
topicID: t,
peers: g.peerExchangeList(t),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)
g.broadcast(prunes, prune, isHighPriority = true)
# pass by ptr in order to both signal we want to update metrics
# and as well update the struct for each topic during this iteration
@@ -703,7 +691,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [].} =
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
else:
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
g.send(peer, RPCMsg(control: some(control)))
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
g.mcache.shift() # shift the cache
@@ -717,5 +705,3 @@ proc heartbeat*(g: GossipSub) {.async.} =
for trigger in g.heartbeatEvents:
trace "firing heartbeat event", instance = cast[int](g)
trigger.fire()
checkIWANTTimeouts(g, g.parameters.iwantTimeout)


@@ -240,15 +240,15 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
# discard g.disconnectPeer(peer)
# debug "Peer disconnected", peer, uselessAppBytesNum
# raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
for tt in msg.topicIds:

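A minimal sketch of the overhead rate-limit decision above, assuming chronos' `TokenBucket` (a byte budget refilled per interval); the capacity and sizes are illustrative:

```nim
import chronos, chronos/ratelimit

# Allow up to 1 KiB of "useless" bytes per second from a peer; once the
# bucket is exhausted, the code above penalizes the peer and, when
# disconnectPeerAboveRateLimit is set, disconnects it.
let bucket = TokenBucket.new(1024, 1.seconds)
if not bucket.tryConsume(2048):   # 2 KiB of invalid payload exceeds the budget
  echo "peer above overhead rate limit"
```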

@@ -143,7 +143,6 @@ type
enablePX*: bool
bandwidthEstimatebps*: int # This is currently used only for limiting flood publishing. 0 disables flood-limiting completely
iwantTimeout*: Duration
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool
@@ -181,7 +180,6 @@ type
routingRecordsHandler*: seq[RoutingRecordsHandler] # Callback for peer exchange
heartbeatEvents*: seq[AsyncEvent]
outstandingIWANTs*: Table[MessageId, IWANTRequest]
MeshMetrics* = object
# scratch buffers for metrics
@@ -192,8 +190,3 @@ type
lowPeersTopics*: int64 # npeers < dlow
healthyPeersTopics*: int64 # npeers >= dlow
underDoutTopics*: int64
IWANTRequest* = object
messageId*: MessageId
peer*: PubSubPeer
timestamp*: Moment


@@ -138,18 +138,34 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to remote peer
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be sent.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
trace "sending pubsub message to peer", peer, msg = shortLog(msg)
peer.send(msg, p.anonymize)
asyncSpawn peer.send(msg, p.anonymize, isHighPriority)
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg) {.raises: [].} =
## Attempt to send `msg` to the given peers
msg: RPCMsg,
isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
let npeers = sendPeers.len.int64
for sub in msg.subscriptions:
@@ -195,19 +211,19 @@ proc broadcast*(
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg)
p.send(peer, msg, isHighPriority)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
topics: openArray[string],
subscribe: bool) =
## send subscriptions to remote peer
p.send(peer, RPCMsg.withSubs(topics, subscribe))
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)
for topic in topics:
if subscribe:

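A hedged usage sketch of the priority convention documented above (assumed in scope: a gossipsub instance `g`, peers, and the message variables from surrounding code):

```nim
# High priority: control messages and locally published data, dispatched
# immediately.
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
g.broadcast(peers, RPCMsg(messages: @[ownMsg]), isHighPriority = true)

# Low priority: forwarded messages and IWANT replies, queued until all
# pending high-priority sends have completed.
g.broadcast(toSendPeers, RPCMsg(messages: @[forwardedMsg]), isHighPriority = false)
```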

@@ -28,10 +28,12 @@ logScope:
when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
type
PeerRateLimitError* = object of CatchableError
@@ -50,6 +52,14 @@ type
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
@@ -71,6 +81,8 @@ type
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]
rpcmessagequeue: RpcMessageQueue
RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}
@@ -83,6 +95,16 @@ when defined(libp2p_agents_metrics):
#so we have to read the parents short agent..
p.sendConn.getWrapped().shortAgent
proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
func hash*(p: PubSubPeer): Hash =
p.peerId.hash
@@ -131,19 +153,13 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
try:
try:
while not conn.atEof:
trace "waiting for data", conn, peer = p, closed = conn.closed
debug "waiting for data", conn, peer = p, closed = conn.closed
var data = await conn.readLp(p.maxMessageSize)
trace "read data from peer",
debug "read data from peer",
conn, peer = p, closed = conn.closed,
data = data.shortLog
when defined(libp2p_expensive_metrics):
for m in rmsg.messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
await p.handler(p, data)
data = newSeq[byte]() # Release memory
except PeerRateLimitError as exc:
@@ -156,9 +172,9 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
debug "Unexpected cancellation in PubSubPeer.handle"
except CatchableError as exc:
trace "Exception occurred in PubSubPeer.handle",
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
debug "exiting pubsub read loop",
@@ -176,7 +192,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# remote peer - if we had multiple channels up and one goes down, all
# stop working so we make an effort to only keep a single channel alive
trace "Get new send connection", p, newConn
debug "Get new send connection", p, newConn
# Careful of race conditions here.
# Topic subscription relies on either connectedFut
@@ -191,7 +207,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
await handle(p, newConn)
finally:
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
debug "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil
@@ -230,21 +246,29 @@ proc hasSendConn*(p: PubSubPeer): bool =
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
for x in msg.messages:
for t in x.topicIDs:
for t in x.topicIds:
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
proc clearSendPriorityQueue(p: PubSubPeer) =
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[0].finished:
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.dec(labelValues = [$p.peerId])
let f = p.rpcmessagequeue.sendPriorityQueue.popFirst()
debug "Finished", p, f = repr(cast[pointer](f))
if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return
if p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[^1].finished:
for f in p.rpcmessagequeue.sendPriorityQueue:
if f.failed():
debug "Broken failed", p, f = repr(cast[pointer](f)), err= f.error().msg
elif f.completed:
debug "Broken completed", p, f = repr(cast[pointer](f))
else:
debug "Broken pending", p, f = repr(cast[pointer](f))
if msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
quit 1
proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
@@ -255,20 +279,53 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
debug "No send connection", p, msg = shortLog(msg)
return
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
debug "sending encoded msgs to peer", conn, encoded = shortLog(msg)
try:
await conn.writeLp(msg)
trace "sent pubsub message to remote", conn
debug "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
trace "Unable to send to remote", conn, msg = exc.msg
debug "Unable to send to remote", conn, msg = exc.msg
# Next time sendConn is used, it will be have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool) {.async.} =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return
if msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
if isHighPriority:
p.clearSendPriorityQueue()
let f = p.sendMsg(msg)
if not f.finished:
debug "Unfinished", p, msg = msg.len, f = repr(cast[pointer](f))
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
else:
await p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
debug "message queued", p, msg = shortLog(msg)
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
@@ -286,10 +343,10 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
debug "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
debug "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
currentRPCMsg = RPCMsg()
currentSize = 0
@@ -299,12 +356,21 @@ iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize:
# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
debug "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
debug "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
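
A worked example of the 10% overhead guess above (sizes are illustrative): with maxSize = 100 and queued messages of 40, 50 and 30 bytes, the first fits ((0 + 40) * 1.1 = 44), the second still fits ((40 + 50) * 1.1 = 99), but the third does not ((90 + 30) * 1.1 = 132 > 100), so the iterator yields the first RPCMsg with two messages and starts a new one with the 30-byte message.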
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.async.} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The `RPCMsg` instance representing the message to be sent.
## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -324,11 +390,11 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg)
await p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded)
debug "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
await p.sendEncoded(encoded, isHighPriority)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -337,6 +403,43 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# this minimizes the number of times we have to wait for something (each wait = performance cost)
# we will never wait for a finished future and by waiting for the last one, all that come before it are guaranteed
# to be finished already (since sends are processed in order).
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
await p.rpcmessagequeue.sendPriorityQueue[^1]
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()
proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancel()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
p.rpcmessagequeue.nonPriorityQueue.clear()
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]](),
)
proc new*(
T: typedesc[PubSubPeer],
peerId: PeerId,
@@ -353,17 +456,9 @@ proc new*(
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[MessageId]))
proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
result.startSendNonPriorityTask()

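The queue mechanics above condense into a self-contained sketch of the pattern; this is an illustration, not the library code, and every name in it is made up:

```nim
import std/deques
import chronos

type TwoQueues = ref object
  priority: Deque[Future[void]]     # futures of in-flight high-priority sends
  nonPriority: AsyncQueue[string]   # payloads waiting behind them

proc sendNow(msg: string): Future[void] {.async.} =
  await sleepAsync(10.millis)       # stand-in for the real socket write
  echo "sent: ", msg

proc drain(q: TwoQueues) {.async.} =
  # Background task: forward a queued payload only once every pending
  # high-priority send has finished. Sends complete in order, so awaiting
  # the last tracked future covers all earlier ones (a real implementation
  # would also guard against failed futures).
  while true:
    let msg = await q.nonPriority.popFirst()
    while q.priority.len > 0:
      while q.priority.len > 0 and q.priority[0].finished:
        discard q.priority.popFirst()   # drop completed futures
      if q.priority.len > 0:
        await q.priority[^1]
    await sendNow(msg)

proc send(q: TwoQueues, msg: string, isHighPriority: bool) {.async.} =
  if isHighPriority:
    q.priority.addLast(sendNow(msg))    # dispatch immediately, track the future
  else:
    await q.nonPriority.addLast(msg)    # queue behind priority traffic

when isMainModule:
  let q = TwoQueues(priority: initDeque[Future[void]](),
                    nonPriority: newAsyncQueue[string]())
  asyncSpawn q.drain()
  waitFor q.send("relayed message", isHighPriority = false)
  waitFor q.send("own publish", isHighPriority = true)
  waitFor sleepAsync(100.millis)        # let both sends complete
```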

@@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)


@@ -19,7 +19,7 @@ type
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything
p.codec = PlainTextCodec


@@ -135,10 +135,9 @@ method init*(s: Secure) =
method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)
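Hedged note: since a connection's `dir` field already records whether it was dialed or accepted, `secure` can derive the initiator role itself; the upgrade.nim hunk further below drops the explicit flag from its call site for the same reason.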
method readOnce*(s: SecureConn,
pbytes: pointer,


@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false


@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():


@@ -50,7 +50,7 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async, gcsafe.} =
s.timeoutHandler = proc() {.async.} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()


@@ -41,7 +41,7 @@ type
when defined(libp2p_agents_metrics):
shortAgent*: string
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc timeoutMonitor(s: Connection) {.async.}
func shortLog*(conn: Connection): string =
try:
@@ -74,7 +74,7 @@ method closeImpl*(s: Connection): Future[void] =
trace "Closing connection", s
if not isNil(s.timerTaskFut) and not s.timerTaskFut.finished:
s.timerTaskFut.cancel()
s.timerTaskFut.cancelSoon()
s.timerTaskFut = nil
trace "Closed connection", s
@@ -110,7 +110,7 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
return false
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
proc timeoutMonitor(s: Connection) {.async.} =
## monitor the channel for inactivity
##
## if the timeout was hit, it means that


@@ -158,7 +158,7 @@ method initStream*(s: LPStream) {.base.} =
libp2p_open_streams.inc(labelValues = [s.objName, $s.dir])
inc getStreamTracker(s.objName).opened
trace "Stream created", s, objName = s.objName, dir = $s.dir
debug "Stream created", s, objName = s.objName, dir = $s.dir
proc join*(s: LPStream): Future[void] {.public.} =
## Wait for the stream to be closed
@@ -206,7 +206,7 @@ proc readExactly*(s: LPStream,
if read == 0:
doAssert s.atEof()
trace "couldn't read all bytes, stream EOF", s, nbytes, read
debug "couldn't read all bytes, stream EOF", s, nbytes, read
# Re-readOnce to raise a more specific error than EOF
# Raise EOF if it doesn't raise anything (shouldn't happen)
discard await s.readOnce(addr pbuffer[read], nbytes - read)
@@ -214,7 +214,7 @@ proc readExactly*(s: LPStream,
raise newLPStreamEOFError()
if read < nbytes:
trace "couldn't read all bytes, incomplete data", s, nbytes, read
debug "couldn't read all bytes, incomplete data", s, nbytes, read
raise newLPStreamIncompleteError()
proc readLine*(s: LPStream,
@@ -246,7 +246,7 @@ proc readLine*(s: LPStream,
if len(result) == lim:
break
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
var
buffer: array[10, byte]
@@ -264,7 +264,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
@@ -300,17 +300,17 @@ proc write*(s: LPStream, msg: string): Future[void] {.public.} =
method closeImpl*(s: LPStream): Future[void] {.async, base.} =
## Implementation of close - called only once
trace "Closing stream", s, objName = s.objName, dir = $s.dir
debug "Closing stream", s, objName = s.objName, dir = $s.dir
libp2p_open_streams.dec(labelValues = [s.objName, $s.dir])
inc getStreamTracker(s.objName).closed
s.closeEvent.fire()
trace "Closed stream", s, objName = s.objName, dir = $s.dir
debug "Closed stream", s, objName = s.objName, dir = $s.dir
method close*(s: LPStream): Future[void] {.base, async, public.} = # {.raises [Defect].}
## close the stream - this may block, but will not raise exceptions
##
if s.isClosed:
trace "Already closed", s
debug "Already closed", s
return
s.isClosed = true # Set flag before performing virtual close
@@ -332,9 +332,9 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
## ongoing (which may be the case during cancellations)!
##
trace "Closing with EOF", s
debug "Closing with EOF", s
if s.closedWithEOF:
trace "Already closed"
debug "Already closed"
return
# prevent any further calls to avoid triggering
@@ -350,7 +350,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async, public.} =
if (await readOnce(s, addr buf[0], buf.len)) != 0:
debug "Unexpected bytes while waiting for EOF", s
except LPStreamEOFError:
trace "Expected EOF came", s
debug "Expected EOF came", s
except CancelledError as exc:
raise exc
except CatchableError as exc:


@@ -71,17 +71,17 @@ type
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
method run*(self: Service, switch: Switch) {.base, async.} =
doAssert(false, "Not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if not self.inUse:
warn "service is already stopped"
return false
@@ -141,10 +141,10 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.public.} =
dir = Direction.Out): Future[void] {.public.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
method connect*(
s: Switch,
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
s.peerInfo.protocols.add(proto.codec)
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
trace "Connection upgrade succeeded"
@@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =
trace "Switch stopped"
proc start*(s: Switch) {.async, gcsafe, public.} =
proc start*(s: Switch) {.async, public.} =
## Start listening on every transport
if s.started:


@@ -174,7 +174,7 @@ method start*(
trace "Listening on", address = ma
method stop*(self: TcpTransport) {.async, gcsafe.} =
method stop*(self: TcpTransport) {.async.} =
## stop the transport
##
try:
@@ -210,7 +210,7 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
except CatchableError as exc:
trace "Error shutting down tcp transport", exc = exc.msg
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TcpTransport): Future[Connection] {.async.} =
## accept a new TCP connection
##
@@ -219,7 +219,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
try:
if self.acceptFuts.len <= 0:
self.acceptFuts = self.servers.mapIt(it.accept())
self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))
if self.acceptFuts.len <= 0:
return
@@ -260,7 +260,7 @@ method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##


@@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
let transp = await connect(transportAddress)
try:
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
@@ -99,7 +99,7 @@ proc connectToTorServer(
await transp.closeWait()
raise err
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
proc readServerReply(transp: StreamTransport) {.async.} =
## The specification for this code is defined on
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
@@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
let atyp = firstFourOctets[3]
case atyp:
of Socks5AddressType.IPv4.byte:
discard await transp.read(ipV4NumOctets + portNumOctets)
discard await transp.read(ipV4NumOctets + portNumOctets)
of Socks5AddressType.FQDN.byte:
let fqdnNumOctets = await transp.read(1)
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
@@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
transp: StreamTransport, address: MultiAddress) {.async.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
@@ -190,7 +190,7 @@ method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##
if not handlesDial(address):
@@ -229,14 +229,14 @@ method start*(
else:
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TorTransport): Future[Connection] {.async.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
method stop*(self: TorTransport) {.async.} =
## stop the transport
##
await procCall Transport(self).stop() # call base


@@ -83,13 +83,12 @@ proc dial*(
method upgrade*(
self: Transport,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
self.upgrader.upgrade(conn, direction, peerId)
self.upgrader.upgrade(conn, peerId)
method handles*(
self: Transport,


@@ -173,7 +173,7 @@ method start*(
self.running = true
method stop*(self: WsTransport) {.async, gcsafe.} =
method stop*(self: WsTransport) {.async.} =
## stop the transport
##
@@ -237,7 +237,7 @@ proc connHandler(self: WsTransport,
asyncSpawn onClose()
return conn
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: WsTransport): Future[Connection] {.async.} =
## accept a new WS connection
##
@@ -276,6 +276,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
debug "AsyncStream Error", exc = exc.msg
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
except TransportAbortedError as exc:
debug "Connection aborted", exc = exc.msg
except AsyncTimeoutError as exc:
debug "Timed out", exc = exc.msg
except TransportUseClosedError as exc:
@@ -293,7 +295,7 @@ method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##


@@ -32,8 +32,7 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
proc mux*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction): Future[Muxer] {.async, gcsafe.} =
conn: Connection): Future[Muxer] {.async.} =
## mux connection
trace "Muxing connection", conn
@@ -42,7 +41,7 @@ proc mux*(
return
let muxerName =
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
if muxerName.len == 0 or muxerName == "na":
@@ -62,16 +61,15 @@ proc mux*(
method upgrade*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction
trace "Upgrading connection", conn, direction = conn.dir
let sconn = await self.secure(conn, direction, peerId) # secure the connection
let sconn = await self.secure(conn, peerId) # secure the connection
if isNil(sconn):
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
let muxer = await self.mux(sconn, direction) # mux it if possible
let muxer = await self.mux(sconn) # mux it if possible
if muxer == nil:
raise newException(UpgradeFailedError,
"a muxer is required for outgoing connections")
@@ -84,7 +82,7 @@ method upgrade*(
raise newException(UpgradeFailedError,
"Connection closed or missing peer info, stopping upgrade")
trace "Upgraded connection", conn, sconn, direction
trace "Upgraded connection", conn, sconn, direction = conn.dir
return muxer
proc new*(
@@ -98,8 +96,7 @@ proc new*(
secureManagers: @secureManagers,
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
{.async, gcsafe, raises: [].} =
upgrader.streamHandler = proc(conn: Connection) {.async.} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection


@@ -40,20 +40,18 @@ type
method upgrade*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
proc secure*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId]): Future[Connection] {.async.} =
if self.secureManagers.len <= 0:
raise newException(UpgradeFailedError, "No secure managers registered!")
let codec =
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
if codec.len == 0:
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@@ -65,4 +63,4 @@ proc secure*(
# let's avoid duplicating checks but detect if it fails to do it properly
doAssert(secureProtocol.len > 0)
return await secureProtocol[0].secure(conn, direction == Out, peerId)
return await secureProtocol[0].secure(conn, peerId)


@@ -89,8 +89,27 @@ template exceptionToAssert*(body: untyped): untyped =
res
template withValue*[T](self: Opt[T] | Option[T], value, body: untyped): untyped =
if self.isSome:
let value {.inject.} = self.get()
## This template provides a convenient way to work with `Option` types in Nim.
## It allows you to execute a block of code (`body`) only when the `Option` is not empty.
##
## `self` is the `Option` instance being checked.
## `value` is the variable name to be used within the `body` for the unwrapped value.
## `body` is a block of code that is executed only if `self` contains a value.
##
## The `value` within `body` is automatically unwrapped from the `Option`, making it
## simpler to work with without needing explicit checks or unwrapping.
##
## Example:
## ```nim
## let myOpt = Opt.some(5)
## myOpt.withValue(value):
## echo value # Will print 5
## ```
##
## Note: This is a template, and it will be inlined at the call site, offering good performance.
let temp = (self)
if temp.isSome:
let value {.inject.} = temp.get()
body
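
The temporary introduced above matters when the argument has side effects; a hedged illustration (assuming this module's `withValue` and the `Opt` from nim-results are in scope):

```nim
import results   # provides Opt (nim-results)

var calls = 0
proc nextOpt(): Opt[int] =
  inc calls            # side effect we must not duplicate
  Opt.some(calls)

nextOpt().withValue(v):
  # With the temporary, nextOpt() runs once: prints "1 (calls: 1)".
  # Without it, the isSome/get pair would each re-evaluate the call.
  echo v, " (calls: ", calls, ")"
```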
macro withValue*[T](self: Opt[T] | Option[T], value, body, body2: untyped): untyped =


@@ -89,6 +89,7 @@ build_target() {
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully."
}
if target_needs_rebuilding; then


@@ -5,21 +5,21 @@ export unittest2, chronos
template asyncTeardown*(body: untyped): untyped =
teardown:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncSetup*(body: untyped): untyped =
setup:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncTest*(name: string, body: untyped): untyped =
test name:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
inc attemptNumber
try:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
except Exception as e:


@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
buf.finish()
result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
## read length prefixed msg
var
size: uint


@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
if conn.observedAddr.isSome():
check transport1.handles(conn.observedAddr.get())
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.write("Hello!")
await conn.close()
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
await conn.write(newSeq[byte](0))
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.close()


@@ -1,6 +1,7 @@
{.push raises: [].}
import chronos
import macros
import algorithm
import ../libp2p/transports/tcptransport
@@ -110,19 +111,83 @@ proc bridgedConnections*: (Connection, Connection) =
await connA.pushData(data)
return (connA, connB)
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
return false
elif cond():
return true
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
## Periodically checks a given condition until it is true or a timeout occurs.
##
## `code`: untyped - A condition expression that should eventually evaluate to true.
## `timeout`: Duration - The maximum duration to wait for the condition to be true.
##
## Examples:
## ```nim
## # Example 1:
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(2.seconds):
## a == b
##
## # Example 2: Multiple conditions
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
## let a = 2
## let b = 2
##     checkUntilCustomTimeout(5.seconds):
## a == b
## a == 2
## b == 1
## ```
# Helper proc to recursively build a combined boolean expression
proc buildAndExpr(n: NimNode): NimNode =
if n.kind == nnkStmtList and n.len > 0:
var combinedExpr = n[0] # Start with the first expression
for i in 1..<n.len:
# Combine the current expression with the next using 'and'
combinedExpr = newCall("and", combinedExpr, n[i])
return combinedExpr
else:
await sleepAsync(1.millis)
return n
template checkExpiring*(code: untyped): untyped =
check await checkExpiringInternal(proc(): bool = code)
# Build the combined expression
let combinedBoolExpr = buildAndExpr(code)
result = quote do:
proc checkExpiringInternal(): Future[void] {.gensym, async.} =
let start = Moment.now()
while true:
if Moment.now() > (start + `timeout`):
checkpoint("[TIMEOUT] Timeout was reached and the conditions were not true. Check if the code is working as " &
"expected or consider increasing the timeout param.")
check `code`
return
else:
if `combinedBoolExpr`:
return
else:
await sleepAsync(1.millis)
await checkExpiringInternal()
macro checkUntilTimeout*(code: untyped): untyped =
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
##
## Examples:
## ```nim
## # Example 1:
## asyncTest "checkUntilTimeout should pass if the condition is true":
## let a = 2
## let b = 2
## checkUntilTimeout:
## a == b
##
## # Example 2: Multiple conditions
## asyncTest "checkUntilTimeout should pass if the conditions are true":
## let a = 2
## let b = 2
## checkUntilTimeout:
## a == b
## a == 2
## b == 2
## ```
result = quote do:
checkUntilCustomTimeout(10.seconds, `code`)
proc unorderedCompare*[T](a, b: seq[T]): bool =
if a == b:
@@ -146,8 +211,8 @@ proc default*(T: typedesc[MockResolver]): T =
resolver.ipResponses[("localhost", true)] = @["::1"]
resolver
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc setDNSAddr*(switch: Switch) {.async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
switch.peerInfo.addressMappers.add(addressMapper)
await switch.peerInfo.update()

View File
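To make the new macro concrete, here is a rough hand-expansion of a two-condition `checkUntilCustomTimeout` call: `buildAndExpr` folds the statement list into a single `and` chain, which the generated proc polls every millisecond until the deadline. The unittest2 `check`/`checkpoint` calls are replaced with `doAssert` so the sketch stands alone:

```nim
import chronos

# Hand-expansion (approximate) of:
#   checkUntilCustomTimeout(2.seconds):
#     a == b
#     a == 2
proc expanded(a, b: int): Future[void] {.async.} =
  let start = Moment.now()
  while true:
    if Moment.now() > (start + 2.seconds):
      # Timeout: surface the failure once, like the macro's final `check`.
      doAssert (a == b) and (a == 2), "conditions never became true"
      return
    elif (a == b) and (a == 2):  # statement list combined via `and` by buildAndExpr
      return
    else:
      await sleepAsync(1.millis)

when isMainModule:
  waitFor expanded(2, 2)
```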

@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

View File

@@ -0,0 +1,114 @@
import std/[os, options, strformat]
import redis
import chronos, chronicles
import ../../libp2p/[builders,
switch,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping]
import ../stubs/autonatclientstub
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder.new()
.withRng(rng)
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng=rng))
return s
proc main() {.async.} =
try:
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
# This is necessary to make the autonat service work. It will ask this peer for our reachability, which the autonat
# client stub will answer with NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
debug "Got relay address", relayMA
let relayId = await switch.connect(relayMA)
debug "Connected to relay", relayId
# Wait for our relay address to be published
while switch.peerInfo.addrs.len == 0:
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
quit(0)
except CatchableError as e:
error "Unexpected error", msg = e.msg
discard waitFor(main().withTimeout(4.minutes))
quit(1)

View File
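For orientation, the two roles in the test above rendezvous through Redis: the relay's TCP address arrives via the `RELAY_TCP_ADDRESS` key, and the listener announces its peer id under `LISTEN_CLIENT_PEER_ID`. A stripped-down sketch of just that exchange, assuming the same `redis` client calls used in the test (the import paths are illustrative):

```nim
import redis, results
import libp2p/peerid  # illustrative import path

# Listener side: announce our peer id so the dialer can find us.
proc announceListener(redisClient: Redis, peerId: PeerId) =
  discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $peerId)

# Dialer side: block (timeout 0 = wait forever) until the listener's peer id appears.
proc awaitListener(redisClient: Redis): PeerId =
  let entry = redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)
  PeerId.init(entry[1]).tryGet()
```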

@@ -0,0 +1,7 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp"
]
}

View File

@@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../helpers
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub(sender, receiver: auto; key: string) {.async.} =
# make things deterministic
# this is for testing purposes only
var ceil = 15
@@ -43,7 +43,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe A -> B":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -81,7 +81,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe B -> A":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -113,7 +113,7 @@ suite "FloodSub":
asyncTest "FloodSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -151,7 +151,7 @@ suite "FloodSub":
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should fail":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -186,7 +186,7 @@ suite "FloodSub":
asyncTest "FloodSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -235,7 +235,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -283,7 +283,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -333,7 +333,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check data.len < 50
inc(messageReceived)
@@ -361,7 +361,7 @@ suite "FloodSub":
check (await smallNode[0].publish("foo", smallMessage1)) > 0
check (await bigNode[0].publish("foo", smallMessage2)) > 0
checkExpiring: messageReceived == 2
checkUntilTimeout: messageReceived == 2
check (await smallNode[0].publish("foo", bigMessage)) > 0
check (await bigNode[0].publish("foo", bigMessage)) > 0
@@ -375,7 +375,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
inc(messageReceived)
let
@@ -396,7 +396,7 @@ suite "FloodSub":
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkExpiring: messageReceived == 1
checkUntilTimeout: messageReceived == 1
await allFuturesThrowing(
bigNode1[0].switch.stop(),

View File

@@ -24,7 +24,7 @@ import utils
import ../helpers
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
proc noop(data: seq[byte]) {.async.} = discard
const MsgIdSuccess = "msg id gen success"
@@ -718,104 +718,6 @@ suite "GossipSub internal":
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "two IHAVEs should generate only one IWANT":
let gossipSub = TestGossipSub.init(newStandardSwitch())
var iwantCount = 0
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.subscribe(topic, handler2)
# Setup two connections and two peers
var ihaveMessageId: string
var firstPeer: PubSubPeer
let seqno = @[0'u8, 1, 2, 3]
for i in 0..<2:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
if isNil(firstPeer):
firstPeer = peer
ihaveMessageId = byteutils.toHex(seqno) & $firstPeer.peerId
peer.handler = handler
# Simulate that each peer sends an IHAVE message to our node
let msg = ControlIHave(
topicID: topic,
messageIDs: @[ihaveMessageId.toBytes()]
)
let iwants = gossipSub.handleIHave(peer, @[msg])
if iwants.messageIds.len > 0:
iwantCount += 1
# Verify that our node responds with only one IWANT message
check: iwantCount == 1
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
# Simulate that our node receives the RPCMsg in response to the IWANT
let actualMessageData = "Hello, World!".toBytes
let rpcMsg = RPCMsg(
messages: @[Message(
fromPeer: firstPeer.peerId,
seqno: seqno,
data: actualMessageData
)]
)
await gossipSub.rpcHandler(firstPeer, encodeRpcMsg(rpcMsg, false))
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "handle unanswered IWANT messages":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.heartbeatInterval = 50.milliseconds
gossipSub.parameters.iwantTimeout = 10.milliseconds
await gossipSub.start()
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} = discard
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.subscribe(topic, handler2)
# Setup a connection and a peer
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# Simulate that the peer sends an IHAVE message to our node
let ihaveMessageId = @[0'u8, 1, 2, 3]
let ihaveMsg = ControlIHave(
topicID: topic,
messageIDs: @[ihaveMessageId]
)
discard gossipSub.handleIHave(peer, @[ihaveMsg])
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId)
check: peer.behaviourPenalty == 0.0
await sleepAsync(60.milliseconds)
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId)
check: peer.behaviourPenalty == 0.1
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
let
nodes = generateNodes(2, gossip = true, verifySignature = false)
@@ -828,10 +730,10 @@ suite "GossipSub internal":
var receivedMessages = new(HashSet[seq[byte]])
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
@@ -877,9 +779,9 @@ suite "GossipSub internal":
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
))))
))), isHighPriority = false)
checkExpiring: receivedMessages[] == sentMessages
checkUntilTimeout: receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
@@ -894,10 +796,10 @@ suite "GossipSub internal":
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
))), isHighPriority = false)
await sleepAsync(300.milliseconds)
checkExpiring: receivedMessages[].len == 0
checkUntilTimeout: receivedMessages[].len == 0
await teardownTest(gossip0, gossip1)
@@ -911,9 +813,9 @@ suite "GossipSub internal":
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
))), isHighPriority = false)
checkExpiring: receivedMessages[] == sentMessages
checkUntilTimeout: receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
@@ -929,7 +831,7 @@ suite "GossipSub internal":
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
))), isHighPriority = false)
var smallestSet: HashSet[seq[byte]]
let seqs = toSeq(sentMessages)
@@ -938,7 +840,7 @@ suite "GossipSub internal":
else:
smallestSet.incl(seqs[1])
checkExpiring: receivedMessages[] == smallestSet
checkUntilTimeout: receivedMessages[] == smallestSet
check receivedMessages[].len == 1
await teardownTest(gossip0, gossip1)

View File

@@ -47,7 +47,7 @@ suite "GossipSub":
asyncTest "GossipSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -92,7 +92,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (reject)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -138,7 +138,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (ignore)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -185,7 +185,7 @@ suite "GossipSub":
asyncTest "GossipSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -238,7 +238,7 @@ suite "GossipSub":
asyncTest "GossipSub unsub - resub faster than backoff":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -289,7 +289,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -310,9 +310,9 @@ suite "GossipSub":
let gossip1 = GossipSub(nodes[0])
let gossip2 = GossipSub(nodes[1])
checkExpiring:
"foobar" in gossip2.topics and
"foobar" in gossip1.gossipsub and
checkUntilTimeout:
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
await allFuturesThrowing(
@@ -323,7 +323,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -374,7 +374,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -428,7 +428,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -454,9 +454,9 @@ suite "GossipSub":
nodes[1].subscribe("foobar", handler)
let gsNode = GossipSub(nodes[1])
checkExpiring:
gsNode.mesh.getOrDefault("foobar").len == 0 and
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0 and
checkUntilTimeout:
gsNode.mesh.getOrDefault("foobar").len == 0
GossipSub(nodes[0]).mesh.getOrDefault("foobar").len == 0
(
GossipSub(nodes[0]).gossipsub.getOrDefault("foobar").len == 1 or
GossipSub(nodes[0]).fanout.getOrDefault("foobar").len == 1
@@ -481,7 +481,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over mesh A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -548,11 +548,11 @@ suite "GossipSub":
var
aReceived = 0
cReceived = 0
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
inc aReceived
check aReceived < 2
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async.} =
inc cReceived
check cReceived < 2
cRelayed.complete()
@@ -572,16 +572,16 @@ suite "GossipSub":
gossip1.seen = TimedCache[MessageId].init()
gossip3.seen = TimedCache[MessageId].init()
let msgId = toSeq(gossip2.validationSeen.keys)[0]
checkExpiring(try: gossip2.validationSeen[msgId].len > 0 except: false)
checkUntilTimeout(try: gossip2.validationSeen[msgId].len > 0 except: false)
result = ValidationResult.Accept
bFinished.complete()
nodes[1].addValidator("foobar", slowValidator)
checkExpiring(
gossip1.mesh.getOrDefault("foobar").len == 2 and
gossip2.mesh.getOrDefault("foobar").len == 2 and
gossip3.mesh.getOrDefault("foobar").len == 2)
checkUntilTimeout:
gossip1.mesh.getOrDefault("foobar").len == 2
gossip2.mesh.getOrDefault("foobar").len == 2
gossip3.mesh.getOrDefault("foobar").len == 2
tryPublish await nodes[0].publish("foobar", "Hello!".toBytes()), 2
await bFinished
@@ -596,7 +596,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over floodPublish A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -653,7 +653,7 @@ suite "GossipSub":
)
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for node in nodes:
@@ -661,7 +661,7 @@ suite "GossipSub":
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
block setup:
@@ -676,7 +676,7 @@ suite "GossipSub":
# Now try with a mesh
gossip1.subscribe("foobar", handler)
checkExpiring: gossip1.mesh.peers("foobar") > 5
checkUntilTimeout: gossip1.mesh.peers("foobar") > 5
# use a different length so that the message is not equal to the last
check (await nodes[0].publish("foobar", newSeq[byte](500_000))) == numPeersSecondMsg
@@ -727,7 +727,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -778,7 +778,7 @@ suite "GossipSub":
var handler: TopicHandler
capture dialer, i:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -819,7 +819,7 @@ suite "GossipSub":
# PX to A & C
#
# C sent its SPR, not A
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let
@@ -895,9 +895,9 @@ suite "GossipSub":
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
@@ -912,14 +912,14 @@ suite "GossipSub":
gossip3.broadcast(gossip3.mesh["foobar"], RPCMsg(control: some(ControlMessage(
idontwant: @[ControlIWant(messageIds: @[newSeq[byte](10)])]
))))
checkExpiring: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
))), isHighPriority = true)
checkUntilTimeout: gossip2.mesh.getOrDefault("foobar").anyIt(it.heDontWants[^1].len == 1)
tryPublish await nodes[0].publish("foobar", newSeq[byte](10000)), 1
await bFinished
checkExpiring: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
checkUntilTimeout: toSeq(gossip3.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 1)
check: toSeq(gossip1.mesh.getOrDefault("foobar")).anyIt(it.heDontWants[^1].len == 0)
await allFuturesThrowing(
@@ -943,7 +943,7 @@ suite "GossipSub":
await subscribeNodes(nodes)
proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handle(topic: string, data: seq[byte]) {.async.} = discard
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])
@@ -952,6 +952,10 @@ suite "GossipSub":
gossip1.subscribe("foobar", handle)
await waitSubGraph(nodes, "foobar")
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false
return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
@@ -964,8 +968,10 @@ suite "GossipSub":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let msg = RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: "Valid data".toBytes)])
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]),
isHighPriority = true)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits
@@ -973,9 +979,13 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(
gossip0.mesh["foobar"],
RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]),
isHighPriority = true)
await sleepAsync(300.millis)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits
await stopNodes(nodes)
@@ -986,8 +996,7 @@ suite "GossipSub":
let (nodes, gossip0, gossip1) = await initializeGossipTest()
# Simulate sending an undecodable message
let msg = newSeqWith[byte](30, 1.byte)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(msg)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte), isHighPriority = true)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
@@ -995,9 +1004,9 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(msg)
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte), isHighPriority = true)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
@@ -1008,11 +1017,10 @@ suite "GossipSub":
let msg = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](30)))
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(gossip0.mesh["foobar"], msg, isHighPriority = true)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
@@ -1020,9 +1028,47 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg2, isHighPriority = true)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let topic = "foobar"
proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
let res = newFuture[ValidationResult]()
res.complete(ValidationResult.Reject)
res
gossip0.addValidator(topic, execValidator)
gossip1.addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])
gossip0.broadcast(gossip0.mesh[topic], msg, isHighPriority = true)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(
gossip0.mesh[topic],
RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]),
isHighPriority = true)
checkUntilTimeout gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)

View File
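The `execValidator` in the last test returns an already-completed future rather than using `{.async.}`, keeping the validator synchronous while still matching the `Future[ValidationResult]` signature. A standalone sketch of the same trick, with a local `ValidationResult` stand-in for the libp2p enum:

```nim
import chronos

type ValidationResult = enum Accept, Reject, Ignore  # local stand-in

proc execValidator(topic: string, message: seq[byte]): Future[ValidationResult] {.raises: [].} =
  # Build and complete the future inline: no async transformation needed.
  let res = newFuture[ValidationResult]()
  res.complete(ValidationResult.Reject)
  res

when isMainModule:
  doAssert waitFor(execValidator("foobar", newSeq[byte](0))) == ValidationResult.Reject
```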

@@ -59,7 +59,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -93,7 +93,7 @@ suite "GossipSub":
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -155,7 +155,7 @@ suite "GossipSub":
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await subscribeNodes(nodes)
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handler(topic: string, data: seq[byte]) {.async.} = discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
@@ -182,10 +182,10 @@ suite "GossipSub":
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", noop)
@@ -226,7 +226,7 @@ suite "GossipSub":
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
@@ -272,7 +272,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -324,7 +324,7 @@ suite "GossipSub":
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0..<runs:
@@ -368,7 +368,7 @@ suite "GossipSub":
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await subscribeNodes(nodes)

View File

@@ -128,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub*(sender, receiver: auto; key: string) {.async.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
@@ -148,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
while true:
var

View File

@@ -24,7 +24,7 @@ type
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
dir = Direction.Out): Future[void] {.async.}
method connect*(
self: SwitchStub,
@@ -32,11 +32,11 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
if (self.connectStub != nil):
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
else:
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)
proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
return SwitchStub(

View File

@@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =
proc makeAutonatServicePrivate(): Switch =
var autonatProtocol = new LPProtocol
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
discard await conn.readLp(1024)
await conn.writeLp(AutonatDialResponse(
status: DialError,

View File

@@ -87,7 +87,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
@@ -131,7 +131,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Reachable
@@ -173,7 +173,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -213,7 +213,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Unknown
@@ -267,7 +267,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -302,12 +302,12 @@ suite "Autonat Service":
let awaiter2 = newFuture[void]()
let awaiter3 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter2.finished:
awaiter2.complete()
@@ -345,7 +345,7 @@ suite "Autonat Service":
let awaiter1 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
@@ -388,7 +388,7 @@ suite "Autonat Service":
var awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -428,7 +428,7 @@ suite "Autonat Service":
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
fail()
check autonatService.networkReachability == NetworkReachability.Unknown

View File
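All of the `statusAndConfidenceHandler` variants above share one shape: complete an awaiter future exactly once when a target reachability is observed at a given confidence. A self-contained sketch of that pattern, using a local `NetworkReachability` stand-in and assuming nim-results' `Opt` for the confidence parameter:

```nim
import chronos, results

type NetworkReachability = enum Unknown, NotReachable, Reachable  # local stand-in

let awaiter = newFuture[void]()

proc statusAndConfidenceHandler(networkReachability: NetworkReachability,
                                confidence: Opt[float]) {.async.} =
  # Complete the awaiter exactly once, at full-confidence reachability.
  if networkReachability == Reachable and confidence.isSome() and confidence.get() == 1:
    if not awaiter.finished:
      awaiter.complete()

when isMainModule:
  waitFor statusAndConfidenceHandler(Reachable, Opt.some(1.0))
  doAssert awaiter.finished
```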

@@ -32,7 +32,7 @@ method newStream*(
m: TestMuxer,
name: string = "",
lazy: bool = false):
Future[Connection] {.async, gcsafe.} =
Future[Connection] {.async.} =
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
suite "Connection Manager":
@@ -215,7 +215,7 @@ suite "Connection Manager":
await connMngr.close()
checkExpiring: waitedConn3.cancelled()
checkUntilTimeout: waitedConn3.cancelled()
await allFuturesThrowing(
allFutures(muxs.mapIt( it.close() )))
@@ -231,7 +231,7 @@ suite "Connection Manager":
await muxer.close()
checkExpiring: muxer notin connMngr
checkUntilTimeout: muxer notin connMngr
await connMngr.close()
@@ -254,7 +254,7 @@ suite "Connection Manager":
check peerId in connMngr
await connMngr.dropPeer(peerId)
checkExpiring: peerId notin connMngr
checkUntilTimeout: peerId notin connMngr
check isNil(connMngr.selectMuxer(peerId, Direction.In))
check isNil(connMngr.selectMuxer(peerId, Direction.Out))

View File

@@ -57,14 +57,15 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts, instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
# in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkUntilTimeout:
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -82,9 +83,9 @@ suite "Dcutr":
body
checkExpiring:
# no connection will be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
checkUntilTimeout:
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -95,7 +96,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
@@ -114,7 +115,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@@ -142,13 +143,16 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts, instead of one. The server dial is going to fail, but the client dial will succeed.
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
# in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkUntilTimeout:
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -159,7 +163,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
await ductrServerTest(connectProc)
@@ -171,7 +175,23 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
await ductrServerTest(connectProc)
test "should return valid TCP/IP and TCP/DNS addresses only":
let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
let result = getHolePunchableAddrs(testAddrs)
check result == expected

tests/testhelpers.nim (new file, 42 lines)
View File

@@ -0,0 +1,42 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import ./helpers
suite "Helpers":
asyncTest "checkUntilTimeout should pass if the condition is true":
let a = 2
let b = 2
checkUntilTimeout:
a == b
asyncTest "checkUntilTimeout should pass if the conditions are true":
let a = 2
let b = 2
checkUntilTimeout:
a == b
a == 2
b == 2
asyncTest "checkUntilCustomTimeout should pass when the condition is true":
let a = 2
let b = 2
checkUntilCustomTimeout(2.seconds):
a == b
asyncTest "checkUntilCustomTimeout should pass when the conditions are true":
let a = 2
let b = 2
checkUntilCustomTimeout(5.seconds):
a == b
a == 2
b == 2

View File

@@ -65,7 +65,7 @@ suite "Hole Punching":
let publicPeerSwitch = createSwitch(RelayClient.new())
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
await publicPeerSwitch.peerInfo.update()
@@ -89,8 +89,8 @@ suite "Hole Punching":
await publicPeerSwitch.connect(privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr))
checkExpiring:
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and
checkUntilTimeout:
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection)
await allFuturesThrowing(
@@ -127,8 +127,8 @@ suite "Hole Punching":
await publicPeerSwitch.connect(privatePeerSwitch.peerInfo.peerId, (await privatePeerRelayAddr))
checkExpiring:
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1 and
checkUntilTimeout:
privatePeerSwitch.connManager.connCount(publicPeerSwitch.peerInfo.peerId) == 1
not isRelayed(privatePeerSwitch.connManager.selectMuxer(publicPeerSwitch.peerInfo.peerId).connection)
await allFuturesThrowing(
@@ -193,38 +193,24 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
privatePeerSwitch2.connectStub = rcvConnectStub
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dials will result
# in two connection attempts, instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
# wait for hole punching to finish in the background
await sleepAsync(600.millis)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await holePunchingTest(nil, connectStub, NotReachable)
await holePunchingTest(nil, nil, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
raise newException(CatchableError, "error")

View File

@@ -73,7 +73,7 @@ suite "Identify":
asyncTest "default agent version":
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -95,7 +95,7 @@ suite "Identify":
remotePeerInfo.agentVersion = customAgentVersion
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -136,7 +136,7 @@ suite "Identify":
asyncTest "can send signed peer record":
msListen.addHandler(IdentifyCodec, identifyProto1)
identifyProto1.sendSignedPeerRecord = true
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -219,8 +219,8 @@ suite "Identify":
await identifyPush2.push(switch2.peerInfo, conn)
checkExpiring: switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
checkExpiring: switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
checkUntilTimeout: switch1.peerStore[ProtoBook][switch2.peerInfo.peerId] == switch2.peerInfo.protocols
checkUntilTimeout: switch1.peerStore[AddressBook][switch2.peerInfo.peerId] == switch2.peerInfo.addrs
await closeAll()

View File

@@ -97,7 +97,7 @@ suite "Mplex":
suite "channel half-closed":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -112,7 +112,7 @@ suite "Mplex":
asyncTest "(local close) - should allow reads until remote closes":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -139,7 +139,7 @@ suite "Mplex":
asyncTest "(remote close) - channel should close for reading by remote":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -162,7 +162,7 @@ suite "Mplex":
let
testData = "Hello!".toBytes
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard
)
chann = LPChannel.init(1, conn, true)
@@ -175,7 +175,7 @@ suite "Mplex":
await conn.close()
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -192,7 +192,7 @@ suite "Mplex":
suite "channel reset":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -205,7 +205,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -220,7 +220,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -239,7 +239,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -254,7 +254,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -279,7 +279,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -293,7 +293,7 @@ suite "Mplex":
await conn.close()
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -311,7 +311,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -323,7 +323,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -335,7 +335,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -364,7 +364,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -376,7 +376,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
@@ -392,11 +392,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -429,11 +429,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -473,12 +473,12 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
try:
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
@@ -520,11 +520,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
await stream.writeLp("Hello from stream!")
await stream.close()
@@ -557,12 +557,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count}!"
count.inc
@@ -601,12 +601,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
@@ -646,12 +646,12 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
@@ -697,11 +697,11 @@ suite "Mplex":
var count = 0
var done = newFuture[void]()
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
count.inc()
if count == 10:
@@ -761,11 +761,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -805,11 +805,11 @@ suite "Mplex":
var mplexListen: Mplex
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -829,7 +829,7 @@ suite "Mplex":
check:
unorderedCompare(dialStreams, mplexDial.getStreams())
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10
await mplexListen.close()
await allFuturesThrowing(
@@ -851,11 +851,11 @@ suite "Mplex":
var mplexHandle: Future[void]
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -876,7 +876,7 @@ suite "Mplex":
check:
unorderedCompare(dialStreams, mplexDial.getStreams())
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10
mplexHandle.cancel()
await allFuturesThrowing(
@@ -896,11 +896,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -920,7 +920,7 @@ suite "Mplex":
check:
unorderedCompare(dialStreams, mplexDial.getStreams())
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10
await conn.close()
await allFuturesThrowing(
@@ -943,11 +943,11 @@ suite "Mplex":
var listenConn: Connection
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
listenConn = await transport1.accept()
let mplexListen = Mplex.new(listenConn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -967,7 +967,7 @@ suite "Mplex":
check:
unorderedCompare(dialStreams, mplexDial.getStreams())
checkExpiring: listenStreams.len == 10 and dialStreams.len == 10
checkUntilTimeout: listenStreams.len == 10 and dialStreams.len == 10
await listenConn.closeWithEOF()
await allFuturesThrowing(
@@ -992,11 +992,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 1024
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
@@ -1064,11 +1064,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 512
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
await stream.close()
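The Mplex suite above also renames the flaky-test helper from checkExpiring to checkUntilTimeout. For context, a hypothetical sketch of what such a polling assertion can look like; the name suffix, polling interval, and 10-second budget here are assumptions, not the library's actual implementation:

import chronos
import unittest2

template checkUntilTimeoutSketch(cond: untyped) =
  # Re-evaluate cond every 10 ms until it holds or roughly 10 s elapse,
  # then assert it one final time so a failure is reported through check.
  # Meant for use inside an async test body, since it awaits.
  let deadline = Moment.now() + 10.seconds
  while not (cond) and Moment.now() < deadline:
    await sleepAsync(10.milliseconds)
  check cond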

View File

@@ -60,6 +60,7 @@ const
"/ip4/127.0.0.1/tcp/1234",
"/ip4/127.0.0.1/tcp/1234/",
"/ip4/127.0.0.1/udp/1234/quic",
"/ip4/192.168.80.3/udp/33422/quic-v1",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",

View File

@@ -34,7 +34,7 @@ type
method readOnce*(s: TestSelectStream,
pbytes: pointer,
nbytes: int): Future[int] {.async, gcsafe.} =
nbytes: int): Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -64,9 +64,9 @@ method readOnce*(s: TestSelectStream,
return "\0x3na\n".len()
method write*(s: TestSelectStream, msg: seq[byte]) {.async, gcsafe.} = discard
method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
method close(s: TestSelectStream) {.async, gcsafe.} =
method close(s: TestSelectStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -113,11 +113,11 @@ method readOnce*(s: TestLsStream,
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
method write*(s: TestLsStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.ls(msg)
method close(s: TestLsStream) {.async, gcsafe.} =
method close(s: TestLsStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -137,7 +137,7 @@ type
method readOnce*(s: TestNaStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async, gcsafe.} =
Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -167,11 +167,11 @@ method readOnce*(s: TestNaStream,
return "\0x3na\n".len()
method write*(s: TestNaStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.na(string.fromBytes(msg))
method close(s: TestNaStream) {.async, gcsafe.} =
method close(s: TestNaStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -197,7 +197,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.close()
@@ -210,7 +210,7 @@ suite "Multistream select":
var conn: Connection = nil
let done = newFuture[void]()
proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} =
proc testLsHandler(proto: seq[byte]) {.async.} =
var strProto: string = string.fromBytes(proto)
check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
await conn.close()
@@ -218,7 +218,7 @@ suite "Multistream select":
conn = Connection(newTestLsStream(testLsHandler))
proc testHandler(conn: Connection, proto: string): Future[void]
{.async, gcsafe.} = discard
{.async.} = discard
var protocol: LPProtocol = new LPProtocol
protocol.handler = testHandler
ms.addHandler("/test/proto1/1.0.0", protocol)
@@ -230,7 +230,7 @@ suite "Multistream select":
let ms = MultistreamSelect.new()
var conn: Connection = nil
proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
proc testNaHandler(msg: string): Future[void] {.async.} =
check msg == "\x03na\n"
await conn.close()
conn = newTestNaStream(testNaHandler)
@@ -238,7 +238,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} = discard
Future[void] {.async.} = discard
protocol.handler = testHandler
ms.addHandler("/unabvailable/proto/1.0.0", protocol)
@@ -250,7 +250,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -262,7 +262,7 @@ suite "Multistream select":
let transport1 = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
await conn.close()
@@ -293,7 +293,7 @@ suite "Multistream select":
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
@@ -315,7 +315,7 @@ suite "Multistream select":
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
@@ -362,7 +362,7 @@ suite "Multistream select":
let msListen = MultistreamSelect.new()
var protocol: LPProtocol = new LPProtocol
protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
protocol.handler = proc(conn: Connection, proto: string) {.async.} =
# never reached
discard
@@ -379,7 +379,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
try:
await msListen.handle(conn)
@@ -412,7 +412,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -424,7 +424,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
@@ -450,7 +450,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await conn.writeLp(&"Hello from {proto}!")
await conn.close()
@@ -462,7 +462,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)

View File

@@ -57,4 +57,5 @@ import testtcptransport,
testautorelay,
testdcutr,
testhpservice,
testutility
testutility,
testhelpers

View File

@@ -41,7 +41,7 @@ type
{.push raises: [].}
method init(p: TestProto) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
@@ -100,7 +100,7 @@ suite "Noise":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
try:
await sconn.write("Hello!")
finally:
@@ -115,7 +115,7 @@ suite "Noise":
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
var msg = newSeq[byte](6)
await sconn.readExactly(addr msg[0], 6)
@@ -140,11 +140,11 @@ suite "Noise":
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var conn: Connection
try:
conn = await transport1.accept()
discard await serverNoise.secure(conn, false, Opt.none(PeerId))
discard await serverNoise.secure(conn, Opt.none(PeerId))
except CatchableError:
discard
finally:
@@ -160,7 +160,7 @@ suite "Noise":
var sconn: Connection = nil
expect(NoiseDecryptTagError):
sconn = await clientNoise.secure(conn, true, Opt.some(conn.peerId))
sconn = await clientNoise.secure(conn, Opt.some(conn.peerId))
await conn.close()
await handlerWait
@@ -178,9 +178,9 @@ suite "Noise":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
await conn.close()
@@ -196,7 +196,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.write("Hello!")
await acceptFut
@@ -221,9 +221,9 @@ suite "Noise":
transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
listenFut = transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
let msg = await sconn.readLp(1024*1024)
@@ -237,7 +237,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.writeLp(hugePayload)
await readTask
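Every secure(...) call in the Noise suite above loses its boolean initiator argument. The new call shape, as already visible in the hunks; presumably the direction now follows from how each Noise instance was built (the dialing side constructs Noise.new(rng, clientPrivKey, outgoing = true) a few lines earlier), though that reading is inferred from context:

# Old: let sconn = await clientNoise.secure(conn, true, Opt.some(peerId))
# New: the initiator flag is gone; only the expected peer id remains.
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))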

View File

@@ -42,7 +42,7 @@ suite "Ping":
transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())
proc handlePing(peer: PeerId) {.async, gcsafe, closure.} =
proc handlePing(peer: PeerId) {.async, closure.} =
inc pingReceivedCount
pingProto1 = Ping.new()
pingProto2 = Ping.new(handlePing)
@@ -63,7 +63,7 @@ suite "Ping":
asyncTest "simple ping":
msListen.addHandler(PingCodec, pingProto1)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -78,7 +78,7 @@ suite "Ping":
asyncTest "ping callback":
msDial.addHandler(PingCodec, pingProto2)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
discard await msListen.select(c, PingCodec)
discard await pingProto1.ping(c)
@@ -92,7 +92,7 @@ suite "Ping":
asyncTest "bad ping data ack":
type FakePing = ref object of LPProtocol
let fakePingProto = FakePing()
proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc fakeHandle(conn: Connection, proto: string) {.async, closure.} =
var
buf: array[32, byte]
fakebuf: array[32, byte]
@@ -103,7 +103,7 @@ suite "Ping":
msListen.addHandler(PingCodec, fakePingProto)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)

View File

@@ -19,14 +19,22 @@ import ./helpers
import std/times
import stew/byteutils
proc createSwitch(r: Relay): Switch =
result = SwitchBuilder.new()
proc createSwitch(r: Relay = nil, useYamux: bool = false): Switch =
var builder = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
if useYamux:
builder = builder.withYamux()
else:
builder = builder.withMplex()
if r != nil:
builder = builder.withCircuitRelay(r)
return builder
.withNoise()
.withCircuitRelay(r)
.build()
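createSwitch is reworked above: the relay argument becomes optional and a useYamux flag selects the muxer through the builder, letting the same tests run under both muxers. Usage mirroring the call sites below (RelayClient values are passed where a Relay is expected, so RelayClient is evidently a Relay subtype):

let plainSwitch = createSwitch()                                    # no relay client, Mplex
let yamuxSwitch = createSwitch(RelayClient.new(), useYamux = true)  # client plus Yamux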
suite "Circuit Relay V2":
@@ -122,308 +130,310 @@ suite "Circuit Relay V2":
expect(ReservationError):
discard await cl1.reserve(src2.peerInfo.peerId, addrs)
suite "Connection":
asyncTeardown:
checkTrackers()
var
customProtoCodec {.threadvar.}: string
proto {.threadvar.}: LPProtocol
ttl {.threadvar.}: int
ldur {.threadvar.}: uint32
ldata {.threadvar.}: uint64
srcCl {.threadvar.}: RelayClient
dstCl {.threadvar.}: RelayClient
rv2 {.threadvar.}: Relay
src {.threadvar.}: Switch
dst {.threadvar.}: Switch
rel {.threadvar.}: Switch
rsvp {.threadvar.}: Rsvp
conn {.threadvar.}: Connection
asyncSetup:
customProtoCodec = "/test"
proto = new LPProtocol
proto.codec = customProtoCodec
ttl = 60
ldur = 120
ldata = 16384
srcCl = RelayClient.new()
dstCl = RelayClient.new()
src = createSwitch(srcCl)
dst = createSwitch(dstCl)
rel = newStandardSwitch()
asyncTest "Connection succeed":
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection duration exceeded":
ldur = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("yeah!")
check "go!" == string.fromBytes(await conn.readLp(1024))
await sleepAsync(chronos.timer.seconds(ldur + 1))
await conn.writeLp("that was a cool power nap")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("wanna sleep?")
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("go!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection data exceeded":
ldata = 1000
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("do you expect a lorem ipsum or...?")
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("count me the better story you know")
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("surprise me!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Reservation ttl expire during connection":
ttl = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await src.disconnect(rel.peerInfo.peerId)
await sleepAsync(chronos.timer.seconds(ttl + 1))
expect(DialFailedError):
check: conn.atEof()
await conn.close()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection over relay":
# src => rel => rel2 => dst
# rel2 makes a reservation with rel
# dst makes a reservation with rel2
# src then tries to connect to dst
proto.handler = proc(conn: Connection, proto: string) {.async.} =
raise newException(CatchableError, "Should not be here")
let
rel2Cl = RelayClient.new(canHop = true)
rel2 = createSwitch(rel2Cl)
rv2 = Relay.new()
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await rel2.start()
await src.start()
await dst.start()
let
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
$rel2.peerInfo.peerId & "/p2p/" &
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
expect(DialFailedError):
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
asyncTest "Connection using ClientRelay":
for (useYamux, muxName) in [(false, "Mplex"), (true, "Yamux")]:
suite "Circuit Relay V2 Connection using " & muxName:
asyncTeardown:
checkTrackers()
var
protoABC = new LPProtocol
protoBCA = new LPProtocol
protoCAB = new LPProtocol
protoABC.codec = "/abctest"
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC2")
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC4")
await conn.close()
protoBCA.codec = "/bcatest"
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA2")
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA4")
await conn.close()
protoCAB.codec = "/cabtest"
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB2")
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB4")
await conn.close()
customProtoCodec {.threadvar.}: string
proto {.threadvar.}: LPProtocol
ttl {.threadvar.}: int
ldur {.threadvar.}: uint32
ldata {.threadvar.}: uint64
srcCl {.threadvar.}: RelayClient
dstCl {.threadvar.}: RelayClient
rv2 {.threadvar.}: Relay
src {.threadvar.}: Switch
dst {.threadvar.}: Switch
rel {.threadvar.}: Switch
rsvp {.threadvar.}: Rsvp
conn {.threadvar.}: Connection
let
clientA = RelayClient.new(canHop = true)
clientB = RelayClient.new(canHop = true)
clientC = RelayClient.new(canHop = true)
switchA = createSwitch(clientA)
switchB = createSwitch(clientB)
switchC = createSwitch(clientC)
asyncSetup:
customProtoCodec = "/test"
proto = new LPProtocol
proto.codec = customProtoCodec
ttl = 60
ldur = 120
ldata = 16384
srcCl = RelayClient.new()
dstCl = RelayClient.new()
src = createSwitch(srcCl, useYamux)
dst = createSwitch(dstCl, useYamux)
rel = createSwitch(nil, useYamux)
switchA.mount(protoBCA)
switchB.mount(protoCAB)
switchC.mount(protoABC)
asyncTest "Connection succeed":
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await switchA.start()
await switchB.start()
await switchC.start()
await rel.start()
await src.start()
await dst.start()
let
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
$switchB.peerInfo.peerId & "/p2p-circuit").get()
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
$switchC.peerInfo.peerId & "/p2p-circuit").get()
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
$switchA.peerInfo.peerId & "/p2p-circuit").get()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await connABC.writeLp("testABC1")
await connBCA.writeLp("testBCA1")
await connCAB.writeLp("testCAB1")
check:
"testABC2" == string.fromBytes(await connABC.readLp(1024))
"testBCA2" == string.fromBytes(await connBCA.readLp(1024))
"testCAB2" == string.fromBytes(await connCAB.readLp(1024))
await connABC.writeLp("testABC3")
await connBCA.writeLp("testBCA3")
await connCAB.writeLp("testCAB3")
check:
"testABC4" == string.fromBytes(await connABC.readLp(1024))
"testBCA4" == string.fromBytes(await connBCA.readLp(1024))
"testCAB4" == string.fromBytes(await connCAB.readLp(1024))
await allFutures(connABC.close(), connBCA.close(), connCAB.close())
await allFutures(switchA.stop(), switchB.stop(), switchC.stop())
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection duration exceeded":
ldur = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("yeah!")
check "go!" == string.fromBytes(await conn.readLp(1024))
await sleepAsync(chronos.timer.seconds(ldur + 1))
await conn.writeLp("that was a cool power nap")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("wanna sleep?")
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("go!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection data exceeded":
ldata = 1000
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("do you expect a lorem ipsum or...?")
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("count me the better story you know")
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("surprise me!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Reservation ttl expire during connection":
ttl = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await src.disconnect(rel.peerInfo.peerId)
await sleepAsync(chronos.timer.seconds(ttl + 1))
expect(DialFailedError):
check: conn.atEof()
await conn.close()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection over relay":
# src => rel => rel2 => dst
# rel2 makes a reservation with rel
# dst makes a reservation with rel2
# src then tries to connect to dst
proto.handler = proc(conn: Connection, proto: string) {.async.} =
raise newException(CatchableError, "Should not be here")
let
rel2Cl = RelayClient.new(canHop = true)
rel2 = createSwitch(rel2Cl, useYamux)
rv2 = Relay.new()
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await rel2.start()
await src.start()
await dst.start()
let
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
$rel2.peerInfo.peerId & "/p2p/" &
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
expect(DialFailedError):
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
if not conn.isNil():
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
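One behavioural tweak hides in this hunk: conn is now closed only when it was actually assigned, since the dial inside expect(DialFailedError) is expected to raise and may leave conn nil:

expect(DialFailedError):
  conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
if not conn.isNil():   # the dial raised, so conn may never have been set
  await allFutures(conn.close())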
asyncTest "Connection using ClientRelay":
var
protoABC = new LPProtocol
protoBCA = new LPProtocol
protoCAB = new LPProtocol
protoABC.codec = "/abctest"
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC2")
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC4")
await conn.close()
protoBCA.codec = "/bcatest"
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA2")
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA4")
await conn.close()
protoCAB.codec = "/cabtest"
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB2")
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB4")
await conn.close()
let
clientA = RelayClient.new(canHop = true)
clientB = RelayClient.new(canHop = true)
clientC = RelayClient.new(canHop = true)
switchA = createSwitch(clientA, useYamux)
switchB = createSwitch(clientB, useYamux)
switchC = createSwitch(clientC, useYamux)
switchA.mount(protoBCA)
switchB.mount(protoCAB)
switchC.mount(protoABC)
await switchA.start()
await switchB.start()
await switchC.start()
let
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
$switchB.peerInfo.peerId & "/p2p-circuit").get()
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
$switchC.peerInfo.peerId & "/p2p-circuit").get()
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
$switchA.peerInfo.peerId & "/p2p-circuit").get()
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
await connABC.writeLp("testABC1")
await connBCA.writeLp("testBCA1")
await connCAB.writeLp("testCAB1")
check:
"testABC2" == string.fromBytes(await connABC.readLp(1024))
"testBCA2" == string.fromBytes(await connBCA.readLp(1024))
"testCAB2" == string.fromBytes(await connCAB.readLp(1024))
await connABC.writeLp("testABC3")
await connBCA.writeLp("testBCA3")
await connCAB.writeLp("testCAB3")
check:
"testABC4" == string.fromBytes(await connABC.readLp(1024))
"testBCA4" == string.fromBytes(await connBCA.readLp(1024))
"testCAB4" == string.fromBytes(await connCAB.readLp(1024))
await allFutures(connABC.close(), connBCA.close(), connCAB.close())
await allFutures(switchA.stop(), switchB.stop(), switchC.stop())
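Structurally, the whole Connection sub-suite is now instantiated twice through a parametrised loop, one pass per muxer. The skeleton of that pattern, condensed from the hunks above:

for (useYamux, muxName) in [(false, "Mplex"), (true, "Yamux")]:
  suite "Circuit Relay V2 Connection using " & muxName:
    asyncSetup:
      # the muxer choice flows into every switch the tests build
      src = createSwitch(srcCl, useYamux)
      dst = createSwitch(dstCl, useYamux)
      rel = createSwitch(nil, useYamux)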

View File

@@ -62,8 +62,8 @@ suite "RendezVous Interface":
dm.advertise(RdvNamespace("ns1"))
dm.advertise(RdvNamespace("ns2"))
checkExpiring: rdv.numAdvertiseNs1 >= 5
checkExpiring: rdv.numAdvertiseNs2 >= 5
checkUntilTimeout: rdv.numAdvertiseNs1 >= 5
checkUntilTimeout: rdv.numAdvertiseNs2 >= 5
await client.stop()
asyncTest "Check timeToAdvertise interval":

View File

@@ -46,7 +46,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -86,7 +86,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string with custom matcher":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -131,7 +131,7 @@ suite "Switch":
asyncTest "e2e should not leak bufferstreams and connections on channel close":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -171,7 +171,7 @@ suite "Switch":
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e use connect then dial":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -283,12 +283,12 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
checkExpiring:
checkUntilTimeout:
startCounts ==
@[
switch1.connManager.inSema.count, switch1.connManager.outSema.count,
@@ -305,7 +305,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -336,7 +336,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -357,7 +357,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -388,7 +388,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -409,7 +409,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -439,7 +439,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -460,7 +460,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -490,7 +490,7 @@ suite "Switch":
await switch2.disconnect(switch1.peerInfo.peerId)
check not switch2.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -521,7 +521,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -554,8 +554,8 @@ suite "Switch":
check not switch2.isConnected(switch1.peerInfo.peerId)
check not switch3.isConnected(switch1.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkExpiring: not switch1.isConnected(switch3.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch3.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -581,7 +581,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
await onConnect
@@ -619,7 +619,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
proc hook(peerId2: PeerId, event: ConnEvent) {.async, gcsafe.} =
proc hook(peerId2: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
if conns == 5:
@@ -662,7 +662,7 @@ suite "Switch":
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.closeWithEOF()
@@ -686,7 +686,7 @@ suite "Switch":
switch.stop())
asyncTest "e2e calling closeWithEOF on the same stream should not assert":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
discard await conn.readLp(100)
let testProto = new TestProto
@@ -711,7 +711,7 @@ suite "Switch":
await allFuturesThrowing(readers)
await switch2.stop() #Otherwise this leaks
checkExpiring: not switch1.isConnected(switch2.peerInfo.peerId)
checkUntilTimeout: not switch1.isConnected(switch2.peerInfo.peerId)
checkTracker(LPChannelTrackerName)
checkTracker(SecureConnTrackerName)
@@ -832,7 +832,7 @@ suite "Switch":
asyncTest "e2e peer store":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -882,7 +882,7 @@ suite "Switch":
# this randomly locks the Windows CI job
skip()
return
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -1019,7 +1019,7 @@ suite "Switch":
await srcTcpSwitch.stop()
asyncTest "mount unstarted protocol":
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
check "test123" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test456")
await conn.close()

View File

@@ -30,7 +30,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.write("Hello!")
await conn.close()
@@ -52,7 +52,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var msg = newSeq[byte](6)
let conn = await transport.accept()
await conn.readExactly(addr msg[0], 6)
@@ -73,7 +73,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
transp: StreamTransport) {.async.} =
var wstream = newAsyncStreamWriter(transp)
await wstream.write("Hello!")
await wstream.finish()
@@ -106,7 +106,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
transp: StreamTransport) {.async, gcsafe.} =
transp: StreamTransport) {.async.} =
var rstream = newAsyncStreamReader(transp)
let msg = await rstream.read(6)
check string.fromBytes(msg) == "Hello!"
@@ -179,7 +179,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade(), connectionsTimeout=1.milliseconds)
asyncSpawn transport.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.join()

View File

@@ -56,7 +56,7 @@ suite "Tor transport":
check string.fromBytes(resp) == "server"
await client.stop()
proc serverAcceptHandler() {.async, gcsafe.} =
proc serverAcceptHandler() {.async.} =
let conn = await server.accept()
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
@@ -87,7 +87,7 @@ suite "Tor transport":
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)

Some files were not shown because too many files have changed in this diff.