Compare commits

...

78 Commits

Author SHA1 Message Date
Roman
b0d1144f96 test: rebase and re-run project tests with Nim 1.6 2023-12-13 16:34:25 +01:00
Etan Kissling
762be89dd7 include connection info when logging identify message (#991) 2023-12-13 16:34:25 +01:00
diegomrsantos
5702b2d355 feat: add hole-punching interop tests (#998) 2023-12-13 16:34:25 +01:00
Jacek Sieka
9058c981cc remove redundant gcsafe annotations (#999) 2023-12-13 16:34:25 +01:00
Roman Zajic
acad1abc28 fix: remove forgotten "matrix-prep" job (#997) 2023-12-13 16:34:25 +01:00
Roman Zajic
aeb7167da4 fix: move workflows for Nim Devel and legacy i386 from "Daily" (#968) 2023-12-13 16:34:24 +01:00
diegomrsantos
061ea21729 fix(dcutr): update the DCUtR initiator transport direction to Inbound (#994) 2023-12-13 16:34:23 +01:00
diegomrsantos
6cdd4c911b fix(identify): do not add p2p and relayed addrs to observed addr manager (#990) 2023-12-13 16:34:22 +01:00
diegomrsantos
7ce2afba13 fix(yamux): doesn't work in a Relayv2 connection (#979)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-12-13 16:34:22 +01:00
diegomrsantos
39586605d9 fix(dcutr): handle tcp/p2p addresses (#989) 2023-12-13 16:34:22 +01:00
diegomrsantos
ff9493190f fix(multiaddress): add quic-v1 multiaddress support (#988) 2023-12-13 16:34:22 +01:00
diegomrsantos
b0964a410a Make ObservedAddrManager injectable (#970) 2023-12-13 16:34:22 +01:00
diegomrsantos
6189c2aaf5 fix(dcutr): make the dcutr client inbound and the server outbound (#983) 2023-12-13 16:34:22 +01:00
Jacek Sieka
373a0287a5 fix chronos v4 compat (#982) 2023-12-13 16:34:14 +01:00
diegomrsantos
0d05707875 fix: doc workflow (#985) 2023-12-13 16:34:14 +01:00
diegomrsantos
b835100682 Rate limit fixes (#965) 2023-12-13 16:34:14 +01:00
diegomrsantos
c876904425 Revert "Prevent concurrent IWANT of the same message (#943)" (#977) 2023-12-13 16:34:14 +01:00
Roman
2efa4b7d3d test: re-run project tests with Nim 2.0 2023-12-13 13:49:03 +01:00
Roman
ed58e8722e test: run project tests with Nim 1.6 - full CI workflow 2023-12-13 13:37:35 +01:00
Roman
cd5512d1a7 test: retest with Nim 2.0 2023-12-13 13:23:18 +01:00
Roman
20492387c0 test: download Nimble sources relative to NIM_DIR 2023-12-13 13:16:29 +01:00
Roman
7638bcc9cd test: check where the Nim sources were downloaded 2023-12-13 10:20:10 +01:00
Roman
f368a76377 test: check where the Nimble sources were downloaded 2023-12-13 09:47:59 +01:00
Roman
82e02f27cc test: check if sources downloaded 2023-12-07 11:52:52 +01:00
Roman
3962ac7ad0 test: download desired Nimble sources 2023-12-07 11:40:02 +01:00
Roman
410dee4aa3 test: install script debug - build_nim 2023-12-06 16:45:27 +01:00
Roman
203669e5f2 test: install script debug - default buildchain 2023-12-06 16:38:34 +01:00
Roman
8f7c339868 test: install script debug - custom buildchain 2023-12-06 16:30:41 +01:00
Roman
9593db16ae test: install script debug 2023-12-06 16:22:34 +01:00
Roman
82479ef6bf test: set dummy dir for CI cache 2023-12-06 16:15:19 +01:00
Roman
64b23d9ed2 test: no CI_CACHE 2023-12-06 16:13:30 +01:00
Roman
beacbd7008 test: different nimble dir 2023-12-06 15:55:02 +01:00
Roman
cd15368ebf test: use original Nimble DIR 2023-12-06 15:11:44 +01:00
Roman
5cf1d1dfa0 test: change Nimble commit for v0.14.2 in Nim install script 2023-12-06 14:33:17 +01:00
Roman
f14c1a0f7e test: find new nimble.exe entire system Windows 2023-11-23 21:34:30 +08:00
Roman
81f054798f test: find new nimble.exe install dir on Windows 2023-11-23 21:26:35 +08:00
Roman
425215fa54 test: try to create symlink to nimble.cmd 2023-11-23 20:57:46 +08:00
Roman
ce4a7fc24c test: yaml indent 2023-11-23 20:46:30 +08:00
Roman
df6f443811 test: remove NIMBLE_DIR variable 2023-11-23 20:44:41 +08:00
Roman
f61b69a3e7 test: replace nimble.exe with nimble.cmd on windows 2023-11-23 20:43:34 +08:00
Roman
2a2a552bb0 test: get nimble.cmd contents on windows 2023-11-23 20:06:59 +08:00
Roman
f35e08ae0a test: find nimble.exe file windows 2023-11-23 19:57:20 +08:00
Roman
d77cee6c41 test: add .exe to remove file for windows 2023-11-23 19:48:27 +08:00
Roman
7bc112916e test: add .exe to symlink for windows 2023-11-23 19:46:30 +08:00
Roman
6f37b671bd test: check install dir 2023-11-23 19:00:37 +08:00
Roman
3662f217c0 test: check Nimble binary on Windows 2023-11-23 18:52:03 +08:00
Roman
f10cbd7b41 test: find Nimble on Windows 2023-11-23 18:41:12 +08:00
Roman
cd8aceb18a test: retest for Windows 2023-11-23 18:28:44 +08:00
Roman
c1531eae4f test: cancel in progress to false 2023-11-20 18:17:10 +08:00
Roman
93e71455fd test: find pkgs2 on macos - at home dir 2023-11-17 17:06:26 +08:00
Roman
b595f31001 test: find pkgs2 on macos 2023-11-17 16:55:49 +08:00
Roman
6f8ef7727f test: nimble install once only 2023-11-17 16:37:06 +08:00
Roman
e870d386f2 test: list pkgs2 content on linux 2023-11-17 16:27:00 +08:00
Roman
9ce561d1be test: list nimbledir 2023-11-17 16:15:45 +08:00
Roman
a9b73d39e7 test: find where the dep files were installed 2023-11-17 16:06:43 +08:00
Roman
634db967a2 test: verify downloaded dependencies are same across platforms 2023-11-17 15:58:49 +08:00
Roman
a940f5bcf9 test: commit lock file 2023-11-17 15:49:17 +08:00
Roman
967fe7ddde test: generate lock file 2023-11-17 14:55:38 +08:00
Roman
d34134799c test: print Nimble lock file - CI workflow 2023-11-17 14:25:47 +08:00
Roman
e57f20bb65 test: print Nimble lock file 2023-11-17 14:19:29 +08:00
Roman
761b54157a fix: run the tests 2023-11-16 21:17:10 +08:00
Roman
f644f0b9aa fix: use single version unittest2 >= 0.2.1 2023-11-16 21:11:21 +08:00
Roman
8514f1718f fix: use nimble install --depsOnly 2023-11-16 21:01:56 +08:00
Roman
c448822d91 fix: use nimble install instead of pinned 2023-11-16 20:54:57 +08:00
Roman
27f902b462 fix: use relative path 2023-11-16 20:48:01 +08:00
Roman
ed6b481832 fix: install the latest version of Nimble 2023-11-16 20:39:15 +08:00
Roman
cfe2ef5714 Request specific Nim version 1.6.16 2023-11-11 10:06:44 +08:00
Roman
ac8d05637a Move Nimble upgrade to Run tests step 2023-11-11 09:59:57 +08:00
Roman
066d296e40 Nimble version check 2023-11-11 09:34:42 +08:00
Roman
330b00b2ce Specify version 2023-11-10 18:01:09 +08:00
Roman
75c98fcba9 Accept download from internet 2023-11-10 15:50:34 +08:00
Roman
84a585cd0f Upgrade Nimble to latest to support lock file 2023-11-10 15:32:35 +08:00
Roman
04e6da9cdc CI workflow with improvements - retest 5 2023-11-09 19:57:48 +08:00
Roman
39d0f1bfbd CI workflow with improvements - retest 4 2023-11-03 15:02:48 +08:00
Roman
56b07da02d CI workflow with improvements - retest 3 2023-11-03 15:02:31 +08:00
Roman
a20a542fb4 CI workflow with improvements - retest 2 2023-11-03 15:02:14 +08:00
Roman
ff8ea85ae3 CI workflow with improvements - retest 2023-11-03 14:49:29 +08:00
Roman
2e781e0c41 Disable caching for Nim and deps, increase verbosity 2023-11-03 13:41:32 +08:00
94 changed files with 1597 additions and 853 deletions

View File

@@ -112,20 +112,14 @@ runs:
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Restore Nim from cache
id: nim-cache
uses: actions/cache@v3
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
run: |
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
export NIMBLE_DIR=dist/nimble
cp ./scripts/build_nim.sh .
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
bash build_nim.sh nim csources ${NIMBLE_DIR} NimBinaries

View File

@@ -1,17 +1,23 @@
name: CI
name: CI - Test
on:
push:
branches:
- master
- unstable
pull_request:
# - master
# - unstable
- 'fix/ci-workflow-stability'
#pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
cancel-in-progress: false
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
timeout-minutes: 90
strategy:
@@ -26,8 +32,6 @@ jobs:
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
@@ -52,7 +56,7 @@ jobs:
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
submodules: true
@@ -65,28 +69,22 @@ jobs:
nim_branch: ${{ matrix.branch }}
- name: Setup Go
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
nimble install -y --depsOnly
- name: Run tests
run: |
nim --version
nimble --version
nimble test
NIMFLAGS="${NIMFLAGS} --mm:refc --verbosity:3" nimble test

View File

@@ -19,13 +19,13 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
nim-version: '1.6.x'
- name: Generate doc
run: |
nim --version
nimble --version
nimble install_pinned -y
nimble install_pinned
# nim doc can "fail", but the doc is still generated
nim doc --git.url:https://github.com/status-im/nim-libp2p --git.commit:${GITHUB_REF##*/} --outdir:${GITHUB_REF##*/} --project libp2p || true

View File

@@ -52,3 +52,17 @@ jobs:
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json

View File

@@ -75,8 +75,8 @@ jobs:
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi
# NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
# if [[ "${{ matrix.branch }}" == "devel" ]]; then
# echo -e "\nTesting with '--gc:orc':\n"
# NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
# fi

.github/workflows/multi_nim_common.yml vendored Normal file (+81 lines)
View File

@@ -0,0 +1,81 @@
name: daily-common
on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
platform:
description: 'Platform'
required: true
type: string
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target: ${{ fromJSON(inputs.platform) }}
branch: ${{ fromJSON(inputs.nim-branch) }}
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi

.github/workflows/multi_nim_devel.yml vendored Normal file (+13 lines)
View File

@@ -0,0 +1,13 @@
name: Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: status-im/nim-libp2p/.github/workflows/multi_nim_common.yml@unstable
with:
nim-branch: "['devel']"
platform: "[{'os':'linux','cpu':'amd64'},{'os':'macos','cpu':'amd64'},{'os':'windows','cpu':'amd64'}]"

.github/workflows/multi_nim_legacy.yml vendored Normal file (+14 lines)
View File

@@ -0,0 +1,14 @@
name: Legacy Platforms
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: status-im/nim-libp2p/.github/workflows/multi_nim_common.yml@unstable
with:
nim-branch: "['version-1-6','version-2-0']"
platform: "[{'os':'linux','cpu':'i386'}]"

View File

@@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =
# every incoming connections will be in handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port

View File

@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()

View File

@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will in be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()

View File

@@ -108,7 +108,7 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16

View File

@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()

View File

@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()

View File

@@ -17,7 +17,7 @@ requires "nim >= 1.6.0",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & <= 0.1.0"
"unittest2 >= 0.2.1"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)

View File

@@ -25,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility
@@ -59,6 +59,7 @@ type
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@@ -201,6 +202,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [LPError], public.} =
@@ -223,8 +228,13 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
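The diff above makes the ObservedAddrManager injectable through SwitchBuilder via the new withObservedAddrManager hook. A minimal usage sketch, assuming the library's usual builder chain (withRng, withAddress, withTcpTransport, withMplex, withNoise) and `import libp2p`; the shared manager instance is purely illustrative:
  let oam = ObservedAddrManager.new()
  let node = SwitchBuilder.new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .withObservedAddrManager(oam)  # inject instead of letting Identify create its own
    .build()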

View File

@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
@@ -379,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
@@ -387,7 +387,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
@@ -395,7 +395,7 @@ proc getStream*(c: ConnManager,
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##

View File

@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
# Waiting until daemon will not be bound to control socket.
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
var value: seq[byte]
if pbDhtResponse.getRequiredField(3, value).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
return initProtoBuffer(value)
else:
raise newException(DaemonLocalError, "Wrong message type!")

View File

@@ -26,7 +26,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async, base.} =
dir = Direction.Out) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##

View File

@@ -53,7 +53,7 @@ proc dialAndUpgrade(
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
@@ -75,15 +75,19 @@ proc dialAndUpgrade(
let mux =
try:
dialed.transportDir = upgradeDir
await transport.upgrade(dialed, upgradeDir, peerId)
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeeded through another - no use in trying again
await dialed.close()
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if upgradeDir == Direction.Out:
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
@@ -128,7 +132,7 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId))
@@ -146,7 +150,7 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):
return result
@@ -164,7 +168,7 @@ proc internalConnect(
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -182,7 +186,7 @@ proc internalConnect(
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
@@ -209,7 +213,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -217,7 +221,7 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,

View File

@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro, its based on the proc/template version
# and uses quote do so it's quite readable
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:

View File

@@ -398,6 +398,9 @@ const
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone
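With the quic-v1 marker protocol registered above, multiaddresses using the ratified QUIC name now parse. A quick sketch (address purely illustrative, assumes `import libp2p/multiaddress`):
  let ma = MultiAddress.init("/ip4/127.0.0.1/udp/4242/quic-v1").tryGet()
  echo ma  # prints /ip4/127.0.0.1/udp/4242/quic-v1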

View File

@@ -193,6 +193,7 @@ const MultiCodecList = [
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list

View File

@@ -131,7 +131,7 @@ proc handle*(
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
): Future[string] {.async.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -172,10 +172,9 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:

View File

@@ -42,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn

View File

@@ -73,7 +73,7 @@ func shortLog*(s: LPChannel): auto =
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async.} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -95,7 +95,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async.} =
if s.isClosed:
trace "Already closed", s
return
@@ -123,7 +123,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
trace "Channel reset", s
method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.

View File

@@ -122,7 +122,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async.} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -211,7 +211,7 @@ proc new*(M: type Mplex,
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -219,7 +219,7 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
if m.isClosed:
trace "Already closed", m
return

View File

@@ -46,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard
proc new*(
T: typedesc[MuxerProvider],

View File

@@ -59,7 +59,7 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@@ -183,9 +183,10 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
@@ -249,6 +250,7 @@ method readOnce*(
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
let toRead = min(channel.recvQueue.len, nbytes)
@@ -346,7 +348,7 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
asyncSpawn channel.trySend()
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open*(channel: YamuxChannel) {.async.} =
if channel.opened:
trace "Try to open channel twice"
return
@@ -429,7 +431,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -454,6 +456,7 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
if m.channels.len >= m.maxChannCount:
@@ -511,7 +514,7 @@ method getStreams*(m: Yamux): seq[Connection] =
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")

View File

@@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]

View File

@@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")

View File

@@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -179,7 +179,7 @@ proc addressMapper(
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"

View File

@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."

View File

@@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
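As the comment in the hunk notes, getHolePunchableAddrs now also keeps TCP addresses that carry a trailing /p2p/<peer-id> component, returning only the first two address parts. A hedged usage sketch (addresses and peer id are placeholders; assumes the dcutr core module is in scope):
  let addrs = @[
    MultiAddress.init("/ip4/198.51.100.7/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
    MultiAddress.init("/ip4/198.51.100.7/udp/1234/quic-v1").tryGet()]
  # only the TCP address survives, trimmed to /ip4/198.51.100.7/tcp/1234
  echo getHolePunchableAddrs(addrs)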

View File

@@ -29,7 +29,7 @@ logScope:
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."

View File

@@ -189,7 +189,7 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)

View File

@@ -47,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =

View File

@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)

View File

@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async, gcsafe, raises: [].} =
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)

View File

@@ -21,14 +21,14 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))

View File

@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
signedPeerRecord =
# The SPR contains the same data as the identify message
# would be cumbersome to log
if iinfo.signedPeerRecord.isSome(): "Some"
if it.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
@@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
if ? pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)
debug "decodeMsg: decoded identify", iinfo
Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
observedAddrManager: observedAddrManager,
)
identify.init()
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -168,7 +169,7 @@ method init*(p: Identify) =
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
raise newException(IdentityInvalidMsgError, "Empty message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
info.peerId = peer
info.observedAddr.withValue(observed):
if not self.observedAddrManager.addObservation(observed):
debug "Observed address is not valid", observedAddr = observed
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify push: decoded message", conn, identInfo
identInfo.pubkey.withValue(pubkey):
let receivedPeerId = PeerId.init(pubkey).tryGet()
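The new optional observedAddrManager parameter on Identify.new (earlier in this file's diff) can also be passed directly when not going through SwitchBuilder. A sketch, assuming peerInfo is an already constructed PeerInfo:
  let sharedOam = ObservedAddrManager.new()
  let identify = Identify.new(peerInfo, sendSignedPeerRecord = false,
                              observedAddrManager = sharedOam)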

View File

@@ -27,7 +27,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn

View File

@@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -71,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, gcsafe, public.} =
): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn

View File

@@ -79,7 +79,6 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
disconnectBadPeers: false,
enablePX: false,
bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
iwantTimeout: 3 * GossipSubHeartbeatInterval,
overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit: false
)
@@ -319,7 +318,7 @@ proc validateAndRelay(g: GossipSub,
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -385,7 +384,7 @@ proc validateAndRelay(g: GossipSub,
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.raises:[PeerRateLimitError, CatchableError], async.} =
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
# In this way we count even ignored fields by protobuf
var rmsg = rpcMsgOpt.valueOr:
@@ -461,9 +460,6 @@ method rpcHandler*(g: GossipSub,
let
msgId = msgIdResult.get
msgIdSalted = msgId & g.seenSalt
g.outstandingIWANTs.withValue(msgId, iwantRequest):
if iwantRequest.peer.peerId == peer.peerId:
g.outstandingIWANTs.del(msgId)
# addSeen adds salt to msgId to avoid
# remote attacking the hash function
@@ -496,14 +492,14 @@ method rpcHandler*(g: GossipSub,
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
continue
if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg)
await g.punishInvalidMessage(peer, msg)
continue
# g.anonymize needs no evaluation when receiving messages

View File

@@ -254,8 +254,7 @@ proc handleIHave*(g: GossipSub,
if not g.hasSeen(msgId):
if peer.iHaveBudget <= 0:
break
elif msgId notin res.messageIds and msgId notin g.outstandingIWANTs:
g.outstandingIWANTs[msgId] = IWANTRequest(messageId: msgId, peer: peer, timestamp: Moment.now())
elif msgId notin res.messageIds:
res.messageIds.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
@@ -301,17 +300,6 @@ proc handleIWant*(g: GossipSub,
messages.add(msg)
return messages
proc checkIWANTTimeouts(g: GossipSub, timeoutDuration: Duration) {.raises: [].} =
let currentTime = Moment.now()
var idsToRemove = newSeq[MessageId]()
for msgId, request in g.outstandingIWANTs.pairs():
if currentTime - request.timestamp > timeoutDuration:
trace "IWANT request timed out", messageID=msgId, peer=request.peer
request.peer.behaviourPenalty += 0.1
idsToRemove.add(msgId)
for msgId in idsToRemove:
g.outstandingIWANTs.del(msgId)
proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
@@ -717,5 +705,3 @@ proc heartbeat*(g: GossipSub) {.async.} =
for trigger in g.heartbeatEvents:
trace "firing heartbeat event", instance = cast[int](g)
trigger.fire()
checkIWANTTimeouts(g, g.parameters.iwantTimeout)

View File

@@ -240,15 +240,15 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) =
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
# discard g.disconnectPeer(peer)
# debug "Peer disconnected", peer, uselessAppBytesNum
# raise newException(PeerRateLimitError, "Peer sent invalid message and it's above rate limit")
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
for tt in msg.topicIds:

View File

@@ -143,7 +143,6 @@ type
enablePX*: bool
bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
iwantTimeout*: Duration
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool
@@ -181,7 +180,6 @@ type
routingRecordsHandler*: seq[RoutingRecordsHandler] # Callback for peer exchange
heartbeatEvents*: seq[AsyncEvent]
outstandingIWANTs*: Table[MessageId, IWANTRequest]
MeshMetrics* = object
# scratch buffers for metrics
@@ -192,8 +190,3 @@ type
lowPeersTopics*: int64 # npeers < dlow
healthyPeersTopics*: int64 # npeers >= dlow
underDoutTopics*: int64
IWANTRequest* = object
messageId*: MessageId
peer*: PubSubPeer
timestamp*: Moment

View File

@@ -234,7 +234,7 @@ template sendMetrics(msg: RPCMsg): untyped =
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:

View File

@@ -636,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)

View File

@@ -19,7 +19,7 @@ type
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything
p.codec = PlainTextCodec

View File

@@ -135,10 +135,9 @@ method init*(s: Secure) =
method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(s: SecureConn,
pbytes: pointer,

View File

@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false

View File

@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():

View File

@@ -50,7 +50,7 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async, gcsafe.} =
s.timeoutHandler = proc() {.async.} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()

View File

@@ -41,7 +41,7 @@ type
when defined(libp2p_agents_metrics):
shortAgent*: string
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc timeoutMonitor(s: Connection) {.async.}
func shortLog*(conn: Connection): string =
try:
@@ -110,7 +110,7 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
return false
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
proc timeoutMonitor(s: Connection) {.async.} =
## monitor the channel for inactivity
##
## if the timeout was hit, it means that

View File

@@ -246,7 +246,7 @@ proc readLine*(s: LPStream,
if len(result) == lim:
break
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
var
buffer: array[10, byte]
@@ -264,7 +264,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
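As background, the length prefix that readVarint and readLp work with here is a standard unsigned varint: seven payload bits per byte, with the high bit flagging that more bytes follow. A minimal, self-contained sketch of the encoding side, for illustration only (not part of this changeset):

```nim
# Illustrative sketch: LEB128-style unsigned varint encoding, the format that
# readVarint decodes and readLp uses as its length prefix. Not library code.
proc putUVarint(value: uint64): seq[byte] =
  var v = value
  while true:
    let b = byte(v and 0x7F)
    v = v shr 7
    if v == 0:
      result.add(b)             # last group: continuation bit clear
      break
    result.add(b or 0x80'u8)    # more groups follow: continuation bit set

# A length-prefixed frame is simply the varint-encoded length followed by the payload.
proc frameLp(payload: seq[byte]): seq[byte] =
  result = putUVarint(uint64(payload.len))
  result.add(payload)

when isMainModule:
  doAssert putUVarint(300) == @[0xAC'u8, 0x02]   # 300 -> 0xAC 0x02
```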

View File

@@ -71,17 +71,17 @@ type
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
method run*(self: Service, switch: Switch) {.base, async.} =
doAssert(false, "Not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if not self.inUse:
warn "service is already stopped"
return false
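For orientation, a hedged sketch of how a service plugs into this base contract, mirroring the AutoRelayService overrides shown earlier. LogService, its field, and the import lines are illustrative assumptions, not code from this changeset:

```nim
# Illustrative only: a minimal Service subclass using the setup/run/stop
# contract above. The import path may differ depending on project layout;
# LogService and `active` are made-up names.
import chronos, chronicles
import libp2p/switch

type LogService = ref object of Service
  active: bool

method setup*(self: LogService, switch: Switch): Future[bool] {.async.} =
  # let the base implementation mark the service as in use
  let ok = await procCall Service(self).setup(switch)
  if ok:
    self.active = true
  return ok

method run*(self: LogService, switch: Switch) {.async.} =
  trace "LogService tick", peer = switch.peerInfo.peerId

method stop*(self: LogService, switch: Switch): Future[bool] {.async.} =
  let stopped = await procCall Service(self).stop(switch)
  if stopped:
    self.active = false
  return stopped
```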
@@ -141,10 +141,10 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.public.} =
dir = Direction.Out): Future[void] {.public.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
method connect*(
s: Switch,
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
s.peerInfo.protocols.add(proto.codec)
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
trace "Connection upgrade succeeded"
@@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =
trace "Switch stopped"
proc start*(s: Switch) {.async, gcsafe, public.} =
proc start*(s: Switch) {.async, public.} =
## Start listening on every transport
if s.started:

View File

@@ -174,7 +174,7 @@ method start*(
trace "Listening on", address = ma
method stop*(self: TcpTransport) {.async, gcsafe.} =
method stop*(self: TcpTransport) {.async.} =
## stop the transport
##
try:
@@ -210,7 +210,7 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
except CatchableError as exc:
trace "Error shutting down tcp transport", exc = exc.msg
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TcpTransport): Future[Connection] {.async.} =
## accept a new TCP connection
##
@@ -219,7 +219,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
try:
if self.acceptFuts.len <= 0:
self.acceptFuts = self.servers.mapIt(it.accept())
self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))
if self.acceptFuts.len <= 0:
return
@@ -260,7 +260,7 @@ method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##

View File

@@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
let transp = await connect(transportAddress)
try:
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
@@ -99,7 +99,7 @@ proc connectToTorServer(
await transp.closeWait()
raise err
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
proc readServerReply(transp: StreamTransport) {.async.} =
## The specification for this code is defined in
## [RFC 1928, section 5](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [RFC 1928, section 6](https://www.rfc-editor.org/rfc/rfc1928#section-6).
@@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
let atyp = firstFourOctets[3]
case atyp:
of Socks5AddressType.IPv4.byte:
discard await transp.read(ipV4NumOctets + portNumOctets)
discard await transp.read(ipV4NumOctets + portNumOctets)
of Socks5AddressType.FQDN.byte:
let fqdnNumOctets = await transp.read(1)
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
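For readers unfamiliar with SOCKS5, the reply that readServerReply parses has the fixed RFC 1928 layout VER | REP | RSV | ATYP | BND.ADDR | BND.PORT; after the first four octets, the size of what remains depends on ATYP. A small illustrative sketch of that sizing rule (the helper and constant names are assumptions, not library code):

```nim
# Illustrative sketch of RFC 1928 reply sizing:
#   +-----+-----+-----+------+----------+----------+
#   | VER | REP | RSV | ATYP | BND.ADDR | BND.PORT |
#   +-----+-----+-----+------+----------+----------+
const
  PortNumOctets = 2
  IpV4NumOctets = 4
  IpV6NumOctets = 16

proc remainingReplyOctets(atyp: byte, fqdnLen = 0): int =
  ## Octets left to read after VER/REP/RSV/ATYP for a given address type.
  ## For FQDN replies, one length octet precedes the name itself.
  case atyp
  of 0x01'u8: IpV4NumOctets + PortNumOctets   # IPv4 address
  of 0x03'u8: 1 + fqdnLen + PortNumOctets     # domain name (FQDN)
  of 0x04'u8: IpV6NumOctets + PortNumOctets   # IPv6 address
  else: -1                                    # unknown address type
```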
@@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
transp: StreamTransport, address: MultiAddress) {.async.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
@@ -190,7 +190,7 @@ method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##
if not handlesDial(address):
@@ -229,14 +229,14 @@ method start*(
else:
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TorTransport): Future[Connection] {.async.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
method stop*(self: TorTransport) {.async.} =
## stop the transport
##
await procCall Transport(self).stop() # call base

View File

@@ -83,13 +83,12 @@ proc dial*(
method upgrade*(
self: Transport,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
self.upgrader.upgrade(conn, direction, peerId)
self.upgrader.upgrade(conn, peerId)
method handles*(
self: Transport,

View File

@@ -173,7 +173,7 @@ method start*(
self.running = true
method stop*(self: WsTransport) {.async, gcsafe.} =
method stop*(self: WsTransport) {.async.} =
## stop the transport
##
@@ -237,7 +237,7 @@ proc connHandler(self: WsTransport,
asyncSpawn onClose()
return conn
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: WsTransport): Future[Connection] {.async.} =
## accept a new WS connection
##
@@ -295,7 +295,7 @@ method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##

View File

@@ -32,8 +32,7 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
proc mux*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction): Future[Muxer] {.async, gcsafe.} =
conn: Connection): Future[Muxer] {.async.} =
## mux connection
trace "Muxing connection", conn
@@ -42,7 +41,7 @@ proc mux*(
return
let muxerName =
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
if muxerName.len == 0 or muxerName == "na":
@@ -62,16 +61,15 @@ proc mux*(
method upgrade*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction
trace "Upgrading connection", conn, direction = conn.dir
let sconn = await self.secure(conn, direction, peerId) # secure the connection
let sconn = await self.secure(conn, peerId) # secure the connection
if isNil(sconn):
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
let muxer = await self.mux(sconn, direction) # mux it if possible
let muxer = await self.mux(sconn) # mux it if possible
if muxer == nil:
raise newException(UpgradeFailedError,
"a muxer is required for outgoing connections")
@@ -84,7 +82,7 @@ method upgrade*(
raise newException(UpgradeFailedError,
"Connection closed or missing peer info, stopping upgrade")
trace "Upgraded connection", conn, sconn, direction
trace "Upgraded connection", conn, sconn, direction = conn.dir
return muxer
proc new*(
@@ -98,8 +96,7 @@ proc new*(
secureManagers: @secureManagers,
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
{.async, gcsafe, raises: [].} =
upgrader.streamHandler = proc(conn: Connection) {.async.} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection

View File

@@ -40,20 +40,18 @@ type
method upgrade*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
proc secure*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId]): Future[Connection] {.async.} =
if self.secureManagers.len <= 0:
raise newException(UpgradeFailedError, "No secure managers registered!")
let codec =
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
if codec.len == 0:
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@@ -65,4 +63,4 @@ proc secure*(
# let's avoid duplicating checks but detect if it fails to do it properly
doAssert(secureProtocol.len > 0)
return await secureProtocol[0].secure(conn, direction == Out, peerId)
return await secureProtocol[0].secure(conn, peerId)
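After this change, both the muxer negotiation in MuxedUpgrade.mux and the secure-channel negotiation above key off the connection's own direction: the outbound side actively selects one of its codecs via multistream-select, while the inbound side answers the dialer's proposal. A hedged sketch of that shared shape, assuming the same libp2p modules as upgrade.nim (the negotiate helper is hypothetical, not library API):

```nim
# Illustrative sketch of the dialer-selects / listener-handles pattern used by
# both mux() and secure(). `negotiate` is a made-up helper, not library API.
proc negotiate(self: Upgrade, conn: Connection,
               codecs: seq[string]): Future[string] {.async.} =
  if conn.dir == Direction.Out:
    # outbound: propose our supported codecs and let the remote choose
    return await self.ms.select(conn, codecs)
  else:
    # inbound: answer whatever codec the remote dialer proposes
    return await MultistreamSelect.handle(conn, codecs)
```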

nimble.lock (new file, 219 lines)

@@ -0,0 +1,219 @@
{
"version": 2,
"packages": {
"results": {
"version": "0.4.0",
"vcsRevision": "f3c666a272c69d70cb41e7245e7f6844797303ad",
"url": "https://github.com/arnetheduck/nim-results",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "51e08ca9524db98dc909fb39192272cc2b5451c7"
}
},
"unittest2": {
"version": "0.2.1",
"vcsRevision": "262b697f38d6b6f1e7462d3b3ab81d79b894e336",
"url": "https://github.com/status-im/nim-unittest2",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "1bac3a8355441edeed1ef3134e7436d6fb5d4498"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "3159137d9a3110edb4024145ce0ba778975de40e",
"url": "https://github.com/status-im/nim-stew",
"downloadMethod": "git",
"dependencies": [
"results",
"unittest2"
],
"checksums": {
"sha1": "4ab494e272e997011853faddebe9e55183613776"
}
},
"bearssl": {
"version": "0.2.1",
"vcsRevision": "e4157639db180e52727712a47deaefcbbac6ec86",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "a5086fd5c0af2b852f34c0cc6e4cff93a98f97ec"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "3b491a40c60aad9e8d3407443f46f62511e63b18",
"url": "https://github.com/status-im/nim-http-utils",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "1331f33585eda05d1e50385fa7871c3bf2a449d7"
}
},
"chronos": {
"version": "3.2.0",
"vcsRevision": "ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c",
"url": "https://github.com/status-im/nim-chronos",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "5783067584ac6812eb64b8454ea6f9c97ff1262a"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "720fc5e5c8e428d9d0af618e1e27c44b42350309",
"url": "https://github.com/status-im/nim-faststreams",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "ab178ba25970b95d953434b5d86b4d60396ccb64"
}
},
"serialization": {
"version": "0.2.0",
"vcsRevision": "4bdbc29e54fe54049950e352bb969aab97173b35",
"url": "https://github.com/status-im/nim-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "c8c99a387aae488e7008aded909ebfe662e74450"
}
},
"json_serialization": {
"version": "0.1.5",
"vcsRevision": "85b7ea093cb85ee4f433a617b97571bd709d30df",
"url": "https://github.com/status-im/nim-json-serialization",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "c6b30565292acf199b8be1c62114726e354af59e"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
"url": "https://github.com/status-im/nim-chronicles",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
}
},
"nimcrypto": {
"version": "0.6.0",
"vcsRevision": "1c8d6e3caf3abc572136ae9a1da81730c4eb4288",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "da3b105ad6bd7beef25c69f03afccb5e5233d483"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "a2f44bb7f65571a894227ff6fde9298a104e03a5",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "edbf76ebdecb63d302d1883fe4b23b2eb0608cb7"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "f8ed9b40a5ff27ad02a3c237c4905b0924e3f982",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "94f836ae589056b2deb04bdfdcd614fff80adaf5"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"secp256k1": {
"version": "0.6.0.3.2",
"vcsRevision": "7246d91c667f4cc3759fdd50339caa45a2ecd8be",
"url": "https://github.com/status-im/nim-secp256k1",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "aa0f88a68f67cef07f9f4a365a0121a2217dab81"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "6142e433fc8ea9b73379770a788017ac528d46ff",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "16ba266012d32d49631ca00add8e4698343758e0"
}
}
},
"tasks": {}
}

scripts/build_nim.sh (new file, 315 lines)

@@ -0,0 +1,315 @@
#!/usr/bin/env bash
# used in Travis CI and AppVeyor scripts
# Copyright (c) 2018-2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
set -e
# Git commits
: ${CSOURCES_V1_COMMIT:=a8a5241f9475099c823cfe1a5e0ca4022ac201ff}
: ${CSOURCES_V2_COMMIT:=86742fb02c6606ab01a532a0085784effb2e753e}
: ${CSOURCES_V1_REPO:=https://github.com/nim-lang/csources_v1.git}
: ${CSOURCES_V2_REPO:=https://github.com/nim-lang/csources_v2.git}
# After this Nim commit, use csources v2
: ${CSOURCES_V2_START_COMMIT:=f7c203fb6c89b5cef83c4f326aeb23ef8c4a2c40}
: ${NIMBLE_COMMIT:=3575fd54a890d910ace56678aa74b4237d604175} # 0.14.2
# NIM_COMMIT could be a (partial) commit hash, a tag, a branch name, etc. Empty by default.
NIM_COMMIT_HASH="" # full hash for NIM_COMMIT, retrieved in "nim_needs_rebuilding()"
# script arguments
[[ $# -ne 4 ]] && { echo "Usage: $0 nim_dir csources_dir nimble_dir ci_cache_dir"; exit 1; }
NIM_DIR="$1"
CSOURCES_DIR="$2" # can be relative to NIM_DIR
NIMBLE_DIR="$3" # can be relative to NIM_DIR
CI_CACHE="$4"
## env vars
# verbosity level
[[ -z "$V" ]] && V=0
[[ -z "$CC" ]] && CC="gcc"
# to build csources in parallel, set MAKE="make -jN"
[[ -z "$MAKE" ]] && MAKE="make"
# for 32-bit binaries on a 64-bit host
UCPU=""
[[ "$ARCH_OVERRIDE" == "x86" ]] && UCPU="ucpu=i686"
[[ -z "$NIM_BUILD_MSG" ]] && NIM_BUILD_MSG="Building the Nim compiler"
[[ -z "$QUICK_AND_DIRTY_COMPILER" ]] && QUICK_AND_DIRTY_COMPILER=0
[[ -z "$QUICK_AND_DIRTY_NIMBLE" ]] && QUICK_AND_DIRTY_NIMBLE=0
# Windows detection
if uname | grep -qiE "mingw|msys"; then
ON_WINDOWS=1
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
ON_WINDOWS=0
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi
NIM_BINARY="${NIM_DIR}/bin/nim${EXE_SUFFIX}"
MAX_NIM_BINARIES="10" # Old ones get deleted.
nim_needs_rebuilding() {
REBUILD=0
NO_REBUILD=1
echo "Nim is being rebuilt..."
if [[ ! -e "$NIM_DIR" ]]; then
# Shallow clone, optimised for the default NIM_COMMIT value.
git clone -q --depth=1 https://github.com/status-im/Nim.git "$NIM_DIR"
fi
pushd "${NIM_DIR}" >/dev/null
if [[ -n "${NIM_COMMIT}" ]]; then
# support old Git versions, like the one from Ubuntu-18.04
git restore . 2>/dev/null || git reset --hard
if ! git checkout -q ${NIM_COMMIT} 2>/dev/null; then
echo "Downloading Nim sources..."
echo $(pwd)
# Pay the price for a non-default NIM_COMMIT here, by fetching everything.
# (This includes upstream branches and tags that might be missing from our fork.)
git remote add upstream https://github.com/nim-lang/Nim
git fetch --all --tags --quiet
git checkout -q ${NIM_COMMIT}
fi
# In case the local branch diverged and a fast-forward merge is not possible.
git fetch || true
git reset -q --hard origin/${NIM_COMMIT} 2>/dev/null || true
# In case NIM_COMMIT is a local branch that's behind the remote one it's tracking.
git pull -q 2>/dev/null || true
git checkout -q ${NIM_COMMIT}
# We can't use "rev-parse" here, because it would return the tag object's
# hash instead of the commit hash, when NIM_COMMIT is a tag.
NIM_COMMIT_HASH="$(git rev-list -n 1 ${NIM_COMMIT})"
else
# NIM_COMMIT is empty, so assume the commit we need is already checked out
NIM_COMMIT_HASH="$(git rev-list -n 1 HEAD)"
fi
if [[ ! -d "$NIMBLE_DIR" ]]; then
echo "Downloading Nimble sources..."
echo $(pwd)
mkdir -p "$NIMBLE_DIR"
pushd "$NIMBLE_DIR"
git clone https://github.com/nim-lang/nimble.git .
git checkout $NIMBLE_COMMIT
# we have to delete .git or koch.nim will check out a branch tip, overriding our target commit
rm -rf .git
popd
fi
if [[ "$NIMBLE_DIR" != "dist/nimble" ]]; then
mkdir -p dist
rm -rf dist/nimble
ln -s ../"$NIMBLE_DIR" dist/nimble
fi
popd >/dev/null
if [[ -n "$CI_CACHE" && -d "$CI_CACHE" ]]; then
cp -a "$CI_CACHE"/* "$NIM_DIR"/bin/ || true # let this one fail with an empty cache dir
fi
# Delete old Nim binaries, to put a limit on how much storage we use.
for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* 2>/dev/null | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
if [[ -e "${F}" ]]; then
rm "${F}"
fi
done
# Compare the last built commit to the one requested.
# Handle the scenario where our symlink is manually deleted by the user.
if [[ -e "${NIM_DIR}/bin/last_built_commit" && \
-e "${NIM_DIR}/bin/nim${EXE_SUFFIX}" && \
"$(cat "${NIM_DIR}/bin/last_built_commit")" == "${NIM_COMMIT_HASH}" ]]; then
return $NO_REBUILD
elif [[ -e "${NIM_DIR}/bin/nim_commit_${NIM_COMMIT_HASH}" ]]; then
# we built the requested commit in the past, so we simply reuse it
rm -f "${NIM_DIR}/bin/nim${EXE_SUFFIX}"
ln -s "nim_commit_${NIM_COMMIT_HASH}" "${NIM_DIR}/bin/nim${EXE_SUFFIX}"
echo ${NIM_COMMIT_HASH} > "${NIM_DIR}/bin/last_built_commit"
return $NO_REBUILD
else
return $REBUILD
fi
}
build_nim() {
echo -e "$NIM_BUILD_MSG"
[[ "$V" == "0" ]] && exec &>/dev/null
# working directory
pushd "$NIM_DIR"
echo "Running build_nim"
if grep -q skipIntegrityCheck koch.nim; then
# Run Nim buildchain
. ci/funs.sh
echo "Building with default buildchain"
NIMCORES=1 nimBuildCsourcesIfNeeded $UCPU
bin/nim c --noNimblePath --skipUserCfg --skipParentCfg --warnings:off --hints:off koch
./koch --skipIntegrityCheck boot -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
if [[ "${QUICK_AND_DIRTY_COMPILER}" == "0" ]]; then
# We want tools
./koch tools -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
elif [[ "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
# We just want nimble
./koch nimble -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
fi
else
# Custom buildchain for older versions
# TODO Remove this once the default NIM_COMMIT supports `--skipIntegrityCheck`
# We will still be able to compile older versions by removing the flag,
# which will just waste a bit of CPU
echo "Building with custom buildchain"
# Git repos for csources and Nimble
if [[ ! -d "$CSOURCES_DIR" ]]; then
if git merge-base --is-ancestor $CSOURCES_V2_START_COMMIT $NIM_COMMIT_HASH; then
CSOURCES_REPO=$CSOURCES_V2_REPO
CSOURCES_COMMIT=$CSOURCES_V2_COMMIT
else
CSOURCES_REPO=$CSOURCES_V1_REPO
CSOURCES_COMMIT=$CSOURCES_V1_COMMIT
fi
mkdir -p "$CSOURCES_DIR"
pushd "$CSOURCES_DIR"
git clone $CSOURCES_REPO .
git checkout $CSOURCES_COMMIT
popd
fi
if [[ "$CSOURCES_DIR" != "csources" ]]; then
rm -rf csources
ln -s "$CSOURCES_DIR" csources
fi
if [[ ! -d "$NIMBLE_DIR" ]]; then
mkdir -p "$NIMBLE_DIR"
pushd "$NIMBLE_DIR"
git clone https://github.com/nim-lang/nimble.git .
git checkout $NIMBLE_COMMIT
# we have to delete .git or koch.nim will check out a branch tip, overriding our target commit
rm -rf .git
popd
fi
if [[ "$NIMBLE_DIR" != "dist/nimble" ]]; then
mkdir -p dist
rm -rf dist/nimble
ln -s ../"$NIMBLE_DIR" dist/nimble
fi
# bootstrap the Nim compiler and build the tools
rm -f bin/{nim,nim_csources}
pushd csources
if [[ "$ON_WINDOWS" == "0" ]]; then
$MAKE $UCPU clean
$MAKE $UCPU LD=$CC
else
$MAKE myos=windows $UCPU clean
$MAKE myos=windows $UCPU CC=gcc LD=gcc
fi
popd
if [[ -e csources/bin ]]; then
rm -f bin/nim bin/nim_csources
cp -a csources/bin/nim bin/nim
cp -a csources/bin/nim bin/nim_csources
rm -rf csources/bin
else
cp -a bin/nim bin/nim_csources
fi
if [[ "$QUICK_AND_DIRTY_COMPILER" == "0" ]]; then
sed \
-e 's/koch$/--warnings:off --hints:off koch/' \
-e 's/koch boot/koch boot --warnings:off --hints:off/' \
-e '/nimBuildCsourcesIfNeeded/d' \
build_all.sh > build_all_custom.sh
sh build_all_custom.sh
rm build_all_custom.sh
else
# Don't re-build it multiple times until we get identical
# binaries, like "build_all.sh" does. Don't build any tools
# either. This is all about build speed, not developer comfort.
bin/nim_csources \
c \
--compileOnly \
--nimcache:nimcache \
-d:release \
--skipUserCfg \
--skipParentCfg \
--warnings:off \
--hints:off \
compiler/nim.nim
bin/nim_csources \
jsonscript \
--nimcache:nimcache \
--skipUserCfg \
--skipParentCfg \
compiler/nim.nim
cp -a compiler/nim bin/nim1
# If we stop here, we risk ending up with a buggy compiler:
# https://github.com/status-im/nimbus-eth2/pull/2220
# https://github.com/status-im/nimbus-eth2/issues/2310
bin/nim1 \
c \
--compileOnly \
--nimcache:nimcache \
-d:release \
--skipUserCfg \
--skipParentCfg \
--warnings:off \
--hints:off \
compiler/nim.nim
bin/nim1 \
jsonscript \
--nimcache:nimcache \
--skipUserCfg \
--skipParentCfg \
compiler/nim.nim
rm -f bin/nim
cp -a compiler/nim bin/nim
rm bin/nim1
# Do we want Nimble in this quick build?
if [[ "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
bin/nim c -d:release --noNimblePath --skipUserCfg --skipParentCfg dist/nimble/src/nimble.nim
mv dist/nimble/src/nimble bin/
fi
fi
fi
if [[ "$QUICK_AND_DIRTY_COMPILER" == "0" || "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
# Nimble needs a CA cert
rm -f bin/cacert.pem
curl -LsS -o bin/cacert.pem https://curl.se/ca/cacert.pem || echo "Warning: 'curl' failed to download a CA cert needed by Nimble. Ignoring it."
fi
# record the built commit
echo ${NIM_COMMIT_HASH} > bin/last_built_commit
# create the symlink
mv bin/nim bin/nim_commit_${NIM_COMMIT_HASH}
ln -s nim_commit_${NIM_COMMIT_HASH} bin/nim${EXE_SUFFIX}
# update the CI cache
popd # we were in $NIM_DIR
if [[ -n "$CI_CACHE" ]]; then
rm -rf "$CI_CACHE"
mkdir "$CI_CACHE"
cp "$NIM_DIR"/bin/* "$CI_CACHE"/
fi
}
if nim_needs_rebuilding; then
build_nim
fi

View File

@@ -5,21 +5,21 @@ export unittest2, chronos
template asyncTeardown*(body: untyped): untyped =
teardown:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncSetup*(body: untyped): untyped =
setup:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncTest*(name: string, body: untyped): untyped =
test name:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
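A minimal usage sketch of asyncTest, assuming these helpers are imported alongside the unittest2 and chronos exports above (the test body below is made up):

```nim
# Illustrative only: the template wraps the body in an async proc and drives it
# to completion with waitFor, so `await` can be used directly in the test body.
suite "helpers example":
  asyncTest "sleep then check":
    await sleepAsync(10.milliseconds)
    check true
```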
@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
inc attemptNumber
try:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
except Exception as e:

View File

@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
buf.finish()
result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
## read length prefixed msg
var
size: uint

View File

@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
if conn.observedAddr.isSome():
check transport1.handles(conn.observedAddr.get())
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.write("Hello!")
await conn.close()
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
await conn.write(newSeq[byte](0))
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.close()

View File

@@ -111,7 +111,7 @@ proc bridgedConnections*: (Connection, Connection) =
return (connA, connB)
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
@@ -146,8 +146,8 @@ proc default*(T: typedesc[MockResolver]): T =
resolver.ipResponses[("localhost", true)] = @["::1"]
resolver
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc setDNSAddr*(switch: Switch) {.async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
switch.peerInfo.addressMappers.add(addressMapper)
await switch.peerInfo.update()
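The checkExpiringInternal helper above presumably backs a checkExpiring template (not shown in this hunk) that the GossipSub and Dcutr suites below rely on: the condition is retried for several seconds before the check fails. Typical usage, borrowing names from the Dcutr test:

```nim
# Illustrative usage only; switch names are taken from the Dcutr suite below.
checkExpiring:
  behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
```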

View File

@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.14 as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1

View File

@@ -0,0 +1,114 @@
import std/[os, options, strformat]
import redis
import chronos, chronicles
import ../../libp2p/[builders,
switch,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping]
import ../stubs/autonatclientstub
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder.new()
.withRng(rng)
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng=rng))
return s
proc main() {.async.} =
try:
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
# This is necessary to make the autonat service work: it will ask this peer for our reachability, to which the
# autonat client stub will answer NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
debug "Got relay address", relayMA
let relayId = await switch.connect(relayMA)
debug "Connected to relay", relayId
# Wait for our relay address to be published
while switch.peerInfo.addrs.len == 0:
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
quit(0)
except CatchableError as e:
error "Unexpected error", msg = e.msg
discard waitFor(main().withTimeout(4.minutes))
quit(1)

View File

@@ -0,0 +1,7 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp"
]
}

View File

@@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../helpers
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub(sender, receiver: auto; key: string) {.async.} =
# make things deterministic
# this is for testing purposes only
var ceil = 15
@@ -43,7 +43,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe A -> B":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -81,7 +81,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe B -> A":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -113,7 +113,7 @@ suite "FloodSub":
asyncTest "FloodSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -151,7 +151,7 @@ suite "FloodSub":
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should fail":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -186,7 +186,7 @@ suite "FloodSub":
asyncTest "FloodSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -235,7 +235,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -283,7 +283,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -333,7 +333,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check data.len < 50
inc(messageReceived)
@@ -375,7 +375,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
inc(messageReceived)
let

View File

@@ -24,7 +24,7 @@ import utils
import ../helpers
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
proc noop(data: seq[byte]) {.async.} = discard
const MsgIdSuccess = "msg id gen success"
@@ -718,104 +718,6 @@ suite "GossipSub internal":
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "two IHAVEs should generate only one IWANT":
let gossipSub = TestGossipSub.init(newStandardSwitch())
var iwantCount = 0
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.subscribe(topic, handler2)
# Setup two connections and two peers
var ihaveMessageId: string
var firstPeer: PubSubPeer
let seqno = @[0'u8, 1, 2, 3]
for i in 0..<2:
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
if isNil(firstPeer):
firstPeer = peer
ihaveMessageId = byteutils.toHex(seqno) & $firstPeer.peerId
peer.handler = handler
# Simulate that each peer sends an IHAVE message to our node
let msg = ControlIHave(
topicID: topic,
messageIDs: @[ihaveMessageId.toBytes()]
)
let iwants = gossipSub.handleIHave(peer, @[msg])
if iwants.messageIds.len > 0:
iwantCount += 1
# Verify that our node responds with only one IWANT message
check: iwantCount == 1
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
# Simulate that our node receives the RPCMsg in response to the IWANT
let actualMessageData = "Hello, World!".toBytes
let rpcMsg = RPCMsg(
messages: @[Message(
fromPeer: firstPeer.peerId,
seqno: seqno,
data: actualMessageData
)]
)
await gossipSub.rpcHandler(firstPeer, encodeRpcMsg(rpcMsg, false))
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId.toBytes())
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
asyncTest "handle unanswered IWANT messages":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.heartbeatInterval = 50.milliseconds
gossipSub.parameters.iwantTimeout = 10.milliseconds
await gossipSub.start()
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} = discard
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
let topic = "foobar"
var conns = newSeq[Connection]()
gossipSub.subscribe(topic, handler2)
# Setup a connection and a peer
let conn = TestBufferStream.new(noop)
conns &= conn
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
peer.handler = handler
# Simulate that the peer sends an IHAVE message to our node
let ihaveMessageId = @[0'u8, 1, 2, 3]
let ihaveMsg = ControlIHave(
topicID: topic,
messageIDs: @[ihaveMessageId]
)
discard gossipSub.handleIHave(peer, @[ihaveMsg])
check: gossipSub.outstandingIWANTs.contains(ihaveMessageId)
check: peer.behaviourPenalty == 0.0
await sleepAsync(60.milliseconds)
check: not gossipSub.outstandingIWANTs.contains(ihaveMessageId)
check: peer.behaviourPenalty == 0.1
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
let
nodes = generateNodes(2, gossip = true, verifySignature = false)
@@ -828,10 +730,10 @@ suite "GossipSub internal":
var receivedMessages = new(HashSet[seq[byte]])
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)

View File

@@ -47,7 +47,7 @@ suite "GossipSub":
asyncTest "GossipSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -92,7 +92,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (reject)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -138,7 +138,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (ignore)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -185,7 +185,7 @@ suite "GossipSub":
asyncTest "GossipSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -238,7 +238,7 @@ suite "GossipSub":
asyncTest "GossipSub unsub - resub faster than backoff":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -289,7 +289,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -323,7 +323,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -374,7 +374,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -428,7 +428,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -481,7 +481,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over mesh A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -548,11 +548,11 @@ suite "GossipSub":
var
aReceived = 0
cReceived = 0
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
inc aReceived
check aReceived < 2
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async.} =
inc cReceived
check cReceived < 2
cRelayed.complete()
@@ -596,7 +596,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over floodPublish A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -653,7 +653,7 @@ suite "GossipSub":
)
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for node in nodes:
@@ -661,7 +661,7 @@ suite "GossipSub":
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
block setup:
@@ -727,7 +727,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -778,7 +778,7 @@ suite "GossipSub":
var handler: TopicHandler
capture dialer, i:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -819,7 +819,7 @@ suite "GossipSub":
# PX to A & C
#
# C sent his SPR, not A
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let
@@ -895,9 +895,9 @@ suite "GossipSub":
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
@@ -943,7 +943,7 @@ suite "GossipSub":
await subscribeNodes(nodes)
proc handle(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handle(topic: string, data: seq[byte]) {.async.} = discard
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])
@@ -952,6 +952,10 @@ suite "GossipSub":
gossip1.subscribe("foobar", handle)
await waitSubGraph(nodes, "foobar")
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false
return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
@@ -964,8 +968,7 @@ suite "GossipSub":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let msg = RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: "Valid data".toBytes)])
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]))
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits
@@ -973,9 +976,10 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]))
await sleepAsync(300.millis)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits
await stopNodes(nodes)
@@ -986,8 +990,7 @@ suite "GossipSub":
let (nodes, gossip0, gossip1) = await initializeGossipTest()
# Simulate sending an undecodable message
let msg = newSeqWith[byte](30, 1.byte)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(msg)
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte))
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
@@ -995,7 +998,7 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(msg)
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte))
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
@@ -1008,10 +1011,9 @@ suite "GossipSub":
let msg = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](30)))
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg)
await sleepAsync(300.millis)
@@ -1020,7 +1022,42 @@ suite "GossipSub":
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], msg)
let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg2)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let topic = "foobar"
proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
let res = newFuture[ValidationResult]()
res.complete(ValidationResult.Reject)
res
gossip0.addValidator(topic, execValidator)
gossip1.addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])
gossip0.broadcast(gossip0.mesh[topic], msg)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh[topic], RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]))
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2

View File

@@ -59,7 +59,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -93,7 +93,7 @@ suite "GossipSub":
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -155,7 +155,7 @@ suite "GossipSub":
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await subscribeNodes(nodes)
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handler(topic: string, data: seq[byte]) {.async.} = discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
@@ -182,10 +182,10 @@ suite "GossipSub":
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", noop)
@@ -226,7 +226,7 @@ suite "GossipSub":
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
@@ -272,7 +272,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -324,7 +324,7 @@ suite "GossipSub":
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0..<runs:
@@ -368,7 +368,7 @@ suite "GossipSub":
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await subscribeNodes(nodes)

View File

@@ -128,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub*(sender, receiver: auto; key: string) {.async.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
@@ -148,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
while true:
var

View File

@@ -24,7 +24,7 @@ type
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
dir = Direction.Out): Future[void] {.async.}
method connect*(
self: SwitchStub,
@@ -32,11 +32,11 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
if (self.connectStub != nil):
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
else:
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)
proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
return SwitchStub(

View File

@@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =
proc makeAutonatServicePrivate(): Switch =
var autonatProtocol = new LPProtocol
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
discard await conn.readLp(1024)
await conn.writeLp(AutonatDialResponse(
status: DialError,

View File

@@ -87,7 +87,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
@@ -131,7 +131,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Reachable
@@ -173,7 +173,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -213,7 +213,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Unknown
@@ -267,7 +267,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -302,12 +302,12 @@ suite "Autonat Service":
let awaiter2 = newFuture[void]()
let awaiter3 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter2.finished:
awaiter2.complete()
@@ -345,7 +345,7 @@ suite "Autonat Service":
let awaiter1 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
@@ -388,7 +388,7 @@ suite "Autonat Service":
var awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -428,7 +428,7 @@ suite "Autonat Service":
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
fail()
check autonatService.networkReachability == NetworkReachability.Unknown

View File

@@ -32,7 +32,7 @@ method newStream*(
m: TestMuxer,
name: string = "",
lazy: bool = false):
Future[Connection] {.async, gcsafe.} =
Future[Connection] {.async.} =
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
suite "Connection Manager":

View File

@@ -57,14 +57,15 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -83,8 +84,8 @@ suite "Dcutr":
body
checkExpiring:
# no connection will be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -95,7 +96,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
@@ -114,7 +115,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@@ -142,13 +143,16 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts instead of one. The server dial is going to fail, but the client dial will succeed.
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -159,7 +163,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
await ductrServerTest(connectProc)
@@ -171,7 +175,23 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
await ductrServerTest(connectProc)
test "should return valid TCP/IP and TCP/DNS addresses only":
let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
let result = getHolePunchableAddrs(testAddrs)
check result == expected
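Note on the stubs in this file: their last parameter changes from upgradeDir to dir, matching the updated SwitchStub.connect shown earlier. A minimal sketch of such a stub and how it is plugged into a SwitchStub; the name delayedConnect is illustrative only, everything else follows the signatures visible in this diff:

proc delayedConnect(self: SwitchStub,
                    peerId: PeerId,
                    addrs: seq[MultiAddress],
                    forceDial = false,
                    reuseConnection = true,
                    dir = Direction.Out): Future[void] {.async.} =
  # simulate a slow dial, then fall back to the wrapped switch
  await sleepAsync(100.millis)
  await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)

let stubbedSwitch = SwitchStub.new(newStandardSwitch(), delayedConnect)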

View File

@@ -65,7 +65,7 @@ suite "Hole Punching":
let publicPeerSwitch = createSwitch(RelayClient.new())
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
await publicPeerSwitch.peerInfo.update()
@@ -193,38 +193,24 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
privatePeerSwitch2.connectStub = rcvConnectStub
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
# wait for hole punching to finish in the background
await sleepAsync(600.millis)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await holePunchingTest(nil, connectStub, NotReachable)
await holePunchingTest(nil, nil, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
raise newException(CatchableError, "error")
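The addressMapper change earlier in this file only drops the now-redundant gcsafe pragma; the mapping behaviour is unchanged: a mapper receives the switch's listen addresses and returns the addresses to advertise. A minimal sketch, assuming a switch variable named someSwitch (illustrative):

proc dnsAddressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
  # advertise a DNS name instead of the raw IP, keeping the original /tcp/<port> part
  return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]

someSwitch.peerInfo.addressMappers.add(dnsAddressMapper)
await someSwitch.peerInfo.update()  # re-derive the advertised addresses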

View File

@@ -73,7 +73,7 @@ suite "Identify":
asyncTest "default agent version":
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -95,7 +95,7 @@ suite "Identify":
remotePeerInfo.agentVersion = customAgentVersion
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -136,7 +136,7 @@ suite "Identify":
asyncTest "can send signed peer record":
msListen.addHandler(IdentifyCodec, identifyProto1)
identifyProto1.sendSignedPeerRecord = true
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)

View File

@@ -97,7 +97,7 @@ suite "Mplex":
suite "channel half-closed":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -112,7 +112,7 @@ suite "Mplex":
asyncTest "(local close) - should allow reads until remote closes":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -139,7 +139,7 @@ suite "Mplex":
asyncTest "(remote close) - channel should close for reading by remote":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -162,7 +162,7 @@ suite "Mplex":
let
testData = "Hello!".toBytes
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard
)
chann = LPChannel.init(1, conn, true)
@@ -175,7 +175,7 @@ suite "Mplex":
await conn.close()
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -192,7 +192,7 @@ suite "Mplex":
suite "channel reset":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -205,7 +205,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -220,7 +220,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -239,7 +239,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -254,7 +254,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -279,7 +279,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -293,7 +293,7 @@ suite "Mplex":
await conn.close()
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -311,7 +311,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -323,7 +323,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -335,7 +335,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -364,7 +364,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -376,7 +376,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
@@ -392,11 +392,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -429,11 +429,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -473,12 +473,12 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
try:
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
@@ -520,11 +520,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
await stream.writeLp("Hello from stream!")
await stream.close()
@@ -557,12 +557,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count}!"
count.inc
@@ -601,12 +601,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
@@ -646,12 +646,12 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
@@ -697,11 +697,11 @@ suite "Mplex":
var count = 0
var done = newFuture[void]()
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
count.inc()
if count == 10:
@@ -761,11 +761,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -805,11 +805,11 @@ suite "Mplex":
var mplexListen: Mplex
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -851,11 +851,11 @@ suite "Mplex":
var mplexHandle: Future[void]
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -896,11 +896,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -943,11 +943,11 @@ suite "Mplex":
var listenConn: Connection
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
listenConn = await transport1.accept()
let mplexListen = Mplex.new(listenConn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -992,11 +992,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 1024
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
@@ -1064,11 +1064,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 512
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
await stream.close()
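All of the Mplex changes above drop the redundant gcsafe annotation from async handlers; the accept/mux pattern itself is unchanged. A condensed sketch of that pattern, assuming Mplex exposes the usual handle() dispatch loop (not shown in this excerpt):

proc acceptHandler() {.async.} =
  let conn = await transport1.accept()
  let mplexListen = Mplex.new(conn)
  mplexListen.streamHandler = proc(stream: Connection) {.async.} =
    # per-stream handler: read one length-prefixed message, then close
    discard await stream.readLp(1024)
    await stream.close()
  await mplexListen.handle()  # assumption: run the muxer's dispatch loop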

View File

@@ -60,6 +60,7 @@ const
"/ip4/127.0.0.1/tcp/1234",
"/ip4/127.0.0.1/tcp/1234/",
"/ip4/127.0.0.1/udp/1234/quic",
"/ip4/192.168.80.3/udp/33422/quic-v1",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",

View File

@@ -34,7 +34,7 @@ type
method readOnce*(s: TestSelectStream,
pbytes: pointer,
nbytes: int): Future[int] {.async, gcsafe.} =
nbytes: int): Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -64,9 +64,9 @@ method readOnce*(s: TestSelectStream,
return "\0x3na\n".len()
method write*(s: TestSelectStream, msg: seq[byte]) {.async, gcsafe.} = discard
method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
method close(s: TestSelectStream) {.async, gcsafe.} =
method close(s: TestSelectStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -113,11 +113,11 @@ method readOnce*(s: TestLsStream,
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
method write*(s: TestLsStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.ls(msg)
method close(s: TestLsStream) {.async, gcsafe.} =
method close(s: TestLsStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -137,7 +137,7 @@ type
method readOnce*(s: TestNaStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async, gcsafe.} =
Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -167,11 +167,11 @@ method readOnce*(s: TestNaStream,
return "\0x3na\n".len()
method write*(s: TestNaStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.na(string.fromBytes(msg))
method close(s: TestNaStream) {.async, gcsafe.} =
method close(s: TestNaStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -197,7 +197,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.close()
@@ -210,7 +210,7 @@ suite "Multistream select":
var conn: Connection = nil
let done = newFuture[void]()
proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} =
proc testLsHandler(proto: seq[byte]) {.async.} =
var strProto: string = string.fromBytes(proto)
check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
await conn.close()
@@ -218,7 +218,7 @@ suite "Multistream select":
conn = Connection(newTestLsStream(testLsHandler))
proc testHandler(conn: Connection, proto: string): Future[void]
{.async, gcsafe.} = discard
{.async.} = discard
var protocol: LPProtocol = new LPProtocol
protocol.handler = testHandler
ms.addHandler("/test/proto1/1.0.0", protocol)
@@ -230,7 +230,7 @@ suite "Multistream select":
let ms = MultistreamSelect.new()
var conn: Connection = nil
proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
proc testNaHandler(msg: string): Future[void] {.async.} =
check msg == "\x03na\n"
await conn.close()
conn = newTestNaStream(testNaHandler)
@@ -238,7 +238,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} = discard
Future[void] {.async.} = discard
protocol.handler = testHandler
ms.addHandler("/unabvailable/proto/1.0.0", protocol)
@@ -250,7 +250,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -262,7 +262,7 @@ suite "Multistream select":
let transport1 = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
await conn.close()
@@ -293,7 +293,7 @@ suite "Multistream select":
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
@@ -315,7 +315,7 @@ suite "Multistream select":
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
@@ -362,7 +362,7 @@ suite "Multistream select":
let msListen = MultistreamSelect.new()
var protocol: LPProtocol = new LPProtocol
protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
protocol.handler = proc(conn: Connection, proto: string) {.async.} =
# never reached
discard
@@ -379,7 +379,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
try:
await msListen.handle(conn)
@@ -412,7 +412,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -424,7 +424,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
@@ -450,7 +450,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await conn.writeLp(&"Hello from {proto}!")
await conn.close()
@@ -462,7 +462,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
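The recurring pattern in these multistream-select tests is untouched by the pragma cleanup: the listener accepts a raw connection and hands it to msListen.handle, while the dialer negotiates a codec with select. A condensed sketch (the protocol name is illustrative):

proc acceptHandler(): Future[void] {.async.} =
  let conn = await transport1.accept()
  await msListen.handle(conn)  # negotiate and dispatch to a registered handler
  await conn.close()

let conn = await transport2.dial(transport1.addrs[0])
discard await msDial.select(conn, "/test/proto/1.0.0")  # pick one protocol explicitly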

View File

@@ -41,7 +41,7 @@ type
{.push raises: [].}
method init(p: TestProto) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
@@ -100,7 +100,7 @@ suite "Noise":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
try:
await sconn.write("Hello!")
finally:
@@ -115,7 +115,7 @@ suite "Noise":
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
var msg = newSeq[byte](6)
await sconn.readExactly(addr msg[0], 6)
@@ -140,11 +140,11 @@ suite "Noise":
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var conn: Connection
try:
conn = await transport1.accept()
discard await serverNoise.secure(conn, false, Opt.none(PeerId))
discard await serverNoise.secure(conn, Opt.none(PeerId))
except CatchableError:
discard
finally:
@@ -160,7 +160,7 @@ suite "Noise":
var sconn: Connection = nil
expect(NoiseDecryptTagError):
sconn = await clientNoise.secure(conn, true, Opt.some(conn.peerId))
sconn = await clientNoise.secure(conn, Opt.some(conn.peerId))
await conn.close()
await handlerWait
@@ -178,9 +178,9 @@ suite "Noise":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
await conn.close()
@@ -196,7 +196,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.write("Hello!")
await acceptFut
@@ -221,9 +221,9 @@ suite "Noise":
transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
listenFut = transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
let msg = await sconn.readLp(1024*1024)
@@ -237,7 +237,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.writeLp(hugePayload)
await readTask
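Besides the pragma cleanup, this file reflects an API change: secure() no longer takes the initiator boolean, and the handshake direction instead comes from how the Noise instance was constructed (for example, outgoing = true on the dialer, as shown above). A minimal sketch of the new call shape:

# listener side: no direction flag, only the optional expected peer id
let serverSecured = await serverNoise.secure(conn, Opt.none(PeerId))

# dialer side: pin the expected remote peer id
let clientSecured = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))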

View File

@@ -42,7 +42,7 @@ suite "Ping":
transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())
proc handlePing(peer: PeerId) {.async, gcsafe, closure.} =
proc handlePing(peer: PeerId) {.async, closure.} =
inc pingReceivedCount
pingProto1 = Ping.new()
pingProto2 = Ping.new(handlePing)
@@ -63,7 +63,7 @@ suite "Ping":
asyncTest "simple ping":
msListen.addHandler(PingCodec, pingProto1)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -78,7 +78,7 @@ suite "Ping":
asyncTest "ping callback":
msDial.addHandler(PingCodec, pingProto2)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
discard await msListen.select(c, PingCodec)
discard await pingProto1.ping(c)
@@ -92,7 +92,7 @@ suite "Ping":
asyncTest "bad ping data ack":
type FakePing = ref object of LPProtocol
let fakePingProto = FakePing()
proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc fakeHandle(conn: Connection, proto: string) {.async, closure.} =
var
buf: array[32, byte]
fakebuf: array[32, byte]
@@ -103,7 +103,7 @@ suite "Ping":
msListen.addHandler(PingCodec, fakePingProto)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
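For reference, the ping setup these tests rely on, as a sketch: conn stands for an already-negotiated stream, and the value returned by ping() is discarded in the tests above.

proc handlePing(peer: PeerId) {.async, closure.} =
  # invoked on the responder side for every completed ping
  inc pingReceivedCount

let pingWithCallback = Ping.new(handlePing)  # responder with a callback
discard await pingProto1.ping(conn)          # initiator side; result unused here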

View File

@@ -19,14 +19,22 @@ import ./helpers
import std/times
import stew/byteutils
proc createSwitch(r: Relay): Switch =
result = SwitchBuilder.new()
proc createSwitch(r: Relay = nil, useYamux: bool = false): Switch =
var builder = SwitchBuilder.new()
.withRng(newRng())
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withTcpTransport()
.withMplex()
if useYamux:
builder = builder.withYamux()
else:
builder = builder.withMplex()
if r != nil:
builder = builder.withCircuitRelay(r)
return builder
.withNoise()
.withCircuitRelay(r)
.build()
suite "Circuit Relay V2":
@@ -122,308 +130,310 @@ suite "Circuit Relay V2":
expect(ReservationError):
discard await cl1.reserve(src2.peerInfo.peerId, addrs)
suite "Connection":
asyncTeardown:
checkTrackers()
var
customProtoCodec {.threadvar.}: string
proto {.threadvar.}: LPProtocol
ttl {.threadvar.}: int
ldur {.threadvar.}: uint32
ldata {.threadvar.}: uint64
srcCl {.threadvar.}: RelayClient
dstCl {.threadvar.}: RelayClient
rv2 {.threadvar.}: Relay
src {.threadvar.}: Switch
dst {.threadvar.}: Switch
rel {.threadvar.}: Switch
rsvp {.threadvar.}: Rsvp
conn {.threadvar.}: Connection
asyncSetup:
customProtoCodec = "/test"
proto = new LPProtocol
proto.codec = customProtoCodec
ttl = 60
ldur = 120
ldata = 16384
srcCl = RelayClient.new()
dstCl = RelayClient.new()
src = createSwitch(srcCl)
dst = createSwitch(dstCl)
rel = newStandardSwitch()
asyncTest "Connection succeed":
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection duration exceeded":
ldur = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("yeah!")
check "go!" == string.fromBytes(await conn.readLp(1024))
await sleepAsync(chronos.timer.seconds(ldur + 1))
await conn.writeLp("that was a cool power nap")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("wanna sleep?")
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("go!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection data exceeded":
ldata = 1000
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("do you expect a lorem ipsum or...?")
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("count me the better story you know")
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("surprise me!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Reservation ttl expire during connection":
ttl = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await src.disconnect(rel.peerInfo.peerId)
await sleepAsync(chronos.timer.seconds(ttl + 1))
expect(DialFailedError):
check: conn.atEof()
await conn.close()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection over relay":
# src => rel => rel2 => dst
# rel2 reserve rel
# dst reserve rel2
# src try to connect with dst
proto.handler = proc(conn: Connection, proto: string) {.async.} =
raise newException(CatchableError, "Should not be here")
let
rel2Cl = RelayClient.new(canHop = true)
rel2 = createSwitch(rel2Cl)
rv2 = Relay.new()
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await rel2.start()
await src.start()
await dst.start()
let
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
$rel2.peerInfo.peerId & "/p2p/" &
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
expect(DialFailedError):
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
asyncTest "Connection using ClientRelay":
for (useYamux, muxName) in [(false, "Mplex"), (true, "Yamux")]:
suite "Circuit Relay V2 Connection using " & muxName:
asyncTeardown:
checkTrackers()
var
protoABC = new LPProtocol
protoBCA = new LPProtocol
protoCAB = new LPProtocol
protoABC.codec = "/abctest"
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC2")
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC4")
await conn.close()
protoBCA.codec = "/bcatest"
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA2")
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA4")
await conn.close()
protoCAB.codec = "/cabtest"
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB2")
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB4")
await conn.close()
customProtoCodec {.threadvar.}: string
proto {.threadvar.}: LPProtocol
ttl {.threadvar.}: int
ldur {.threadvar.}: uint32
ldata {.threadvar.}: uint64
srcCl {.threadvar.}: RelayClient
dstCl {.threadvar.}: RelayClient
rv2 {.threadvar.}: Relay
src {.threadvar.}: Switch
dst {.threadvar.}: Switch
rel {.threadvar.}: Switch
rsvp {.threadvar.}: Rsvp
conn {.threadvar.}: Connection
let
clientA = RelayClient.new(canHop = true)
clientB = RelayClient.new(canHop = true)
clientC = RelayClient.new(canHop = true)
switchA = createSwitch(clientA)
switchB = createSwitch(clientB)
switchC = createSwitch(clientC)
asyncSetup:
customProtoCodec = "/test"
proto = new LPProtocol
proto.codec = customProtoCodec
ttl = 60
ldur = 120
ldata = 16384
srcCl = RelayClient.new()
dstCl = RelayClient.new()
src = createSwitch(srcCl, useYamux)
dst = createSwitch(dstCl, useYamux)
rel = createSwitch(nil, useYamux)
switchA.mount(protoBCA)
switchB.mount(protoCAB)
switchC.mount(protoABC)
asyncTest "Connection succeed":
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await switchA.start()
await switchB.start()
await switchC.start()
await rel.start()
await src.start()
await dst.start()
let
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
$switchB.peerInfo.peerId & "/p2p-circuit").get()
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
$switchC.peerInfo.peerId & "/p2p-circuit").get()
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
$switchA.peerInfo.peerId & "/p2p-circuit").get()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await connABC.writeLp("testABC1")
await connBCA.writeLp("testBCA1")
await connCAB.writeLp("testCAB1")
check:
"testABC2" == string.fromBytes(await connABC.readLp(1024))
"testBCA2" == string.fromBytes(await connBCA.readLp(1024))
"testCAB2" == string.fromBytes(await connCAB.readLp(1024))
await connABC.writeLp("testABC3")
await connBCA.writeLp("testBCA3")
await connCAB.writeLp("testCAB3")
check:
"testABC4" == string.fromBytes(await connABC.readLp(1024))
"testBCA4" == string.fromBytes(await connBCA.readLp(1024))
"testCAB4" == string.fromBytes(await connCAB.readLp(1024))
await allFutures(connABC.close(), connBCA.close(), connCAB.close())
await allFutures(switchA.stop(), switchB.stop(), switchC.stop())
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection duration exceeded":
ldur = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "wanna sleep?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("yeah!")
check "go!" == string.fromBytes(await conn.readLp(1024))
await sleepAsync(chronos.timer.seconds(ldur + 1))
await conn.writeLp("that was a cool power nap")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("wanna sleep?")
check: "yeah!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("go!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection data exceeded":
ldata = 1000
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check "count me the better story you know" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("do you expect a lorem ipsum or...?")
check "surprise me!" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("""Call me Ishmael. Some years ago--never mind how long
precisely--having little or no money in my purse, and nothing
particular to interest me on shore, I thought I would sail about a
little and see the watery part of the world. It is a way I have of
driving off the spleen and regulating the circulation. Whenever I
find myself growing grim about the mouth; whenever it is a damp,
drizzly November in my soul; whenever I find myself involuntarily
pausing before coffin warehouses, and bringing up the rear of every
funeral I meet; and especially whenever my hypos get such an upper
hand of me, that it requires a strong moral principle to prevent me
from deliberately stepping into the street, and methodically knocking
people's hats off--then, I account it high time to get to sea as soon
as I can. This is my substitute for pistol and ball. With a
philosophical flourish Cato throws himself upon his sword; I quietly
take to the ship.""")
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("count me the better story you know")
check: "do you expect a lorem ipsum or...?" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("surprise me!")
expect(LPStreamEOFError):
discard await conn.readLp(1024)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Reservation ttl expire during connection":
ttl = 3
proto.handler = proc(conn: Connection, proto: string) {.async.} =
check: "test1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test2")
check: "test3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test4")
await conn.close()
rv2 = Relay.new(reservationTTL=initDuration(seconds=ttl),
limitDuration=ldur,
limitData=ldata)
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await src.start()
await dst.start()
let addrs = MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit").get()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
rsvp = await dstCl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await conn.writeLp("test1")
check: "test2" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test3")
check: "test4" == string.fromBytes(await conn.readLp(1024))
await src.disconnect(rel.peerInfo.peerId)
await sleepAsync(chronos.timer.seconds(ttl + 1))
expect(DialFailedError):
check: conn.atEof()
await conn.close()
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
conn = await src.dial(dst.peerInfo.peerId, @[ addrs ], customProtoCodec)
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop())
asyncTest "Connection over relay":
# src => rel => rel2 => dst
# rel2 reserve rel
# dst reserve rel2
# src try to connect with dst
proto.handler = proc(conn: Connection, proto: string) {.async.} =
raise newException(CatchableError, "Should not be here")
let
rel2Cl = RelayClient.new(canHop = true)
rel2 = createSwitch(rel2Cl, useYamux)
rv2 = Relay.new()
rv2.setup(rel)
rel.mount(rv2)
dst.mount(proto)
await rel.start()
await rel2.start()
await src.start()
await dst.start()
let
addrs = @[ MultiAddress.init($rel.peerInfo.addrs[0] & "/p2p/" &
$rel.peerInfo.peerId & "/p2p-circuit/p2p/" &
$rel2.peerInfo.peerId & "/p2p/" &
$rel2.peerInfo.peerId & "/p2p-circuit").get() ]
await src.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await rel2.connect(rel.peerInfo.peerId, rel.peerInfo.addrs)
await dst.connect(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
rsvp = await rel2Cl.reserve(rel.peerInfo.peerId, rel.peerInfo.addrs)
let rsvp2 = await dstCl.reserve(rel2.peerInfo.peerId, rel2.peerInfo.addrs)
expect(DialFailedError):
conn = await src.dial(dst.peerInfo.peerId, addrs, customProtoCodec)
if not conn.isNil():
await allFutures(conn.close())
await allFutures(src.stop(), dst.stop(), rel.stop(), rel2.stop())
asyncTest "Connection using ClientRelay":
var
protoABC = new LPProtocol
protoBCA = new LPProtocol
protoCAB = new LPProtocol
protoABC.codec = "/abctest"
protoABC.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testABC1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC2")
check: "testABC3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testABC4")
await conn.close()
protoBCA.codec = "/bcatest"
protoBCA.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testBCA1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA2")
check: "testBCA3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testBCA4")
await conn.close()
protoCAB.codec = "/cabtest"
protoCAB.handler = proc(conn: Connection, proto: string) {.async.} =
check: "testCAB1" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB2")
check: "testCAB3" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("testCAB4")
await conn.close()
let
clientA = RelayClient.new(canHop = true)
clientB = RelayClient.new(canHop = true)
clientC = RelayClient.new(canHop = true)
switchA = createSwitch(clientA, useYamux)
switchB = createSwitch(clientB, useYamux)
switchC = createSwitch(clientC, useYamux)
switchA.mount(protoBCA)
switchB.mount(protoCAB)
switchC.mount(protoABC)
await switchA.start()
await switchB.start()
await switchC.start()
let
addrsABC = MultiAddress.init($switchB.peerInfo.addrs[0] & "/p2p/" &
$switchB.peerInfo.peerId & "/p2p-circuit").get()
addrsBCA = MultiAddress.init($switchC.peerInfo.addrs[0] & "/p2p/" &
$switchC.peerInfo.peerId & "/p2p-circuit").get()
addrsCAB = MultiAddress.init($switchA.peerInfo.addrs[0] & "/p2p/" &
$switchA.peerInfo.peerId & "/p2p-circuit").get()
await switchA.connect(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
await switchB.connect(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
await switchC.connect(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpABC = await clientA.reserve(switchC.peerInfo.peerId, switchC.peerInfo.addrs)
let rsvpBCA = await clientB.reserve(switchA.peerInfo.peerId, switchA.peerInfo.addrs)
let rsvpCAB = await clientC.reserve(switchB.peerInfo.peerId, switchB.peerInfo.addrs)
let connABC = await switchA.dial(switchC.peerInfo.peerId, @[ addrsABC ], "/abctest")
let connBCA = await switchB.dial(switchA.peerInfo.peerId, @[ addrsBCA ], "/bcatest")
let connCAB = await switchC.dial(switchB.peerInfo.peerId, @[ addrsCAB ], "/cabtest")
await connABC.writeLp("testABC1")
await connBCA.writeLp("testBCA1")
await connCAB.writeLp("testCAB1")
check:
"testABC2" == string.fromBytes(await connABC.readLp(1024))
"testBCA2" == string.fromBytes(await connBCA.readLp(1024))
"testCAB2" == string.fromBytes(await connCAB.readLp(1024))
await connABC.writeLp("testABC3")
await connBCA.writeLp("testBCA3")
await connCAB.writeLp("testCAB3")
check:
"testABC4" == string.fromBytes(await connABC.readLp(1024))
"testBCA4" == string.fromBytes(await connBCA.readLp(1024))
"testCAB4" == string.fromBytes(await connCAB.readLp(1024))
await allFutures(connABC.close(), connBCA.close(), connCAB.close())
await allFutures(switchA.stop(), switchB.stop(), switchC.stop())

View File

@@ -46,7 +46,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -86,7 +86,7 @@ suite "Switch":
asyncTest "e2e use switch dial proto string with custom matcher":
let done = newFuture[void]()
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -131,7 +131,7 @@ suite "Switch":
asyncTest "e2e should not leak bufferstreams and connections on channel close":
let done = newFuture[void]()
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -171,7 +171,7 @@ suite "Switch":
check not switch2.isConnected(switch1.peerInfo.peerId)
asyncTest "e2e use connect then dial":
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -305,7 +305,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
-proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -357,7 +357,7 @@ suite "Switch":
var step = 0
var kinds: set[ConnEventKind]
-proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -409,7 +409,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
-proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -460,7 +460,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
-proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -521,7 +521,7 @@ suite "Switch":
var step = 0
var kinds: set[PeerEventKind]
-proc handler(peerId: PeerId, event: PeerEvent) {.async, gcsafe.} =
+proc handler(peerId: PeerId, event: PeerEvent) {.async.} =
kinds = kinds + {event.kind}
case step:
of 0:
@@ -581,7 +581,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
-proc hook(peerId: PeerId, event: ConnEvent) {.async, gcsafe.} =
+proc hook(peerId: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
await onConnect
@@ -619,7 +619,7 @@ suite "Switch":
var switches: seq[Switch]
var done = newFuture[void]()
var onConnect: Future[void]
-proc hook(peerId2: PeerId, event: ConnEvent) {.async, gcsafe.} =
+proc hook(peerId2: PeerId, event: ConnEvent) {.async.} =
case event.kind:
of ConnEventKind.Connected:
if conns == 5:
@@ -662,7 +662,7 @@ suite "Switch":
let transport = TcpTransport.new(upgrade = Upgrade())
await transport.start(ma)
-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.closeWithEOF()
@@ -686,7 +686,7 @@ suite "Switch":
switch.stop())
asyncTest "e2e calling closeWithEOF on the same stream should not assert":
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
discard await conn.readLp(100)
let testProto = new TestProto
@@ -832,7 +832,7 @@ suite "Switch":
asyncTest "e2e peer store":
let done = newFuture[void]()
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -882,7 +882,7 @@ suite "Switch":
# this randomly locks the Windows CI job
skip()
return
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
try:
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
@@ -1019,7 +1019,7 @@ suite "Switch":
await srcTcpSwitch.stop()
asyncTest "mount unstarted protocol":
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
check "test123" == string.fromBytes(await conn.readLp(1024))
await conn.writeLp("test456")
await conn.close()
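
All of the hunks in this file make the same mechanical change: the explicit gcsafe pragma is dropped from procs that are already declared {.async.}. A minimal sketch of the resulting style, under the assumption (implied by these diffs) that chronos's async transformation already subjects the generated code to the gcsafe check, so spelling the pragma out again adds nothing:

import chronos

# Assumption: {.async.} alone is enough; no explicit gcsafe annotation needed.
proc greet(name: string): Future[string] {.async.} =
  return "Hello, " & name & "!"

echo waitFor greet("libp2p")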


@@ -30,7 +30,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.write("Hello!")
await conn.close()
@@ -52,7 +52,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport.start(ma)
-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
var msg = newSeq[byte](6)
let conn = await transport.accept()
await conn.readExactly(addr msg[0], 6)
@@ -73,7 +73,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
-transp: StreamTransport) {.async, gcsafe.} =
+transp: StreamTransport) {.async.} =
var wstream = newAsyncStreamWriter(transp)
await wstream.write("Hello!")
await wstream.finish()
@@ -106,7 +106,7 @@ suite "TCP transport":
let address = initTAddress("0.0.0.0:0")
let handlerWait = newFuture[void]()
proc serveClient(server: StreamServer,
-transp: StreamTransport) {.async, gcsafe.} =
+transp: StreamTransport) {.async.} =
var rstream = newAsyncStreamReader(transp)
let msg = await rstream.read(6)
check string.fromBytes(msg) == "Hello!"
@@ -179,7 +179,7 @@ suite "TCP transport":
let transport: TcpTransport = TcpTransport.new(upgrade = Upgrade(), connectionsTimeout=1.milliseconds)
asyncSpawn transport.start(ma)
-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
let conn = await transport.accept()
await conn.join()
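
The two serveClient hunks above exercise the TCP transport against a plain chronos stream server. The sketch below reproduces that handler shape end to end; the fixed 127.0.0.1:45678 address, the createStreamServer/connect plumbing, and the six-byte read are assumptions made for the sketch, not details taken from the test file.

import chronos, chronos/streams/asyncstream, stew/byteutils

proc serveClient(server: StreamServer, transp: StreamTransport) {.async.} =
  # Same shape as the handler above: wrap the transport in an async stream
  # writer, push a greeting, then finish and close.
  let wstream = newAsyncStreamWriter(transp)
  await wstream.write("Hello!")
  await wstream.finish()
  await wstream.closeWait()
  await transp.closeWait()

proc main() {.async.} =
  let address = initTAddress("127.0.0.1:45678")   # arbitrary fixed port for the sketch
  let server = createStreamServer(address, serveClient, {ServerFlags.ReuseAddr})
  server.start()

  let client = await connect(address)
  var msg = newSeq[byte](6)
  await client.readExactly(addr msg[0], 6)
  doAssert string.fromBytes(msg) == "Hello!"
  await client.closeWait()

  server.stop()
  await server.closeWait()

waitFor main()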


@@ -56,7 +56,7 @@ suite "Tor transport":
check string.fromBytes(resp) == "server"
await client.stop()
-proc serverAcceptHandler() {.async, gcsafe.} =
+proc serverAcceptHandler() {.async.} =
let conn = await server.accept()
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)
@@ -87,7 +87,7 @@ suite "Tor transport":
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
-proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+proc handle(conn: Connection, proto: string) {.async.} =
var resp: array[6, byte]
await conn.readExactly(addr resp, 6)


@@ -89,7 +89,7 @@ suite "WebSocket transport":
const correctPattern = mapAnd(TCP, mapEq("wss"))
await transport1.start(ma)
check correctPattern.match(transport1.addrs[0])
-proc acceptHandler() {.async, gcsafe.} =
+proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
if not isNil(conn):