Compare commits

...

93 Commits

Author SHA1 Message Date
Roman
b0d1144f96 test: rebase and re-run project tests with Nim 1.6 2023-12-13 16:34:25 +01:00
Etan Kissling
762be89dd7 include connection info when logging identify message (#991) 2023-12-13 16:34:25 +01:00
diegomrsantos
5702b2d355 feat: add hole-punching interop tests (#998) 2023-12-13 16:34:25 +01:00
Jacek Sieka
9058c981cc remove redundant gcsafe annotations (#999) 2023-12-13 16:34:25 +01:00
Roman Zajic
acad1abc28 fix: remove forgotten "matrix-prep" job (#997) 2023-12-13 16:34:25 +01:00
Roman Zajic
aeb7167da4 fix: move workflows for Nim Devel and legacy i386 from "Daily" (#968) 2023-12-13 16:34:24 +01:00
diegomrsantos
061ea21729 fix(dcutr): update the DCUtR initiator transport direction to Inbound (#994) 2023-12-13 16:34:23 +01:00
diegomrsantos
6cdd4c911b fix(identify): do not add p2p and relayed addrs to observed addr manager (#990) 2023-12-13 16:34:22 +01:00
diegomrsantos
7ce2afba13 fix(yamux): doesn't work in a Relayv2 connection (#979)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-12-13 16:34:22 +01:00
diegomrsantos
39586605d9 fix(dcutr): handle tcp/p2p addresses (#989) 2023-12-13 16:34:22 +01:00
diegomrsantos
ff9493190f fix(multiaddress): add quic-v1 multiaddress support (#988) 2023-12-13 16:34:22 +01:00
diegomrsantos
b0964a410a Make ObservedAddrManager injectable (#970) 2023-12-13 16:34:22 +01:00
diegomrsantos
6189c2aaf5 fix(dcutr): make the dcutr client inbound and the server outbound (#983) 2023-12-13 16:34:22 +01:00
Jacek Sieka
373a0287a5 fix chronos v4 compat (#982) 2023-12-13 16:34:14 +01:00
diegomrsantos
0d05707875 fix: doc workflow (#985) 2023-12-13 16:34:14 +01:00
diegomrsantos
b835100682 Rate limit fixes (#965) 2023-12-13 16:34:14 +01:00
diegomrsantos
c876904425 Revert "Prevent concurrent IWANT of the same message (#943)" (#977) 2023-12-13 16:34:14 +01:00
Roman
2efa4b7d3d test: re-run project tests with Nim 2.0 2023-12-13 13:49:03 +01:00
Roman
ed58e8722e test: run project tests with Nim 1.6 - full CI workflow 2023-12-13 13:37:35 +01:00
Roman
cd5512d1a7 test: retest with Nim 2.0 2023-12-13 13:23:18 +01:00
Roman
20492387c0 test: download Nimble sources relative to NIM_DIR 2023-12-13 13:16:29 +01:00
Roman
7638bcc9cd test: check where the Nim sources were downloaded 2023-12-13 10:20:10 +01:00
Roman
f368a76377 test: check where the Nimble sources were downloaded 2023-12-13 09:47:59 +01:00
Roman
82e02f27cc test: check if sources downloaded 2023-12-07 11:52:52 +01:00
Roman
3962ac7ad0 test: download desired Nimble sources 2023-12-07 11:40:02 +01:00
Roman
410dee4aa3 test: install script debug - build_nim 2023-12-06 16:45:27 +01:00
Roman
203669e5f2 test: install script debug - default buildchain 2023-12-06 16:38:34 +01:00
Roman
8f7c339868 test: install script debug - custom buildchain 2023-12-06 16:30:41 +01:00
Roman
9593db16ae test: install script debug 2023-12-06 16:22:34 +01:00
Roman
82479ef6bf test: set dummy dir for CI cache 2023-12-06 16:15:19 +01:00
Roman
64b23d9ed2 test: no CI_CACHE 2023-12-06 16:13:30 +01:00
Roman
beacbd7008 test: different nimble dir 2023-12-06 15:55:02 +01:00
Roman
cd15368ebf test: use original Nimble DIR 2023-12-06 15:11:44 +01:00
Roman
5cf1d1dfa0 test: change Nimble commit for v0.14.2 in Nim install script 2023-12-06 14:33:17 +01:00
Roman
f14c1a0f7e test: find new nimble.exe entire system Windows 2023-11-23 21:34:30 +08:00
Roman
81f054798f test: find new nimble.exe install dir on Windows 2023-11-23 21:26:35 +08:00
Roman
425215fa54 test: try to create symlink to nimble.cmd 2023-11-23 20:57:46 +08:00
Roman
ce4a7fc24c test: yaml indent 2023-11-23 20:46:30 +08:00
Roman
df6f443811 test: remove NIMBLE_DIR variable 2023-11-23 20:44:41 +08:00
Roman
f61b69a3e7 test: replace nimble.exe with nimble.cmd on windows 2023-11-23 20:43:34 +08:00
Roman
2a2a552bb0 test: get nimble.cmd contents on windows 2023-11-23 20:06:59 +08:00
Roman
f35e08ae0a test: find nimble.exe file windows 2023-11-23 19:57:20 +08:00
Roman
d77cee6c41 test: add .exe to remove file for windows 2023-11-23 19:48:27 +08:00
Roman
7bc112916e test: add .exe to symlink for windows 2023-11-23 19:46:30 +08:00
Roman
6f37b671bd test: check install dir 2023-11-23 19:00:37 +08:00
Roman
3662f217c0 test: check Nimble binary on Windows 2023-11-23 18:52:03 +08:00
Roman
f10cbd7b41 test: find Nimble on Windows 2023-11-23 18:41:12 +08:00
Roman
cd8aceb18a test: retest for Windows 2023-11-23 18:28:44 +08:00
Roman
c1531eae4f test: cancel in progress to false 2023-11-20 18:17:10 +08:00
Roman
93e71455fd test: find pkgs2 on macos - at home dir 2023-11-17 17:06:26 +08:00
Roman
b595f31001 test: find pkgs2 on macos 2023-11-17 16:55:49 +08:00
Roman
6f8ef7727f test: nimble install once only 2023-11-17 16:37:06 +08:00
Roman
e870d386f2 test: list pkgs2 content on linux 2023-11-17 16:27:00 +08:00
Roman
9ce561d1be test: list nimbledir 2023-11-17 16:15:45 +08:00
Roman
a9b73d39e7 test: find where the dep files were installed 2023-11-17 16:06:43 +08:00
Roman
634db967a2 test: verify downloaded dependencies are same across platforms 2023-11-17 15:58:49 +08:00
Roman
a940f5bcf9 test: commit lock file 2023-11-17 15:49:17 +08:00
Roman
967fe7ddde test: generate lock file 2023-11-17 14:55:38 +08:00
Roman
d34134799c test: print Nimble lock file - CI workflow 2023-11-17 14:25:47 +08:00
Roman
e57f20bb65 test: print Nimble lock file 2023-11-17 14:19:29 +08:00
Roman
761b54157a fix: run the tests 2023-11-16 21:17:10 +08:00
Roman
f644f0b9aa fix: use single version unittest2 >= 0.2.1 2023-11-16 21:11:21 +08:00
Roman
8514f1718f fix: use nimble install --depsOnly 2023-11-16 21:01:56 +08:00
Roman
c448822d91 fix: use nimble install instead of pinned 2023-11-16 20:54:57 +08:00
Roman
27f902b462 fix: use relative path 2023-11-16 20:48:01 +08:00
Roman
ed6b481832 fix: install the latest version of Nimble 2023-11-16 20:39:15 +08:00
Roman
cfe2ef5714 Request specific Nim version 1.6.16 2023-11-11 10:06:44 +08:00
Roman
ac8d05637a Move Nimble upgrade to Run tests step 2023-11-11 09:59:57 +08:00
Roman
066d296e40 Nimble version check 2023-11-11 09:34:42 +08:00
Roman
330b00b2ce Specify version 2023-11-10 18:01:09 +08:00
Roman
75c98fcba9 Accept download from internet 2023-11-10 15:50:34 +08:00
Roman
84a585cd0f Upgrade Nimble to latest to support lock file 2023-11-10 15:32:35 +08:00
Roman
04e6da9cdc CI workflow with improvements - retest 5 2023-11-09 19:57:48 +08:00
Roman
39d0f1bfbd CI workflow with improvements - retest 4 2023-11-03 15:02:48 +08:00
Roman
56b07da02d CI workflow with improvements - retest 3 2023-11-03 15:02:31 +08:00
Roman
a20a542fb4 CI workflow with improvements - retest 2 2023-11-03 15:02:14 +08:00
Roman
ff8ea85ae3 CI workflow with improvements - retest 2023-11-03 14:49:29 +08:00
Roman
2e781e0c41 Disable caching for Nim and deps, increase verbosity 2023-11-03 13:41:32 +08:00
Ludovic Chenut
fc4e9a8bb8 Fix WS transport when the connection aborts (#967) 2023-10-23 17:12:20 +02:00
Tanguy
60f953629d Remove ConnManager from Upgrade (#959) 2023-10-13 12:08:17 +00:00
diegomrsantos
18b0f726df Rate Limit tests (#953) 2023-10-05 15:12:07 +00:00
diegomrsantos
459f6851e7 Add a flag if a peer should be disconnected when above rate limit (#954) 2023-10-05 14:51:27 +02:00
Tanguy
575344e2e9 Update interop CI name (#956) 2023-10-05 10:54:24 +02:00
diegomrsantos
75871817ee Split msgs in iwant response if bigger than limit (#944) 2023-10-02 11:39:28 +02:00
diegomrsantos
61929aed6c Improve rdv advertise (#951)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-09-27 15:52:22 +02:00
diegomrsantos
56599f5b9d GossipSub Traffic scoring (#920) 2023-09-22 16:45:08 +02:00
Tanguy
b2eac7ecbd GS: Relay messages to direct peers (#949) 2023-09-15 17:22:02 +02:00
Tanguy
20b0e40f7d Fix doc generation CI (#948) 2023-09-08 12:21:04 +02:00
Tanguy
ff77d52851 IDontWant metrics (#946) 2023-09-06 16:05:59 +00:00
Tanguy
545a31d4f0 Bump dependencies (#947) 2023-09-06 17:52:43 +02:00
Jacek Sieka
b76bac752f avoid importing ecnist when not needed (#942) 2023-08-30 11:39:48 +02:00
diegomrsantos
c6aa085e98 Prevent concurrent IWANT of the same message (#943) 2023-08-21 16:34:24 +02:00
Ludovic Chenut
e03547ea3e Perf protocol (#925) 2023-08-14 17:25:55 +02:00
107 changed files with 2452 additions and 904 deletions


@@ -112,20 +112,14 @@ runs:
echo "MAKE_CMD=${MAKE_CMD}" >> $GITHUB_ENV
echo '${{ github.workspace }}/nim/bin' >> $GITHUB_PATH
- name: Restore Nim from cache
id: nim-cache
uses: actions/cache@v3
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
if: ${{ steps.nim-cache.outputs.cache-hit != 'true' }}
run: |
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
export NIMBLE_DIR=dist/nimble
cp ./scripts/build_nim.sh .
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries
bash build_nim.sh nim csources ${NIMBLE_DIR} NimBinaries


@@ -1,17 +1,23 @@
name: CI
name: CI - Test
on:
push:
branches:
- master
- unstable
pull_request:
# - master
# - unstable
- 'fix/ci-workflow-stability'
#pull_request:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
cancel-in-progress: false
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
timeout-minutes: 90
strategy:
@@ -26,8 +32,6 @@ jobs:
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-6]
include:
- target:
@@ -52,7 +56,7 @@ jobs:
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v4
with:
submodules: true
@@ -65,28 +69,22 @@ jobs:
nim_branch: ${{ matrix.branch }}
- name: Setup Go
uses: actions/setup-go@v2
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
nimble install -y --depsOnly
- name: Run tests
run: |
nim --version
nimble --version
nimble test
NIMFLAGS="${NIMFLAGS} --mm:refc --verbosity:3" nimble test


@@ -19,7 +19,7 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
nim-version: '1.6.x'
- name: Generate doc
run: |


@@ -23,7 +23,7 @@ jobs:
- name: Build image
run: >
cd multidim-interop/impl/nim/v1.0 &&
cd transport-interop/impl/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head
- name: Create ping-version.json
@@ -45,10 +45,24 @@ jobs:
]
}
EOF
) > ${{ github.workspace }}/test_head.json
- uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
- uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json


@@ -75,8 +75,8 @@ jobs:
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi
# NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
# if [[ "${{ matrix.branch }}" == "devel" ]]; then
# echo -e "\nTesting with '--gc:orc':\n"
# NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
# fi

.github/workflows/multi_nim_common.yml (new file, 81 lines)

@@ -0,0 +1,81 @@
name: daily-common
on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
platform:
description: 'Platform'
required: true
type: string
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target: ${{ fromJSON(inputs.platform) }}
branch: ${{ fromJSON(inputs.nim-branch) }}
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi

.github/workflows/multi_nim_devel.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
name: Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: status-im/nim-libp2p/.github/workflows/multi_nim_common.yml@unstable
with:
nim-branch: "['devel']"
platform: "[{'os':'linux','cpu':'amd64'},{'os':'macos','cpu':'amd64'},{'os':'windows','cpu':'amd64'}]"

.github/workflows/multi_nim_legacy.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
name: Legacy Platforms
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: status-im/nim-libp2p/.github/workflows/multi_nim_common.yml@unstable
with:
nim-branch: "['version-1-6','version-2-0']"
platform: "[{'os':'linux','cpu':'i386'}]"

.pinned (27 lines changed)

@@ -1,16 +1,17 @@
bearssl;https://github.com/status-im/nim-bearssl@#9372f27a25d0718d3527afad6cc936f6a853f86e
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
dnsclient;https://github.com/ba0f3/dnsclient.nim@#2b3d4b4e35b5e698fbbeafe16a4fa757926a4673
faststreams;https://github.com/status-im/nim-faststreams@#2a771bb91f8aae8520a5553955a2acce5fdd0c87
httputils;https://github.com/status-im/nim-http-utils@#aad684d3758a74c1b327df93da2e956458410b48
json_serialization;https://github.com/status-im/nim-json-serialization@#aa44ee61dd323022d4abe7cbf4e44668aad88454
metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#5fd81357839d57ef38fb17647bd5e31dfa9f55b8
serialization;https://github.com/status-im/nim-serialization@#f0860e1c25acf26ef5e6ea231c7c0537c793b555
stew;https://github.com/status-im/nim-stew@#000eeb14a34832e6c95303e6508e2925db56be7c
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#b178f47527074964f76c395ad0dfc81cf118f379
websock;https://github.com/status-im/nim-websock@#3696e3f3a5b938e478e473a6089bf8de386d2f04
zlib;https://github.com/status-im/nim-zlib@#d65ee2a7611eb9f0ef0e7350caed6e93ccfa9651
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b


@@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =
# every incoming connections will be in handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port


@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()


@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will in be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()


@@ -108,7 +108,7 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16


@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()


@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()


@@ -17,7 +17,7 @@ requires "nim >= 1.6.0",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
"unittest2 >= 0.2.1"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)


@@ -25,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility
@@ -59,6 +59,7 @@ type
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@@ -201,6 +202,10 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [LPError], public.} =
@@ -223,11 +228,16 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
let
transports = block:
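
Illustrative sketch of the new builder hook (not from this changeset; it assumes the usual SwitchBuilder helpers such as withRng, withAddresses, withTcpTransport, withMplex and withNoise):

  # inside an application setup proc: share one ObservedAddrManager with the switch's Identify
  let
    oam = ObservedAddrManager.new()
    switch = SwitchBuilder.new()
      .withRng(newRng())
      .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
      .withTcpTransport()
      .withMplex()
      .withNoise()
      .withObservedAddrManager(oam)  # picked up by Identify.new(...) inside build()
      .build()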


@@ -128,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -160,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
@@ -379,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
@@ -387,7 +387,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
@@ -395,7 +395,7 @@ proc getStream*(c: ConnManager,
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##


@@ -65,11 +65,13 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
when supported(PKScheme.ECDSA):
import ecnist
# We are still importing `ecnist` because, it is used for SECIO handshake,
# but it will be impossible to create ECNIST keys or import ECNIST keys.
# These used to be declared in `crypto` itself
export ecnist.ephemeral, ecnist.ECDHEScheme
import ecnist, bearssl/rand, bearssl/hash as bhash
import bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -86,8 +88,6 @@ type
Sha256,
Sha512
ECDHEScheme* = EcCurveKind
PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -879,34 +879,6 @@ proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.


@@ -994,3 +994,33 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)
type ECDHEScheme* = EcCurveKind
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
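
The helpers keep the same call shape after the move; a minimal usage sketch, assuming ecnist is imported and newRng() supplies the DRBG as in the tutorials above:

  let rng = newRng()
  # generate an ephemeral P-256 key pair for an ECDHE exchange
  let keyPair = ephemeral("P-256", rng[]).expect("ephemeral key pair")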


@@ -553,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -755,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
# Waiting until daemon will not be bound to control socket.
@@ -1032,7 +1032,7 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
var value: seq[byte]
if pbDhtResponse.getRequiredField(3, value).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
return initProtoBuffer(value)
else:
raise newException(DaemonLocalError, "Wrong message type!")


@@ -26,7 +26,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async, base.} =
dir = Direction.Out) {.async, base.} =
## connect remote peer without negotiating
## a protocol
##
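
Call-site sketch for the renamed parameter (hypothetical switch, peer id and address list; it mirrors the DCUtR client call further down):

  # inside an async proc: dial out, but record the connection as Inbound for a simultaneous dial
  await switch.connect(peerId, addrs, forceDial = true, reuseConnection = false, dir = Direction.In)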


@@ -53,7 +53,7 @@ proc dialAndUpgrade(
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
@@ -75,15 +75,19 @@ proc dialAndUpgrade(
let mux =
try:
dialed.transportDir = upgradeDir
await transport.upgrade(dialed, upgradeDir, peerId)
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeeded through another - no use in trying again
await dialed.close()
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
if upgradeDir == Direction.Out:
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
@@ -128,7 +132,7 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId))
@@ -146,7 +150,7 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):
return result
@@ -164,7 +168,7 @@ proc internalConnect(
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true,
upgradeDir = Direction.Out):
dir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -182,7 +186,7 @@ proc internalConnect(
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
@@ -209,7 +213,7 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -217,7 +221,7 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,


@@ -122,20 +122,15 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
pa.add(value)
return dm.request(pa)
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
proc advertise*[T](dm: DiscoveryManager, value: T) =
for i in dm.interfaces:
i.toAdvertise = pa
i.toAdvertise.add(value)
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()
proc advertise*[T](dm: DiscoveryManager, value: T) =
var pa: PeerAttributes
pa.add(value)
dm.advertise(pa)
template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
## peer attritubtes are available through the variable
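
With the generic overload the caller passes the attribute value directly; a short sketch, assuming a DiscoveryManager dm already wired to a RendezVousInterface:

  # advertise this peer under a rendezvous namespace; repeated calls add further attributes
  dm.advertise(RdvNamespace("my-app/example"))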


@@ -19,6 +19,7 @@ type
rdv*: RendezVous
timeToRequest: Duration
timeToAdvertise: Duration
ttl: Duration
RdvNamespace* = distinct string
@@ -62,12 +63,16 @@ method advertise*(self: RendezVousInterface) {.async.} =
self.advertisementUpdated.clear()
for toAdv in toAdvertise:
await self.rdv.advertise(toAdv, self.timeToAdvertise)
try:
await self.rdv.advertise(toAdv, self.ttl)
except CatchableError as error:
debug "RendezVous advertise error: ", msg = error.msg
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
tta: Duration = 1.minutes,
ttl: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
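
Construction sketch with the new ttl argument (illustrative values; MinimumDuration comes from the rendezvous protocol):

  let rdv = RendezVous.new()
  # re-advertise every minute, registering each advertisement with the minimum allowed TTL
  let rdvInterface = RendezVousInterface.new(rdv, tta = 1.minutes, ttl = MinimumDuration)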


@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro, its based on the proc/template version
# and uses quote do so it's quite readable
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:


@@ -398,6 +398,9 @@ const
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone


@@ -193,6 +193,7 @@ const MultiCodecList = [
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list


@@ -131,7 +131,7 @@ proc handle*(
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
): Future[string] {.async.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -172,10 +172,9 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:


@@ -42,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn


@@ -73,7 +73,7 @@ func shortLog*(s: LPChannel): auto =
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async.} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -95,7 +95,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async.} =
if s.isClosed:
trace "Already closed", s
return
@@ -123,7 +123,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =
trace "Channel reset", s
method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.


@@ -122,7 +122,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async.} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -211,7 +211,7 @@ proc new*(M: type Mplex,
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -219,7 +219,7 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
if m.isClosed:
trace "Already closed", m
return


@@ -46,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard
proc new*(
T: typedesc[MuxerProvider],


@@ -59,7 +59,7 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@@ -183,9 +183,10 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
@@ -249,6 +250,7 @@ method readOnce*(
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
let toRead = min(channel.recvQueue.len, nbytes)
@@ -346,7 +348,7 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
asyncSpawn channel.trySend()
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open*(channel: YamuxChannel) {.async.} =
if channel.opened:
trace "Try to open channel twice"
return
@@ -429,7 +431,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -454,6 +456,7 @@ method handle*(m: Yamux) {.async, gcsafe.} =
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
if m.channels.len >= m.maxChannCount:
@@ -511,7 +514,7 @@ method getStreams*(m: Yamux): seq[Connection] =
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")


@@ -52,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]


@@ -140,7 +140,7 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")


@@ -162,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -179,7 +179,7 @@ proc addressMapper(
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"


@@ -66,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."


@@ -56,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
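
Sketch of the new filtering (hypothetical addresses; the p2p-suffixed TCP address is kept and trimmed to its first two components, the QUIC address is dropped):

  let addrs = @[
    MultiAddress.init("/ip4/198.51.100.1/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
    MultiAddress.init("/ip4/198.51.100.1/udp/1234/quic-v1").tryGet()]
  let punchable = getHolePunchableAddrs(addrs)
  # punchable contains only /ip4/198.51.100.1/tcp/1234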


@@ -29,7 +29,7 @@ logScope:
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -56,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."


@@ -189,7 +189,7 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -201,7 +201,7 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -226,7 +226,7 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -266,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)


@@ -47,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =


@@ -105,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -128,7 +128,7 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -200,7 +200,7 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -293,7 +293,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -336,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)


@@ -37,24 +37,24 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
data: uint64 = 0) {.async, gcsafe, raises: [].} =
data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async, gcsafe.} =
method stop*(self: RelayTransport) {.async.} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: RelayTransport): Future[Connection] {.async.} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
@@ -90,7 +90,7 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)


@@ -21,14 +21,14 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))


@@ -21,6 +21,7 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
@@ -77,7 +78,7 @@ chronicles.expandIt(IdentifyInfo):
signedPeerRecord =
# The SPR contains the same data as the identify message
# would be cumbersome to log
if iinfo.signedPeerRecord.isSome(): "Some"
if it.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
@@ -133,24 +134,24 @@ proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
if ? pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)
debug "decodeMsg: decoded identify", iinfo
Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
observedAddrManager: observedAddrManager,
)
identify.init()
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -168,7 +169,7 @@ method init*(p: Identify) =
proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
@@ -176,6 +177,7 @@ proc identify*(self: Identify,
raise newException(IdentityInvalidMsgError, "Empty message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
@@ -186,8 +188,12 @@ proc identify*(self: Identify,
info.peerId = peer
info.observedAddr.withValue(observed):
if not self.observedAddrManager.addObservation(observed):
debug "Observed address is not valid", observedAddr = observed
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -198,13 +204,14 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify push: decoded message", conn, identInfo
identInfo.pubkey.withValue(pubkey):
let receivedPeerId = PeerId.init(pubkey).tryGet()
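
The manager can also be injected directly through the constructor; a sketch with a hypothetical peerInfo:

  let
    oam = ObservedAddrManager.new()
    identify = Identify.new(peerInfo, sendSignedPeerRecord = false, observedAddrManager = oam)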


@@ -0,0 +1,47 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
import chronos, chronicles, sequtils
import stew/endians2
import ./core, ../../stream/connection
logScope:
topics = "libp2p perf"
type PerfClient* = ref object of RootObj
proc perf*(_: typedesc[PerfClient], conn: Connection,
sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
Future[Duration] {.async, public.} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
let start = Moment.now()
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite
await conn.close()
size = sizeToRead
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead
let duration = Moment.now() - start
trace "finishing performance benchmark", duration
return duration


@@ -0,0 +1,14 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
const
PerfCodec* = "/perf/1.0.0"
PerfSize* = 65536


@@ -0,0 +1,60 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
{.push raises: [].}
import chronos, chronicles
import stew/endians2
import ./core,
../protocol,
../../stream/connection,
../../utility
export chronicles, connection
logScope:
topics = "libp2p perf"
type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
var
sizeBuffer: array[8, byte]
size: uint64
await conn.readExactly(addr sizeBuffer[0], 8)
size = uint64.fromBytesBE(sizeBuffer)
var toReadBuffer: array[PerfSize, byte]
try:
while true:
bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
except CatchableError as exc:
discard
var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in perf handler", exc = exc.msg, conn
await conn.close()
p.handler = handle
p.codec = PerfCodec
return p
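On the listening side the handler only needs to be mounted; a minimal sketch assuming an existing switch instance:
# Hypothetical server setup using the existing Switch.mount API.
let perfProto = Perf.new()
switch.mount(perfProto)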

View File

@@ -51,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -71,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, gcsafe, public.} =
): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn

View File

@@ -15,7 +15,7 @@ import ./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
./rpc/[message, messages],
./rpc/[message, messages, protobuf],
../../crypto/crypto,
../../stream/connection,
../../peerid,
@@ -95,7 +95,16 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
method rpcHandler*(f: FloodSub,
peer: PubSubPeer,
rpcMsg: RPCMsg) {.async.} =
data: seq[byte]) {.async.} =
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
raise newException(CatchableError, "")
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
# trigger hooks
peer.recvObservers(rpcMsg)
for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
f.handleSubscribe(peer, sub.topic, sub.subscribe)
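The recvObservers call above is the hook point for registered observers; a hedged sketch of wiring one up, assuming the existing PubSub.addObserver helper and a floodSub instance:
# Hypothetical observer: onRecv runs for every successfully decoded RPCMsg.
var rpcsSeen = 0
let obs = PubSubObserver(
  onRecv: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].} =
    inc rpcsSeen)
floodSub.addObserver(obs)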

View File

@@ -13,13 +13,14 @@
import std/[sets, sequtils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./pubsub,
./floodsub,
./pubsubpeer,
./peertable,
./mcache,
./timedcache,
./rpc/[messages, message],
./rpc/[messages, message, protobuf],
../protocol,
../../stream/connection,
../../peerinfo,
@@ -40,6 +41,8 @@ logScope:
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
declareCounter(libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant")
declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizations", labels=["kind"])
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
@@ -75,7 +78,9 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
behaviourPenaltyDecay: 0.999,
disconnectBadPeers: false,
enablePX: false,
bandwidthEstimatebps: 100_000_000 # 100 Mbps or 12.5 MBps
bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit: false
)
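The two new fields are the operator-facing knobs for the overhead rate limiting introduced further down; a hedged configuration sketch with illustrative values:
# Hypothetical tuning: allow 128 KiB of undecodable/overhead bytes per second,
# and drop peers that exceed it.
var params = GossipSubParams.init()
params.overheadRateLimit = Opt.some((bytes: 128 * 1024, interval: 1.seconds))
params.disconnectPeerAboveRateLimit = true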
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -148,7 +153,7 @@ method init*(g: GossipSub) =
g.codecs &= GossipSubCodec
g.codecs &= GossipSubCodec_10
method onNewPeer(g: GossipSub, peer: PubSubPeer) =
method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
# Make sure stats and peer information match, even when reloading peer stats
# from a previous connection
@@ -157,7 +162,7 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
peer.behaviourPenalty = stats.behaviourPenalty
# Check if the score is below the threshold and disconnect the peer if necessary
g.disconnectBadPeerCheck(peer, stats.score)
g.disconnectIfBadScorePeer(peer, stats.score)
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget
@@ -202,8 +207,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
for t in toSeq(g.gossipsub.keys):
g.gossipsub.removePeer(t, pubSubPeer)
# also try to remove from explicit table here
g.explicit.removePeer(t, pubSubPeer)
# also try to remove from direct peers table here
g.subscribedDirectPeers.removePeer(t, pubSubPeer)
for t in toSeq(g.fanout.keys):
g.fanout.removePeer(t, pubSubPeer)
@@ -242,7 +247,7 @@ proc handleSubscribe*(g: GossipSub,
# subscribe remote peer to the topic
discard g.gossipsub.addPeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
discard g.explicit.addPeer(topic, peer)
discard g.subscribedDirectPeers.addPeer(topic, peer)
else:
trace "peer unsubscribed from topic"
@@ -256,7 +261,7 @@ proc handleSubscribe*(g: GossipSub,
g.fanout.removePeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
g.explicit.removePeer(topic, peer)
g.subscribedDirectPeers.removePeer(topic, peer)
trace "gossip peers", peers = g.gossipsub.peers(topic), topic
@@ -307,12 +312,13 @@ proc validateAndRelay(g: GossipSub,
var seenPeers: HashSet[PubSubPeer]
discard g.validationSeen.pop(msgIdSalted, seenPeers)
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
case validation
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -334,6 +340,9 @@ proc validateAndRelay(g: GossipSub,
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])
# add direct peers
toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(t))
# Don't send it to source peer, or peers that
# sent it during validation
toSendPeers.excl(peer)
@@ -350,6 +359,8 @@ proc validateAndRelay(g: GossipSub,
for heDontWant in peer.heDontWants:
if msgId in heDontWant:
seenPeers.incl(peer)
libp2p_gossipsub_idontwant_saved_messages.inc
libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
break
toSendPeers.excl(seenPeers)
@@ -370,9 +381,57 @@ proc validateAndRelay(g: GossipSub,
except CatchableError as exc:
info "validateAndRelay failed", msg=exc.msg
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
# This way we also count fields that the protobuf decoder ignored
var rmsg = rpcMsgOpt.valueOr:
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(msgSize):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
raise newException(CatchableError, "Peer msg couldn't be decoded")
let usefulMsgBytesNum =
if g.verifySignature:
byteSize(rmsg.messages)
else:
dataAndTopicsIdSize(rmsg.messages)
var uselessAppBytesNum = msgSize - usefulMsgBytesNum
rmsg.control.withValue(control):
uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
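As a worked example of the accounting above (figures illustrative): an RPC of 10 000 wire bytes whose decoded messages amount to 8 200 useful bytes and whose IHAVE/IWANT entries amount to 700 bytes charges 10 000 - 8 200 - 700 = 1 100 bytes against the peer's overhead bucket; only if that consumption fails is the peer logged and, optionally, disconnected.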
method rpcHandler*(g: GossipSub,
peer: PubSubPeer,
rpcMsg: RPCMsg) {.async.} =
data: seq[byte]) {.async.} =
let msgSize = data.len
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
return
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
# trigger hooks
peer.recvObservers(rpcMsg)
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
g.send(peer, RPCMsg(pong: rpcMsg.ping))
peer.pingBudget.dec
@@ -433,14 +492,14 @@ method rpcHandler*(g: GossipSub,
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
continue
if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
continue
# g.anonymize needs no evaluation when receiving messages
@@ -513,7 +572,7 @@ method publish*(g: GossipSub,
var peers: HashSet[PubSubPeer]
# add always direct peers
peers.incl(g.explicit.getOrDefault(topic))
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
if topic in g.topics: # if we're subscribed use the mesh
peers.incl(g.mesh.getOrDefault(topic))
@@ -599,11 +658,13 @@ method publish*(g: GossipSub,
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
let peer = g.peers.getOrDefault(id)
if isNil(peer):
if id notin g.peers:
trace "Attempting to dial a direct peer", peer = id
if g.switch.isConnected(id):
warn "We are connected to a direct peer, but it isn't a GossipSub peer!", id
return
try:
await g.switch.connect(id, addrs)
await g.switch.connect(id, addrs, forceDial = true)
# populate the peer after it's connected
discard g.getOrCreatePeer(id, g.codecs)
except CancelledError as exc:
@@ -662,3 +723,13 @@ method initPubSub*(g: GossipSub)
# init gossip stuff
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
method getOrCreatePeer*(
g: GossipSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
g.parameters.overheadRateLimit.withValue(overheadRateLimit):
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
return peer
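getOrCreatePeer turns the configured limit into a per-peer TokenBucket from chronos/ratelimit; roughly, consumption succeeds until the interval's budget is spent (a hedged sketch of the assumed semantics):
# Assumed TokenBucket behaviour: 1000-byte budget refilled every second.
let bucket = TokenBucket.new(1000, 1.seconds)
echo bucket.tryConsume(600)   # expected true: within budget
echo bucket.tryConsume(600)   # expected false: only ~400 bytes left this interval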

View File

@@ -106,10 +106,11 @@ proc handleGraft*(g: GossipSub,
let topic = graft.topicId
trace "peer grafted topic", peer, topic
# It is an error to GRAFT on a explicit peer
# It is an error to GRAFT on a direct peer
if peer.peerId in g.parameters.directPeers:
# receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
# we are trusting direct peer not to abuse this
warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
peer, topic
# and such an attempt should be logged and rejected with a PRUNE
prunes.add(ControlPrune(
@@ -340,7 +341,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= 0.0 and
it notin currentMesh[] and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -380,7 +381,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it notin currentMesh[] and
# avoid negative score peers
it.score >= 0.0 and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -482,7 +483,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= median.score and
it notin currentMesh[] and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:

View File

@@ -11,9 +11,12 @@
import std/[tables, sets]
import chronos, chronicles, metrics
import chronos/ratelimit
import "."/[types]
import ".."/[pubsubpeer]
import ../rpc/messages
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
import ../pubsub
logScope:
topics = "libp2p gossipsub"
@@ -27,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])
proc init*(_: type[TopicParams]): TopicParams =
TopicParams(
@@ -85,27 +89,18 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
{.pop.}
proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
proc disconnectBadPeerCheck*(g: GossipSub, peer: PubSubPeer, score: float64) =
proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(g.disconnectPeer(peer))
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
proc updateScores*(g: GossipSub) = # avoid async
## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
@@ -175,14 +170,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += topicScore * topicParams.topicWeight
# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
@@ -219,14 +207,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += colocationFactor * g.parameters.ipColocationFactorWeight
# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
@@ -246,8 +227,7 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted
g.disconnectBadPeerCheck(peer, stats.score)
g.disconnectIfBadScorePeer(peer, stats.score)
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
for peer in evicting:
@@ -260,8 +240,18 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
for tt in topics:
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
for tt in msg.topicIds:
let t = tt
if t notin g.topics:
continue

View File

@@ -144,6 +144,9 @@ type
bandwidthEstimatebps*: int # This is currently used only for limiting flood publishing. 0 disables flood-limiting completely
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
@@ -158,7 +161,7 @@ type
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
explicit*: PeerTable # directpeers that we keep alive explicitly
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
gossip*: Table[string, seq[ControlIHave]] # pending gossip

View File

@@ -17,6 +17,7 @@
import std/[tables, sequtils, sets, strutils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
@@ -263,7 +264,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(p: PubSub,
peer: PubSubPeer,
rpcMsg: RPCMsg): Future[void] {.base, async.} =
data: seq[byte]): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
@@ -278,10 +279,11 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
of PubSubPeerEventKind.Disconnected:
discard
proc getOrCreatePeer*(
method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
protos: seq[string]): PubSubPeer {.base, gcsafe.} =
p.peers.withValue(peerId, peer):
return peer[]
@@ -354,9 +356,9 @@ method handleConn*(p: PubSub,
## that we're interested in
##
proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
# call pubsub rpc handler
p.rpcHandler(peer, msg)
p.rpcHandler(peer, data)
let peer = p.getOrCreatePeer(conn.peerId, @[proto])

View File

@@ -12,6 +12,7 @@
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
@@ -32,6 +33,8 @@ when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
type
PeerRateLimitError* = object of CatchableError
PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
@@ -66,8 +69,9 @@ type
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]
RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}
when defined(libp2p_agents_metrics):
@@ -107,7 +111,7 @@ func outbound*(p: PubSubPeer): bool =
else:
false
proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
@@ -134,26 +138,19 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
conn, peer = p, closed = conn.closed,
data = data.shortLog
var rmsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer",
conn, peer = p, closed = conn.closed,
err = error
break
data = newSeq[byte]() # Release memory
trace "decoded msg from peer",
conn, peer = p, closed = conn.closed,
msg = rmsg.shortLog
# trigger hooks
p.recvObservers(rmsg)
when defined(libp2p_expensive_metrics):
for m in rmsg.messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
await p.handler(p, rmsg)
await p.handler(p, data)
data = newSeq[byte]() # Release memory
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
except CatchableError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
await conn.close()
except CancelledError:
@@ -237,7 +234,7 @@ template sendMetrics(msg: RPCMsg): untyped =
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:
@@ -245,7 +242,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
return
if msg.len > p.maxMessageSize:
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
if p.sendConn == nil:
@@ -272,9 +269,42 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [], async.} =
await conn.close() # This will clean up the send connection
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.
var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()
var currentSize = byteSize(currentRPCMsg)
for msg in rpcMsg.messages:
let msgSize = byteSize(msg)
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
currentRPCMsg = RPCMsg()
currentSize = 0
currentRPCMsg.messages.add(msg)
currentSize += msgSize
# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -292,7 +322,13 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
asyncSpawn p.sendEncoded(encoded)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -307,7 +343,8 @@ proc new*(
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int): T =
maxMessageSize: int,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
result = T(
getConn: getConn,
@@ -315,7 +352,18 @@ proc new*(
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[MessageId]))
proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"

View File

@@ -9,7 +9,7 @@
{.push raises: [].}
import options, sequtils
import options, sequtils, sugar
import "../../.."/[
peerid,
routing_record,
@@ -18,6 +18,14 @@ import "../../.."/[
export options
proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
var fieldNames: seq[string]
for name, _ in fieldPairs(T()):
fieldNames &= name
if fieldNames != existingFieldNames:
fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)
type
PeerInfoMsg* = object
peerId*: PeerId
@@ -116,3 +124,54 @@ func shortLog*(m: RPCMsg): auto =
messages: mapIt(m.messages, it.shortLog),
control: m.control.get(ControlMessage()).shortLog
)
static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
proc byteSize(peerInfo: PeerInfoMsg): int =
peerInfo.peerId.len + peerInfo.signedPeerRecord.len
static: expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool
static: expectedFields(Message, @["fromPeer", "data", "seqno", "topicIds", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len +
msg.signature.len + msg.key.len + msg.topicIds.foldl(a + b.len, 0)
proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIHave, @["topicId", "messageIds"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicId.len + controlIHave.messageIds.foldl(a + b.len, 0)
proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIWant, @["messageIds"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIds.foldl(a + b.len, 0)
proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)
static: expectedFields(ControlGraft, @["topicId"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicId.len
static: expectedFields(ControlPrune, @["topicId", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicId.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64
static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
proc byteSize*(rpc: RPCMsg): int =
result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
rpc.ping.len + rpc.pong.len
rpc.control.withValue(ctrl):
result += ctrl.byteSize
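A hedged worked example of the accounting above, with illustrative values; only data and topicIds are set, so the remaining length fields contribute zero:
# Hypothetical check: 100 payload bytes plus a 4-byte topic id.
let m = Message(data: newSeq[byte](100), topicIds: @["waku"])
doAssert byteSize(m) == 104
doAssert byteSize(@[m, m]) == 208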

View File

@@ -469,6 +469,8 @@ proc advertisePeer(rdv: RendezVous,
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
else:
trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", error = exc.msg
finally:
@@ -476,9 +478,9 @@ proc advertisePeer(rdv: RendezVous,
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
proc advertise*(rdv: RendezVous,
method advertise*(rdv: RendezVous,
ns: string,
ttl: Duration = MinimumDuration) {.async.} =
ttl: Duration = MinimumDuration) {.async, base.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
@@ -634,7 +636,7 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)

View File

@@ -19,7 +19,7 @@ type
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything
p.codec = PlainTextCodec

View File

@@ -135,10 +135,9 @@ method init*(s: Secure) =
method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(s: SecureConn,
pbytes: pointer,

View File

@@ -37,7 +37,7 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false

View File

@@ -94,7 +94,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():

View File

@@ -50,7 +50,7 @@ method initStream*(s: ChronosStream) =
if s.objName.len == 0:
s.objName = ChronosStreamTrackerName
s.timeoutHandler = proc() {.async, gcsafe.} =
s.timeoutHandler = proc() {.async.} =
trace "Idle timeout expired, closing ChronosStream", s
await s.close()

View File

@@ -41,7 +41,7 @@ type
when defined(libp2p_agents_metrics):
shortAgent*: string
proc timeoutMonitor(s: Connection) {.async, gcsafe.}
proc timeoutMonitor(s: Connection) {.async.}
func shortLog*(conn: Connection): string =
try:
@@ -110,7 +110,7 @@ proc pollActivity(s: Connection): Future[bool] {.async.} =
return false
proc timeoutMonitor(s: Connection) {.async, gcsafe.} =
proc timeoutMonitor(s: Connection) {.async.} =
## monitor the channel for inactivity
##
## if the timeout was hit, it means that

View File

@@ -246,7 +246,7 @@ proc readLine*(s: LPStream,
if len(result) == lim:
break
proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
proc readVarint*(conn: LPStream): Future[uint64] {.async, public.} =
var
buffer: array[10, byte]
@@ -264,7 +264,7 @@ proc readVarint*(conn: LPStream): Future[uint64] {.async, gcsafe, public.} =
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, gcsafe, public.} =
proc readLp*(s: LPStream, maxSize: int): Future[seq[byte]] {.async, public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()

View File

@@ -71,17 +71,17 @@ type
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async, gcsafe.} =
method run*(self: Service, switch: Switch) {.base, async.} =
doAssert(false, "Not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async, gcsafe.} =
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
if not self.inUse:
warn "service is already stopped"
return false
@@ -141,10 +141,10 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.public.} =
dir = Direction.Out): Future[void] {.public.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
method connect*(
s: Switch,
@@ -213,7 +213,7 @@ proc mount*[T: LPProtocol](s: Switch, proto: T, matcher: Matcher = nil)
s.peerInfo.protocols.add(proto.codec)
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgrade(conn, Direction.In, Opt.none(PeerId))
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
trace "Connection upgrade succeeded"
@@ -321,7 +321,7 @@ proc stop*(s: Switch) {.async, public.} =
trace "Switch stopped"
proc start*(s: Switch) {.async, gcsafe, public.} =
proc start*(s: Switch) {.async, public.} =
## Start listening on every transport
if s.started:

View File

@@ -174,7 +174,7 @@ method start*(
trace "Listening on", address = ma
method stop*(self: TcpTransport) {.async, gcsafe.} =
method stop*(self: TcpTransport) {.async.} =
## stop the transport
##
try:
@@ -210,7 +210,7 @@ method stop*(self: TcpTransport) {.async, gcsafe.} =
except CatchableError as exc:
trace "Error shutting down tcp transport", exc = exc.msg
method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TcpTransport): Future[Connection] {.async.} =
## accept a new TCP connection
##
@@ -219,7 +219,7 @@ method accept*(self: TcpTransport): Future[Connection] {.async, gcsafe.} =
try:
if self.acceptFuts.len <= 0:
self.acceptFuts = self.servers.mapIt(it.accept())
self.acceptFuts = self.servers.mapIt(Future[StreamTransport](it.accept()))
if self.acceptFuts.len <= 0:
return
@@ -260,7 +260,7 @@ method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##

View File

@@ -82,7 +82,7 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
return TcpOnion3.match(address)
proc connectToTorServer(
transportAddress: TransportAddress): Future[StreamTransport] {.async, gcsafe.} =
transportAddress: TransportAddress): Future[StreamTransport] {.async.} =
let transp = await connect(transportAddress)
try:
discard await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])
@@ -99,7 +99,7 @@ proc connectToTorServer(
await transp.closeWait()
raise err
proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
proc readServerReply(transp: StreamTransport) {.async.} =
## The specification for this code is defined on
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
@@ -121,7 +121,7 @@ proc readServerReply(transp: StreamTransport) {.async, gcsafe.} =
let atyp = firstFourOctets[3]
case atyp:
of Socks5AddressType.IPv4.byte:
discard await transp.read(ipV4NumOctets + portNumOctets)
discard await transp.read(ipV4NumOctets + portNumOctets)
of Socks5AddressType.FQDN.byte:
let fqdnNumOctets = await transp.read(1)
discard await transp.read(int(uint8.fromBytes(fqdnNumOctets)) + portNumOctets)
@@ -166,7 +166,7 @@ proc parseDnsTcp(address: MultiAddress):
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
transp: StreamTransport, address: MultiAddress) {.async, gcsafe.} =
transp: StreamTransport, address: MultiAddress) {.async.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
@@ -190,7 +190,7 @@ method dial*(
self: TorTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##
if not handlesDial(address):
@@ -229,14 +229,14 @@ method start*(
else:
raise newException(TransportStartError, "Tor Transport couldn't start, no supported addr was provided.")
method accept*(self: TorTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: TorTransport): Future[Connection] {.async.} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async, gcsafe.} =
method stop*(self: TorTransport) {.async.} =
## stop the transport
##
await procCall Transport(self).stop() # call base

View File

@@ -83,13 +83,12 @@ proc dial*(
method upgrade*(
self: Transport,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base, gcsafe.} =
## base upgrade method that the transport uses to perform
## transport specific upgrades
##
self.upgrader.upgrade(conn, direction, peerId)
self.upgrader.upgrade(conn, peerId)
method handles*(
self: Transport,

View File

@@ -173,7 +173,7 @@ method start*(
self.running = true
method stop*(self: WsTransport) {.async, gcsafe.} =
method stop*(self: WsTransport) {.async.} =
## stop the transport
##
@@ -237,7 +237,7 @@ proc connHandler(self: WsTransport,
asyncSpawn onClose()
return conn
method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
method accept*(self: WsTransport): Future[Connection] {.async.} =
## accept a new WS connection
##
@@ -276,6 +276,8 @@ method accept*(self: WsTransport): Future[Connection] {.async, gcsafe.} =
debug "AsyncStream Error", exc = exc.msg
except TransportTooManyError as exc:
debug "Too many files opened", exc = exc.msg
except TransportAbortedError as exc:
debug "Connection aborted", exc = exc.msg
except AsyncTimeoutError as exc:
debug "Timed out", exc = exc.msg
except TransportUseClosedError as exc:
@@ -293,7 +295,7 @@ method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
## dial a peer
##

View File

@@ -32,8 +32,7 @@ proc getMuxerByCodec(self: MuxedUpgrade, muxerName: string): MuxerProvider =
proc mux*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction): Future[Muxer] {.async, gcsafe.} =
conn: Connection): Future[Muxer] {.async.} =
## mux connection
trace "Muxing connection", conn
@@ -42,7 +41,7 @@ proc mux*(
return
let muxerName =
if direction == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.muxers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.muxers.mapIt(it.codec))
if muxerName.len == 0 or muxerName == "na":
@@ -62,16 +61,15 @@ proc mux*(
method upgrade*(
self: MuxedUpgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.async.} =
trace "Upgrading connection", conn, direction
trace "Upgrading connection", conn, direction = conn.dir
let sconn = await self.secure(conn, direction, peerId) # secure the connection
let sconn = await self.secure(conn, peerId) # secure the connection
if isNil(sconn):
raise newException(UpgradeFailedError,
"unable to secure connection, stopping upgrade")
let muxer = await self.mux(sconn, direction) # mux it if possible
let muxer = await self.mux(sconn) # mux it if possible
if muxer == nil:
raise newException(UpgradeFailedError,
"a muxer is required for outgoing connections")
@@ -84,24 +82,21 @@ method upgrade*(
raise newException(UpgradeFailedError,
"Connection closed or missing peer info, stopping upgrade")
trace "Upgraded connection", conn, sconn, direction
trace "Upgraded connection", conn, sconn, direction = conn.dir
return muxer
proc new*(
T: type MuxedUpgrade,
muxers: seq[MuxerProvider],
secureManagers: openArray[Secure] = [],
connManager: ConnManager,
ms: MultistreamSelect): T =
let upgrader = T(
muxers: muxers,
secureManagers: @secureManagers,
connManager: connManager,
ms: ms)
upgrader.streamHandler = proc(conn: Connection)
{.async, gcsafe, raises: [].} =
upgrader.streamHandler = proc(conn: Connection) {.async.} =
trace "Starting stream handler", conn
try:
await upgrader.ms.handle(conn) # handle incoming connection

View File

@@ -35,26 +35,23 @@ type
Upgrade* = ref object of RootObj
ms*: MultistreamSelect
connManager*: ConnManager
secureManagers*: seq[Secure]
method upgrade*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Muxer] {.base.} =
doAssert(false, "Not implemented!")
proc secure*(
self: Upgrade,
conn: Connection,
direction: Direction,
peerId: Opt[PeerId]): Future[Connection] {.async, gcsafe.} =
peerId: Opt[PeerId]): Future[Connection] {.async.} =
if self.secureManagers.len <= 0:
raise newException(UpgradeFailedError, "No secure managers registered!")
let codec =
if direction == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
if conn.dir == Out: await self.ms.select(conn, self.secureManagers.mapIt(it.codec))
else: await MultistreamSelect.handle(conn, self.secureManagers.mapIt(it.codec))
if codec.len == 0:
raise newException(UpgradeFailedError, "Unable to negotiate a secure channel!")
@@ -66,4 +63,4 @@ proc secure*(
# let's avoid duplicating checks but detect if it fails to do it properly
doAssert(secureProtocol.len > 0)
return await secureProtocol[0].secure(conn, direction == Out, peerId)
return await secureProtocol[0].secure(conn, peerId)

219
nimble.lock Normal file
View File

@@ -0,0 +1,219 @@
{
"version": 2,
"packages": {
"results": {
"version": "0.4.0",
"vcsRevision": "f3c666a272c69d70cb41e7245e7f6844797303ad",
"url": "https://github.com/arnetheduck/nim-results",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "51e08ca9524db98dc909fb39192272cc2b5451c7"
}
},
"unittest2": {
"version": "0.2.1",
"vcsRevision": "262b697f38d6b6f1e7462d3b3ab81d79b894e336",
"url": "https://github.com/status-im/nim-unittest2",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "1bac3a8355441edeed1ef3134e7436d6fb5d4498"
}
},
"stew": {
"version": "0.1.0",
"vcsRevision": "3159137d9a3110edb4024145ce0ba778975de40e",
"url": "https://github.com/status-im/nim-stew",
"downloadMethod": "git",
"dependencies": [
"results",
"unittest2"
],
"checksums": {
"sha1": "4ab494e272e997011853faddebe9e55183613776"
}
},
"bearssl": {
"version": "0.2.1",
"vcsRevision": "e4157639db180e52727712a47deaefcbbac6ec86",
"url": "https://github.com/status-im/nim-bearssl",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "a5086fd5c0af2b852f34c0cc6e4cff93a98f97ec"
}
},
"httputils": {
"version": "0.3.0",
"vcsRevision": "3b491a40c60aad9e8d3407443f46f62511e63b18",
"url": "https://github.com/status-im/nim-http-utils",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "1331f33585eda05d1e50385fa7871c3bf2a449d7"
}
},
"chronos": {
"version": "3.2.0",
"vcsRevision": "ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c",
"url": "https://github.com/status-im/nim-chronos",
"downloadMethod": "git",
"dependencies": [
"stew",
"bearssl",
"httputils",
"unittest2"
],
"checksums": {
"sha1": "5783067584ac6812eb64b8454ea6f9c97ff1262a"
}
},
"testutils": {
"version": "0.5.0",
"vcsRevision": "dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34",
"url": "https://github.com/status-im/nim-testutils",
"downloadMethod": "git",
"dependencies": [
"unittest2"
],
"checksums": {
"sha1": "756d0757c4dd06a068f9d38c7f238576ba5ee897"
}
},
"faststreams": {
"version": "0.3.0",
"vcsRevision": "720fc5e5c8e428d9d0af618e1e27c44b42350309",
"url": "https://github.com/status-im/nim-faststreams",
"downloadMethod": "git",
"dependencies": [
"stew",
"unittest2"
],
"checksums": {
"sha1": "ab178ba25970b95d953434b5d86b4d60396ccb64"
}
},
"serialization": {
"version": "0.2.0",
"vcsRevision": "4bdbc29e54fe54049950e352bb969aab97173b35",
"url": "https://github.com/status-im/nim-serialization",
"downloadMethod": "git",
"dependencies": [
"faststreams",
"unittest2",
"stew"
],
"checksums": {
"sha1": "c8c99a387aae488e7008aded909ebfe662e74450"
}
},
"json_serialization": {
"version": "0.1.5",
"vcsRevision": "85b7ea093cb85ee4f433a617b97571bd709d30df",
"url": "https://github.com/status-im/nim-json-serialization",
"downloadMethod": "git",
"dependencies": [
"serialization",
"stew"
],
"checksums": {
"sha1": "c6b30565292acf199b8be1c62114726e354af59e"
}
},
"chronicles": {
"version": "0.10.3",
"vcsRevision": "32ac8679680ea699f7dbc046e8e0131cac97d41a",
"url": "https://github.com/status-im/nim-chronicles",
"downloadMethod": "git",
"dependencies": [
"testutils",
"json_serialization"
],
"checksums": {
"sha1": "79f09526d4d9b9196dd2f6a75310d71a890c4f88"
}
},
"nimcrypto": {
"version": "0.6.0",
"vcsRevision": "1c8d6e3caf3abc572136ae9a1da81730c4eb4288",
"url": "https://github.com/cheatfate/nimcrypto",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "da3b105ad6bd7beef25c69f03afccb5e5233d483"
}
},
"zlib": {
"version": "0.1.0",
"vcsRevision": "a2f44bb7f65571a894227ff6fde9298a104e03a5",
"url": "https://github.com/status-im/nim-zlib",
"downloadMethod": "git",
"dependencies": [
"stew"
],
"checksums": {
"sha1": "edbf76ebdecb63d302d1883fe4b23b2eb0608cb7"
}
},
"websock": {
"version": "0.1.0",
"vcsRevision": "f8ed9b40a5ff27ad02a3c237c4905b0924e3f982",
"url": "https://github.com/status-im/nim-websock",
"downloadMethod": "git",
"dependencies": [
"chronos",
"httputils",
"chronicles",
"stew",
"nimcrypto",
"bearssl",
"zlib"
],
"checksums": {
"sha1": "94f836ae589056b2deb04bdfdcd614fff80adaf5"
}
},
"dnsclient": {
"version": "0.3.4",
"vcsRevision": "23214235d4784d24aceed99bbfe153379ea557c8",
"url": "https://github.com/ba0f3/dnsclient.nim",
"downloadMethod": "git",
"dependencies": [],
"checksums": {
"sha1": "65262c7e533ff49d6aca5539da4bc6c6ce132f40"
}
},
"secp256k1": {
"version": "0.6.0.3.2",
"vcsRevision": "7246d91c667f4cc3759fdd50339caa45a2ecd8be",
"url": "https://github.com/status-im/nim-secp256k1",
"downloadMethod": "git",
"dependencies": [
"stew",
"nimcrypto"
],
"checksums": {
"sha1": "aa0f88a68f67cef07f9f4a365a0121a2217dab81"
}
},
"metrics": {
"version": "0.0.1",
"vcsRevision": "6142e433fc8ea9b73379770a788017ac528d46ff",
"url": "https://github.com/status-im/nim-metrics",
"downloadMethod": "git",
"dependencies": [
"chronos"
],
"checksums": {
"sha1": "16ba266012d32d49631ca00add8e4698343758e0"
}
}
},
"tasks": {}
}

315
scripts/build_nim.sh Normal file
View File

@@ -0,0 +1,315 @@
#!/usr/bin/env bash
# used in Travis CI and AppVeyor scripts
# Copyright (c) 2018-2020 Status Research & Development GmbH. Licensed under
# either of:
# - Apache License, version 2.0
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
set -e
# Git commits
: ${CSOURCES_V1_COMMIT:=a8a5241f9475099c823cfe1a5e0ca4022ac201ff}
: ${CSOURCES_V2_COMMIT:=86742fb02c6606ab01a532a0085784effb2e753e}
: ${CSOURCES_V1_REPO:=https://github.com/nim-lang/csources_v1.git}
: ${CSOURCES_V2_REPO:=https://github.com/nim-lang/csources_v2.git}
# After this Nim commit, use csources v2
: ${CSOURCES_V2_START_COMMIT:=f7c203fb6c89b5cef83c4f326aeb23ef8c4a2c40}
: ${NIMBLE_COMMIT:=3575fd54a890d910ace56678aa74b4237d604175} # 0.14.2
# NIM_COMMIT could be a (partial) commit hash, a tag, a branch name, etc. Empty by default.
NIM_COMMIT_HASH="" # full hash for NIM_COMMIT, retrieved in "nim_needs_rebuilding()"
# script arguments
[[ $# -ne 4 ]] && { echo "Usage: $0 nim_dir csources_dir nimble_dir ci_cache_dir"; exit 1; }
NIM_DIR="$1"
CSOURCES_DIR="$2" # can be relative to NIM_DIR
NIMBLE_DIR="$3" # can be relative to NIM_DIR
CI_CACHE="$4"
## env vars
# verbosity level
[[ -z "$V" ]] && V=0
[[ -z "$CC" ]] && CC="gcc"
# to build csources in parallel, set MAKE="make -jN"
[[ -z "$MAKE" ]] && MAKE="make"
# for 32-bit binaries on a 64-bit host
UCPU=""
[[ "$ARCH_OVERRIDE" == "x86" ]] && UCPU="ucpu=i686"
[[ -z "$NIM_BUILD_MSG" ]] && NIM_BUILD_MSG="Building the Nim compiler"
[[ -z "$QUICK_AND_DIRTY_COMPILER" ]] && QUICK_AND_DIRTY_COMPILER=0
[[ -z "$QUICK_AND_DIRTY_NIMBLE" ]] && QUICK_AND_DIRTY_NIMBLE=0
# Windows detection
if uname | grep -qiE "mingw|msys"; then
ON_WINDOWS=1
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
ON_WINDOWS=0
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi
NIM_BINARY="${NIM_DIR}/bin/nim${EXE_SUFFIX}"
MAX_NIM_BINARIES="10" # Old ones get deleted.
nim_needs_rebuilding() {
REBUILD=0
NO_REBUILD=1
echo "Nim is being rebuilt..."
if [[ ! -e "$NIM_DIR" ]]; then
# Shallow clone, optimised for the default NIM_COMMIT value.
git clone -q --depth=1 https://github.com/status-im/Nim.git "$NIM_DIR"
fi
pushd "${NIM_DIR}" >/dev/null
if [[ -n "${NIM_COMMIT}" ]]; then
# support old Git versions, like the one from Ubuntu-18.04
git restore . 2>/dev/null || git reset --hard
if ! git checkout -q ${NIM_COMMIT} 2>/dev/null; then
echo "Downloading Nim sources..."
echo $(pwd)
# Pay the price for a non-default NIM_COMMIT here, by fetching everything.
# (This includes upstream branches and tags that might be missing from our fork.)
git remote add upstream https://github.com/nim-lang/Nim
git fetch --all --tags --quiet
git checkout -q ${NIM_COMMIT}
fi
# In case the local branch diverged and a fast-forward merge is not possible.
git fetch || true
git reset -q --hard origin/${NIM_COMMIT} 2>/dev/null || true
# In case NIM_COMMIT is a local branch that's behind the remote one it's tracking.
git pull -q 2>/dev/null || true
git checkout -q ${NIM_COMMIT}
# We can't use "rev-parse" here, because it would return the tag object's
# hash instead of the commit hash, when NIM_COMMIT is a tag.
NIM_COMMIT_HASH="$(git rev-list -n 1 ${NIM_COMMIT})"
else
# NIM_COMMIT is empty, so assume the commit we need is already checked out
NIM_COMMIT_HASH="$(git rev-list -n 1 HEAD)"
fi
if [[ ! -d "$NIMBLE_DIR" ]]; then
echo "Downloading Nimble sources..."
echo $(pwd)
mkdir -p "$NIMBLE_DIR"
pushd "$NIMBLE_DIR"
git clone https://github.com/nim-lang/nimble.git .
git checkout $NIMBLE_COMMIT
# we have to delete .git or koch.nim will checkout a branch tip, overriding our target commit
rm -rf .git
popd
fi
if [[ "$NIMBLE_DIR" != "dist/nimble" ]]; then
mkdir -p dist
rm -rf dist/nimble
ln -s ../"$NIMBLE_DIR" dist/nimble
fi
popd >/dev/null
if [[ -n "$CI_CACHE" && -d "$CI_CACHE" ]]; then
cp -a "$CI_CACHE"/* "$NIM_DIR"/bin/ || true # let this one fail with an empty cache dir
fi
# Delete old Nim binaries, to put a limit on how much storage we use.
for F in "$(ls -t "${NIM_DIR}"/bin/nim_commit_* 2>/dev/null | tail -n +$((MAX_NIM_BINARIES + 1)))"; do
if [[ -e "${F}" ]]; then
rm "${F}"
fi
done
# Compare the last built commit to the one requested.
# Handle the scenario where our symlink is manually deleted by the user.
if [[ -e "${NIM_DIR}/bin/last_built_commit" && \
-e "${NIM_DIR}/bin/nim${EXE_SUFFIX}" && \
"$(cat "${NIM_DIR}/bin/last_built_commit")" == "${NIM_COMMIT_HASH}" ]]; then
return $NO_REBUILD
elif [[ -e "${NIM_DIR}/bin/nim_commit_${NIM_COMMIT_HASH}" ]]; then
# we built the requested commit in the past, so we simply reuse it
rm -f "${NIM_DIR}/bin/nim${EXE_SUFFIX}"
ln -s "nim_commit_${NIM_COMMIT_HASH}" "${NIM_DIR}/bin/nim${EXE_SUFFIX}"
echo ${NIM_COMMIT_HASH} > "${NIM_DIR}/bin/last_built_commit"
return $NO_REBUILD
else
return $REBUILD
fi
}
build_nim() {
echo -e "$NIM_BUILD_MSG"
[[ "$V" == "0" ]] && exec &>/dev/null
# working directory
pushd "$NIM_DIR"
echo "Running build_nim"
if grep -q skipIntegrityCheck koch.nim; then
# Run Nim buildchain
. ci/funs.sh
echo "Building with default buildchain"
NIMCORES=1 nimBuildCsourcesIfNeeded $UCPU
bin/nim c --noNimblePath --skipUserCfg --skipParentCfg --warnings:off --hints:off koch
./koch --skipIntegrityCheck boot -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
if [[ "${QUICK_AND_DIRTY_COMPILER}" == "0" ]]; then
# We want tools
./koch tools -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
elif [[ "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
# We just want nimble
./koch nimble -d:release --skipUserCfg --skipParentCfg --warnings:off --hints:off
fi
else
# Custom buildchain for older versions
# TODO Remove this once the default NIM_COMMIT supports `--skipIntegrityCheck`
# We will still be able to compile older versions by removing the flag,
# which will just waste a bit of CPU
echo "Building with custom buildchain"
# Git repos for csources and Nimble
if [[ ! -d "$CSOURCES_DIR" ]]; then
if git merge-base --is-ancestor $CSOURCES_V2_START_COMMIT $NIM_COMMIT_HASH; then
CSOURCES_REPO=$CSOURCES_V2_REPO
CSOURCES_COMMIT=$CSOURCES_V2_COMMIT
else
CSOURCES_REPO=$CSOURCES_V1_REPO
CSOURCES_COMMIT=$CSOURCES_V1_COMMIT
fi
mkdir -p "$CSOURCES_DIR"
pushd "$CSOURCES_DIR"
git clone $CSOURCES_REPO .
git checkout $CSOURCES_COMMIT
popd
fi
if [[ "$CSOURCES_DIR" != "csources" ]]; then
rm -rf csources
ln -s "$CSOURCES_DIR" csources
fi
if [[ ! -d "$NIMBLE_DIR" ]]; then
mkdir -p "$NIMBLE_DIR"
pushd "$NIMBLE_DIR"
git clone https://github.com/nim-lang/nimble.git .
git checkout $NIMBLE_COMMIT
# We have to delete .git, or koch.nim will check out a branch tip, overriding our target commit
rm -rf .git
popd
fi
if [[ "$NIMBLE_DIR" != "dist/nimble" ]]; then
mkdir -p dist
rm -rf dist/nimble
ln -s ../"$NIMBLE_DIR" dist/nimble
fi
# bootstrap the Nim compiler and build the tools
rm -f bin/{nim,nim_csources}
pushd csources
if [[ "$ON_WINDOWS" == "0" ]]; then
$MAKE $UCPU clean
$MAKE $UCPU LD=$CC
else
$MAKE myos=windows $UCPU clean
$MAKE myos=windows $UCPU CC=gcc LD=gcc
fi
popd
if [[ -e csources/bin ]]; then
rm -f bin/nim bin/nim_csources
cp -a csources/bin/nim bin/nim
cp -a csources/bin/nim bin/nim_csources
rm -rf csources/bin
else
cp -a bin/nim bin/nim_csources
fi
if [[ "$QUICK_AND_DIRTY_COMPILER" == "0" ]]; then
sed \
-e 's/koch$/--warnings:off --hints:off koch/' \
-e 's/koch boot/koch boot --warnings:off --hints:off/' \
-e '/nimBuildCsourcesIfNeeded/d' \
build_all.sh > build_all_custom.sh
sh build_all_custom.sh
rm build_all_custom.sh
else
# Don't re-build it multiple times until we get identical
# binaries, like "build_all.sh" does. Don't build any tools
# either. This is all about build speed, not developer comfort.
bin/nim_csources \
c \
--compileOnly \
--nimcache:nimcache \
-d:release \
--skipUserCfg \
--skipParentCfg \
--warnings:off \
--hints:off \
compiler/nim.nim
bin/nim_csources \
jsonscript \
--nimcache:nimcache \
--skipUserCfg \
--skipParentCfg \
compiler/nim.nim
cp -a compiler/nim bin/nim1
# If we stop here, we risk ending up with a buggy compiler:
# https://github.com/status-im/nimbus-eth2/pull/2220
# https://github.com/status-im/nimbus-eth2/issues/2310
bin/nim1 \
c \
--compileOnly \
--nimcache:nimcache \
-d:release \
--skipUserCfg \
--skipParentCfg \
--warnings:off \
--hints:off \
compiler/nim.nim
bin/nim1 \
jsonscript \
--nimcache:nimcache \
--skipUserCfg \
--skipParentCfg \
compiler/nim.nim
rm -f bin/nim
cp -a compiler/nim bin/nim
rm bin/nim1
# Do we want Nimble in this quick build?
if [[ "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
bin/nim c -d:release --noNimblePath --skipUserCfg --skipParentCfg dist/nimble/src/nimble.nim
mv dist/nimble/src/nimble bin/
fi
fi
fi
if [[ "$QUICK_AND_DIRTY_COMPILER" == "0" || "${QUICK_AND_DIRTY_NIMBLE}" != "0" ]]; then
# Nimble needs a CA cert
rm -f bin/cacert.pem
curl -LsS -o bin/cacert.pem https://curl.se/ca/cacert.pem || echo "Warning: 'curl' failed to download a CA cert needed by Nimble. Ignoring it."
fi
# record the built commit
echo ${NIM_COMMIT_HASH} > bin/last_built_commit
# create the symlink
mv bin/nim bin/nim_commit_${NIM_COMMIT_HASH}
ln -s nim_commit_${NIM_COMMIT_HASH} bin/nim${EXE_SUFFIX}
popd # we were in $NIM_DIR
# update the CI cache
if [[ -n "$CI_CACHE" ]]; then
rm -rf "$CI_CACHE"
mkdir "$CI_CACHE"
cp "$NIM_DIR"/bin/* "$CI_CACHE"/
fi
}
if nim_needs_rebuilding; then
build_nim
fi
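# For context, a rough sketch of driving this helper by hand. The file name "build_nim.sh"
# and the concrete values below are assumptions; the remaining variables (NIMBLE_COMMIT,
# CSOURCES_*, EXE_SUFFIX, ...) are expected to be set earlier in this script or by CI.
#
#   export NIM_COMMIT="v1.6.16" NIM_DIR="NimBinaries" MAX_NIM_BINARIES="3"
#   . ./build_nim.sh                          # the trailing if-block above rebuilds only when needed
#   "${NIM_DIR}/bin/nim${EXE_SUFFIX}" --version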

View File

@@ -5,21 +5,21 @@ export unittest2, chronos
template asyncTeardown*(body: untyped): untyped =
teardown:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncSetup*(body: untyped): untyped =
setup:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
template asyncTest*(name: string, body: untyped): untyped =
test name:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
@@ -31,7 +31,7 @@ template flakyAsyncTest*(name: string, attempts: int, body: untyped): untyped =
inc attemptNumber
try:
waitFor((
proc() {.async, gcsafe.} =
proc() {.async.} =
body
)())
except Exception as e:

View File

@@ -20,7 +20,7 @@ proc writeLp(s: StreamTransport, msg: string | seq[byte]): Future[int] {.gcsafe.
buf.finish()
result = s.write(buf.buffer)
proc readLp(s: StreamTransport): Future[seq[byte]] {.async, gcsafe.} =
proc readLp(s: StreamTransport): Future[seq[byte]] {.async.} =
## read length prefixed msg
var
size: uint

View File

@@ -30,7 +30,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport2 = transpProvider()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
if conn.observedAddr.isSome():
check transport1.handles(conn.observedAddr.get())
@@ -58,7 +58,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.write("Hello!")
await conn.close()
@@ -85,7 +85,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
var msg = newSeq[byte](6)
await conn.readExactly(addr msg[0], 6)
@@ -147,7 +147,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(addrs)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
await conn.write(newSeq[byte](0))
@@ -214,7 +214,7 @@ template commonTransportTest*(prov: TransportProvider, ma1: string, ma2: string
let transport1 = transpProvider()
await transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
await conn.close()

View File

@@ -111,7 +111,7 @@ proc bridgedConnections*: (Connection, Connection) =
return (connA, connB)
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async, gcsafe.} =
proc checkExpiringInternal(cond: proc(): bool {.raises: [], gcsafe.} ): Future[bool] {.async.} =
let start = Moment.now()
while true:
if Moment.now() > (start + chronos.seconds(5)):
@@ -146,8 +146,8 @@ proc default*(T: typedesc[MockResolver]): T =
resolver.ipResponses[("localhost", true)] = @["::1"]
resolver
proc setDNSAddr*(switch: Switch) {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc setDNSAddr*(switch: Switch) {.async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
switch.peerInfo.addressMappers.add(addressMapper)
await switch.peerInfo.update()

View File

@@ -0,0 +1,17 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.14 as builder
WORKDIR /workspace
COPY .pinned libp2p.nimble nim-libp2p/
RUN cd nim-libp2p && nimble install_pinned && nimble install redis -y
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
FROM --platform=linux/amd64 debian:bookworm-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2
COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
ENV RUST_BACKTRACE=1
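# A rough sketch for building and running this image by hand. The image tag and Dockerfile
# path are assumptions, a run additionally needs the "redis" host and a relay peer that the
# interop test harness normally provides, and the command is given explicitly in case no
# entrypoint is set:
#
#   docker build -t nim-libp2p-hole-punch -f tests/hole-punching-interop/Dockerfile .
#   docker run --rm -e MODE=listen nim-libp2p-hole-punch /usr/bin/hole-punch-client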

View File

@@ -0,0 +1,114 @@
import std/[os, options, strformat]
import redis
import chronos, chronicles
import ../../libp2p/[builders,
switch,
observedaddrmanager,
services/hpservice,
services/autorelayservice,
protocols/connectivity/autonat/client as aclient,
protocols/connectivity/relay/client as rclient,
protocols/connectivity/relay/relay,
protocols/connectivity/autonat/service,
protocols/ping]
import ../stubs/autonatclientstub
proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
let rng = newRng()
var builder = SwitchBuilder.new()
.withRng(rng)
.withAddresses(@[ MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() ])
.withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
.withTcpTransport({ServerFlags.TcpNoDelay})
.withYamux()
.withAutonat()
.withNoise()
if hpService != nil:
builder = builder.withServices(@[hpService])
if r != nil:
builder = builder.withCircuitRelay(r)
let s = builder.build()
s.mount(Ping.new(rng=rng))
return s
proc main() {.async.} =
try:
let relayClient = RelayClient.new()
let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
autonatClientStub.answer = NotReachable
let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
let hpservice = HPService.new(autonatService, autoRelayService)
let
isListener = getEnv("MODE") == "listen"
switch = createSwitch(relayClient, hpservice)
auxSwitch = createSwitch()
redisClient = open("redis", 6379.Port)
debug "Connected to redis"
await switch.start()
await auxSwitch.start()
let relayAddr =
try:
redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
except Exception as e:
raise newException(CatchableError, e.msg)
# This is necessary to make the autonat service work. It will ask this peer for our reachability, to which the
# autonat client stub will answer NotReachable.
await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)
# Wait for autonat to be NotReachable
while autonatService.networkReachability != NetworkReachability.NotReachable:
await sleepAsync(100.milliseconds)
# This will trigger the autonat relay service to make a reservation.
let relayMA = MultiAddress.init(relayAddr[1]).tryGet()
debug "Got relay address", relayMA
let relayId = await switch.connect(relayMA)
debug "Connected to relay", relayId
# Wait for our relay address to be published
while switch.peerInfo.addrs.len == 0:
await sleepAsync(100.milliseconds)
if isListener:
let listenerPeerId = switch.peerInfo.peerId
discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
debug "Pushed listener client peer id to redis", listenerPeerId
# Nothing to do anymore, wait to be killed
await sleepAsync(2.minutes)
else:
let listenerId =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
debug "Dialing listener relay address", listenerRelayAddr
await switch.connect(listenerId, @[listenerRelayAddr])
# wait for hole-punching to complete in the background
await sleepAsync(5000.milliseconds)
let conn = switch.connManager.selectMuxer(listenerId).connection
let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
let delay = await Ping.new().ping(channel)
await allFuturesThrowing(channel.close(), conn.close(), switch.stop(), auxSwitch.stop())
echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""
quit(0)
except CatchableError as e:
error "Unexpected error", msg = e.msg
discard waitFor(main().withTimeout(4.minutes))
quit(1)

View File

@@ -0,0 +1,7 @@
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp"
]
}

View File

@@ -26,7 +26,7 @@ import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../helpers
proc waitSub(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub(sender, receiver: auto; key: string) {.async.} =
# turn things deterministic
# this is for testing purposes only
var ceil = 15
@@ -43,7 +43,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe A -> B":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -81,7 +81,7 @@ suite "FloodSub":
asyncTest "FloodSub basic publish/subscribe B -> A":
var completionFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
completionFut.complete(true)
@@ -113,7 +113,7 @@ suite "FloodSub":
asyncTest "FloodSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -151,7 +151,7 @@ suite "FloodSub":
await allFuturesThrowing(nodesFut)
asyncTest "FloodSub validation should fail":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -186,7 +186,7 @@ suite "FloodSub":
asyncTest "FloodSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -235,7 +235,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -283,7 +283,7 @@ suite "FloodSub":
counter = new int
futs[i] = (
fut,
(proc(topic: string, data: seq[byte]) {.async, gcsafe.} =
(proc(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
inc counter[]
if counter[] == runs - 1:
@@ -333,7 +333,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check data.len < 50
inc(messageReceived)
@@ -375,7 +375,7 @@ suite "FloodSub":
asyncTest "FloodSub message size validation 2":
var messageReceived = 0
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
inc(messageReceived)
let

View File

@@ -1,40 +1,30 @@
include ../../libp2p/protocols/pubsub/gossipsub
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[options, deques]
import std/[options, deques, sequtils, enumerate, algorithm]
import stew/byteutils
import ../../libp2p/builders
import ../../libp2p/errors
import ../../libp2p/crypto/crypto
import ../../libp2p/stream/bufferstream
import ../../libp2p/protocols/pubsub/[pubsub, gossipsub, mcache, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message, messages]
import ../../libp2p/switch
import ../../libp2p/muxers/muxer
import ../../libp2p/protocols/pubsub/rpc/protobuf
import utils
import ../helpers
type
TestGossipSub = ref object of GossipSub
proc noop(data: seq[byte]) {.async, gcsafe.} = discard
proc getPubSubPeer(p: TestGossipSub, peerId: PeerId): PubSubPeer =
proc getConn(): Future[Connection] =
p.switch.dial(peerId, GossipSubCodec)
let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
debug "created new pubsub peer", peerId
p.peers[peerId] = pubSubPeer
onNewPeer(p, pubSubPeer)
pubSubPeer
proc randomPeerId(): PeerId =
try:
PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
except CatchableError as exc:
raise newException(Defect, exc.msg)
proc noop(data: seq[byte]) {.async.} = discard
const MsgIdSuccess = "msg id gen success"
@@ -170,7 +160,7 @@ suite "GossipSub internal":
asyncTest "`replenishFanout` Degree Lo":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -197,7 +187,7 @@ suite "GossipSub internal":
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -227,7 +217,7 @@ suite "GossipSub internal":
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic1 = "foobar1"
@@ -264,7 +254,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should gather up to degree D non intersecting peers":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -325,7 +315,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in mesh":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -365,7 +355,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in fanout":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -406,7 +396,7 @@ suite "GossipSub internal":
asyncTest "`getGossipPeers` - should not crash on missing topics in gossip":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
discard
let topic = "foobar"
@@ -447,7 +437,7 @@ suite "GossipSub internal":
asyncTest "Drop messages of topics without subscription":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
let topic = "foobar"
@@ -470,7 +460,7 @@ suite "GossipSub internal":
let peer = gossipSub.getPubSubPeer(peerId)
inc seqno
let msg = Message.init(peerId, ("bar" & $i).toBytes(), topic, some(seqno))
await gossipSub.rpcHandler(peer, RPCMsg(messages: @[msg]))
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
@@ -481,7 +471,7 @@ suite "GossipSub internal":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.parameters.disconnectBadPeers = true
gossipSub.parameters.appSpecificWeight = 1.0
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
let topic = "foobar"
@@ -525,7 +515,7 @@ suite "GossipSub internal":
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
await gossipSub.rpcHandler(peer, lotOfSubs)
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
@@ -656,7 +646,7 @@ suite "GossipSub internal":
asyncTest "handleIHave/Iwant tests":
let gossipSub = TestGossipSub.init(newStandardSwitch())
proc handler(peer: PubSubPeer, msg: RPCMsg) {.async.} =
proc handler(peer: PubSubPeer, data: seq[byte]) {.async.} =
check false
proc handler2(topic: string, data: seq[byte]) {.async.} = discard
@@ -727,3 +717,130 @@ suite "GossipSub internal":
await allFuturesThrowing(conns.mapIt(it.close()))
await gossipSub.switch.stop()
proc setupTest(): Future[tuple[gossip0: GossipSub, gossip1: GossipSub, receivedMessages: ref HashSet[seq[byte]]]] {.async.} =
let
nodes = generateNodes(2, gossip = true, verifySignature = false)
discard await allFinished(
nodes[0].switch.start(),
nodes[1].switch.start()
)
await nodes[1].switch.connect(nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs)
var receivedMessages = new(HashSet[seq[byte]])
proc handlerA(topic: string, data: seq[byte]) {.async.} =
receivedMessages[].incl(data)
proc handlerB(topic: string, data: seq[byte]) {.async.} =
discard
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
await waitSubGraph(nodes, "foobar")
var gossip0: GossipSub = GossipSub(nodes[0])
var gossip1: GossipSub = GossipSub(nodes[1])
return (gossip0, gossip1, receivedMessages)
proc teardownTest(gossip0: GossipSub, gossip1: GossipSub) {.async.} =
await allFuturesThrowing(
gossip0.switch.stop(),
gossip1.switch.stop()
)
proc createMessages(gossip0: GossipSub, gossip1: GossipSub, size1: int, size2: int): tuple[iwantMessageIds: seq[MessageId], sentMessages: HashSet[seq[byte]]] =
var iwantMessageIds = newSeq[MessageId]()
var sentMessages = initHashSet[seq[byte]]()
for i, size in enumerate([size1, size2]):
let data = newSeqWith[byte](size, i.byte)
sentMessages.incl(data)
let msg = Message.init(gossip1.peerInfo.peerId, data, "foobar", some(uint64(i + 1)))
let iwantMessageId = gossip1.msgIdProvider(msg).expect(MsgIdSuccess)
iwantMessageIds.add(iwantMessageId)
gossip1.mcache.put(iwantMessageId, msg)
let peer = gossip1.peers[(gossip0.peerInfo.peerId)]
peer.sentIHaves[^1].incl(iwantMessageId)
return (iwantMessageIds, sentMessages)
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize div 2 + 1
let (iwantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: iwantMessageIds)]
))))
checkExpiring: receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
# Expected: No messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let messageSize = gossip1.maxMessageSize + 10
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, messageSize, messageSize)
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
await sleepAsync(300.milliseconds)
checkExpiring: receivedMessages[].len == 0
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let size1 = gossip1.maxMessageSize div 2
let size2 = gossip1.maxMessageSize div 3
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
checkExpiring: receivedMessages[] == sentMessages
check receivedMessages[].len == 2
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
# Expected: Only the smaller message should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
let maxSize = gossip1.maxMessageSize
let size1 = maxSize div 2
let size2 = maxSize + 10
let (bigIWantMessageIds, sentMessages) = createMessages(gossip0, gossip1, size1, size2)
gossip1.broadcast(gossip1.mesh["foobar"], RPCMsg(control: some(ControlMessage(
ihave: @[ControlIHave(topicId: "foobar", messageIds: bigIWantMessageIds)]
))))
var smallestSet: HashSet[seq[byte]]
let seqs = toSeq(sentMessages)
if seqs[0] < seqs[1]:
smallestSet.incl(seqs[0])
else:
smallestSet.incl(seqs[1])
checkExpiring: receivedMessages[] == smallestSet
check receivedMessages[].len == 1
await teardownTest(gossip0, gossip1)

View File

@@ -10,8 +10,9 @@
{.used.}
import sequtils, options, tables, sets, sugar
import chronos, stew/byteutils
import chronos, stew/byteutils, chronos/ratelimit
import chronicles
import metrics
import utils, ../../libp2p/[errors,
peerid,
peerinfo,
@@ -20,6 +21,7 @@ import utils, ../../libp2p/[errors,
crypto/crypto,
protocols/pubsub/pubsub,
protocols/pubsub/gossipsub,
protocols/pubsub/gossipsub/scoring,
protocols/pubsub/pubsubpeer,
protocols/pubsub/peertable,
protocols/pubsub/timedcache,
@@ -45,7 +47,7 @@ suite "GossipSub":
asyncTest "GossipSub validation should succeed":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -90,7 +92,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (reject)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -136,7 +138,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "GossipSub validation should fail (ignore)":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check false # if we get here, it should fail
let
@@ -183,7 +185,7 @@ suite "GossipSub":
asyncTest "GossipSub validation one fails and one succeeds":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foo"
handlerFut.complete(true)
@@ -236,7 +238,7 @@ suite "GossipSub":
asyncTest "GossipSub unsub - resub faster than backoff":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -287,7 +289,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -321,7 +323,7 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
asyncTest "e2e - GossipSub should add remote peer topic subscriptions if both peers are subscribed":
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let
@@ -372,7 +374,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -426,7 +428,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
var passed = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete()
@@ -479,7 +481,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over mesh A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -546,11 +548,11 @@ suite "GossipSub":
var
aReceived = 0
cReceived = 0
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerA(topic: string, data: seq[byte]) {.async.} =
inc aReceived
check aReceived < 2
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handlerB(topic: string, data: seq[byte]) {.async.} = discard
proc handlerC(topic: string, data: seq[byte]) {.async.} =
inc cReceived
check cReceived < 2
cRelayed.complete()
@@ -594,7 +596,7 @@ suite "GossipSub":
asyncTest "e2e - GossipSub send over floodPublish A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
passed.complete(true)
@@ -651,7 +653,7 @@ suite "GossipSub":
)
proc connectNodes(nodes: seq[PubSub], target: PubSub) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for node in nodes:
@@ -659,7 +661,7 @@ suite "GossipSub":
await node.switch.connect(target.peerInfo.peerId, target.peerInfo.addrs)
proc baseTestProcedure(nodes: seq[PubSub], gossip1: GossipSub, numPeersFirstMsg: int, numPeersSecondMsg: int) {.async.} =
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
block setup:
@@ -725,7 +727,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -776,7 +778,7 @@ suite "GossipSub":
var handler: TopicHandler
capture dialer, i:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -817,7 +819,7 @@ suite "GossipSub":
# PX to A & C
#
# C sent its SPR, not A
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
discard # not used in this test
let
@@ -893,9 +895,9 @@ suite "GossipSub":
await nodes[1].switch.connect(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
let bFinished = newFuture[void]()
proc handlerA(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async, gcsafe.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async, gcsafe.} = doAssert false
proc handlerA(topic: string, data: seq[byte]) {.async.} = discard
proc handlerB(topic: string, data: seq[byte]) {.async.} = bFinished.complete()
proc handlerC(topic: string, data: seq[byte]) {.async.} = doAssert false
nodes[0].subscribe("foobar", handlerA)
nodes[1].subscribe("foobar", handlerB)
@@ -928,3 +930,136 @@ suite "GossipSub":
await allFuturesThrowing(nodesFut.concat())
proc initializeGossipTest(): Future[(seq[PubSub], GossipSub, GossipSub)] {.async.} =
let nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)))
discard await allFinished(
nodes[0].switch.start(),
nodes[1].switch.start(),
)
await subscribeNodes(nodes)
proc handle(topic: string, data: seq[byte]) {.async.} = discard
let gossip0 = GossipSub(nodes[0])
let gossip1 = GossipSub(nodes[1])
gossip0.subscribe("foobar", handle)
gossip1.subscribe("foobar", handle)
await waitSubGraph(nodes, "foobar")
# Avoid being disconnected by failing signature verification
gossip0.verifySignature = false
gossip1.verifySignature = false
return (nodes, gossip0, gossip1)
proc currentRateLimitHits(): float64 =
try:
libp2p_gossipsub_peers_rate_limit_hits.valueByName("libp2p_gossipsub_peers_rate_limit_hits_total", @["nim-libp2p"])
except KeyError:
0
asyncTest "e2e - GossipSub should not rate limit decodable messages below the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](10))]))
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh["foobar"], RPCMsg(messages: @[Message(topicIDs: @["foobar"], data: newSeq[byte](12))]))
await sleepAsync(300.millis)
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
check currentRateLimitHits() == rateLimitHits
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit undecodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
# Simulate sending an undecodable message
await gossip1.peers[gossip0.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](33, 1.byte))
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
await gossip0.peers[gossip1.switch.peerInfo.peerId].sendEncoded(newSeqWith[byte](35, 1.byte))
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit decodable messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let msg = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
let msg2 = RPCMsg(control: some(ControlMessage(prune: @[
ControlPrune(topicID: "foobar", peers: @[
PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))
], backoff: 123'u64)
])))
gossip0.broadcast(gossip0.mesh["foobar"], msg2)
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)
asyncTest "e2e - GossipSub should rate limit invalid messages above the size allowed":
let rateLimitHits = currentRateLimitHits()
let (nodes, gossip0, gossip1) = await initializeGossipTest()
let topic = "foobar"
proc execValidator(topic: string, message: messages.Message): Future[ValidationResult] {.raises: [].} =
let res = newFuture[ValidationResult]()
res.complete(ValidationResult.Reject)
res
gossip0.addValidator(topic, execValidator)
gossip1.addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](40))])
gossip0.broadcast(gossip0.mesh[topic], msg)
await sleepAsync(300.millis)
check currentRateLimitHits() == rateLimitHits + 1
check gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
gossip1.parameters.disconnectPeerAboveRateLimit = true
gossip0.broadcast(gossip0.mesh[topic], RPCMsg(messages: @[Message(topicIDs: @[topic], data: newSeq[byte](35))]))
checkExpiring gossip1.switch.isConnected(gossip0.switch.peerInfo.peerId) == false
check currentRateLimitHits() == rateLimitHits + 2
await stopNodes(nodes)

View File

@@ -59,7 +59,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -93,7 +93,7 @@ suite "GossipSub":
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
@@ -155,7 +155,7 @@ suite "GossipSub":
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await subscribeNodes(nodes)
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} = discard
proc handler(topic: string, data: seq[byte]) {.async.} = discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
@@ -167,36 +167,44 @@ suite "GossipSub":
asyncTest "GossipSub directPeers: always forward messages":
let
nodes = generateNodes(2, gossip = true)
nodes = generateNodes(3, gossip = true)
# start switches
nodesFut = await allFinished(
nodes[0].switch.start(),
nodes[1].switch.start(),
nodes[2].switch.start(),
)
await GossipSub(nodes[0]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
await GossipSub(nodes[1]).addDirectPeer(nodes[0].switch.peerInfo.peerId, nodes[0].switch.peerInfo.addrs)
await GossipSub(nodes[1]).addDirectPeer(nodes[2].switch.peerInfo.peerId, nodes[2].switch.peerInfo.addrs)
await GossipSub(nodes[2]).addDirectPeer(nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
proc noop(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
nodes[0].subscribe("foobar", noop)
nodes[1].subscribe("foobar", noop)
nodes[2].subscribe("foobar", handler)
tryPublish await nodes[0].publish("foobar", toBytes("hellow")), 1
await handlerFut
await handlerFut.wait(2.seconds)
# peer shouldn't be in our mesh
check "foobar" notin GossipSub(nodes[0]).mesh
check "foobar" notin GossipSub(nodes[1]).mesh
check "foobar" notin GossipSub(nodes[2]).mesh
await allFuturesThrowing(
nodes[0].switch.stop(),
nodes[1].switch.stop()
nodes[1].switch.stop(),
nodes[2].switch.stop()
)
await allFuturesThrowing(nodesFut.concat())
@@ -218,7 +226,7 @@ suite "GossipSub":
GossipSub(nodes[1]).parameters.graylistThreshold = 100000
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete()
@@ -264,7 +272,7 @@ suite "GossipSub":
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topic: string, data: seq[byte]) {.async, gcsafe, closure.} =
handler = proc(topic: string, data: seq[byte]) {.async, closure.} =
if peerName notin seen:
seen[peerName] = 0
seen[peerName].inc
@@ -316,7 +324,7 @@ suite "GossipSub":
# Adding again subscriptions
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
for i in 0..<runs:
@@ -360,7 +368,7 @@ suite "GossipSub":
)
var handlerFut = newFuture[void]()
proc handler(topic: string, data: seq[byte]) {.async, gcsafe.} =
proc handler(topic: string, data: seq[byte]) {.async.} =
handlerFut.complete()
await subscribeNodes(nodes)

View File

@@ -2,10 +2,10 @@ import unittest2
{.used.}
import options
import options, strutils
import stew/byteutils
import ../../libp2p/[peerid, peerinfo,
crypto/crypto,
crypto/crypto as crypto,
protocols/pubsub/errors,
protocols/pubsub/rpc/message,
protocols/pubsub/rpc/messages]
@@ -28,7 +28,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
msg = Message.init(some(peer), @[], "topic", some(seqno), sign = true)
@@ -46,7 +46,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
@@ -64,7 +64,7 @@ suite "Message":
"""08011240B9EA7F0357B5C1247E4FCB5AD09C46818ECB07318CA84711875F4C6C
E6B946186A4EB44E0D714B2A2D48263D75CF52D30BEF9D9AE2A9FEB7DAF1775F
E731065A"""
seckey = PrivateKey.init(fromHex(stripSpaces(pkHex)))
seckey = PrivateKey.init(crypto.fromHex(stripSpaces(pkHex)))
.expect("valid private key bytes")
peer = PeerInfo.new(seckey)
msg = Message.init(some(peer), @[], "topic", uint64.none, sign = true)
@@ -73,3 +73,55 @@ suite "Message":
check:
msgIdResult.isErr
msgIdResult.error == ValidationResult.Reject
test "byteSize for RPCMsg":
var msg = Message(
fromPeer: PeerId(data: @['a'.byte, 'b'.byte]), # 2 bytes
data: @[1'u8, 2, 3], # 3 bytes
seqno: @[4'u8, 5], # 2 bytes
signature: @['c'.byte, 'd'.byte], # 2 bytes
key: @[6'u8, 7], # 2 bytes
topicIds: @["abc", "defgh"] # 3 + 5 = 8 bytes
)
var peerInfo = PeerInfoMsg(
peerId: PeerId(data: @['e'.byte]), # 1 byte
signedPeerRecord: @['f'.byte, 'g'.byte] # 2 bytes
)
var controlIHave = ControlIHave(
topicId: "ijk", # 3 bytes
messageIds: @[ @['l'.byte], @['m'.byte, 'n'.byte] ] # 1 + 2 = 3 bytes
)
var controlIWant = ControlIWant(
messageIds: @[ @['o'.byte, 'p'.byte], @['q'.byte] ] # 2 + 1 = 3 bytes
)
var controlGraft = ControlGraft(
topicId: "rst" # 3 bytes
)
var controlPrune = ControlPrune(
topicId: "uvw", # 3 bytes
peers: @[peerInfo, peerInfo], # (1 + 2) * 2 = 6 bytes
backoff: 12345678 # 8 bytes for uint64
)
var control = ControlMessage(
ihave: @[controlIHave, controlIHave], # (3 + 3) * 2 = 12 bytes
iwant: @[controlIWant], # 3 bytes
graft: @[controlGraft], # 3 bytes
prune: @[controlPrune], # 3 + 6 + 8 = 17 bytes
idontwant: @[controlIWant] # 3 bytes
)
var rpcMsg = RPCMsg(
subscriptions: @[SubOpts(subscribe: true, topic: "a".repeat(12)), SubOpts(subscribe: false, topic: "b".repeat(14))], # 1 + 12 + 1 + 14 = 28 bytes
messages: @[msg, msg], # 19 * 2 = 38 bytes
ping: @[1'u8, 2], # 2 bytes
pong: @[3'u8, 4], # 2 bytes
control: some(control) # 12 + 3 + 3 + 17 + 3 = 38 bytes
)
check byteSize(rpcMsg) == 28 + 38 + 2 + 2 + 38 # Total: 108 bytes

View File

@@ -5,20 +5,43 @@ const
libp2p_pubsub_anonymize {.booldefine.} = false
import hashes, random, tables, sets, sequtils
import chronos, stew/[byteutils, results]
import chronos, stew/[byteutils, results], chronos/ratelimit
import ../../libp2p/[builders,
protocols/pubsub/errors,
protocols/pubsub/pubsub,
protocols/pubsub/pubsubpeer,
protocols/pubsub/gossipsub,
protocols/pubsub/floodsub,
protocols/pubsub/rpc/messages,
protocols/secure/secure]
import ../helpers
import chronicles
export builders
randomize()
type
TestGossipSub* = ref object of GossipSub
proc getPubSubPeer*(p: TestGossipSub, peerId: PeerId): PubSubPeer =
proc getConn(): Future[Connection] =
p.switch.dial(peerId, GossipSubCodec)
let pubSubPeer = PubSubPeer.new(peerId, getConn, nil, GossipSubCodec, 1024 * 1024)
debug "created new pubsub peer", peerId
p.peers[peerId] = pubSubPeer
onNewPeer(p, pubSubPeer)
pubSubPeer
proc randomPeerId*(): PeerId =
try:
PeerId.init(PrivateKey.random(ECDSA, rng[]).get()).tryGet()
except CatchableError as exc:
raise newException(Defect, exc.msg)
func defaultMsgIdProvider*(m: Message): Result[MessageId, ValidationResult] =
let mid =
if m.seqno.len > 0 and m.fromPeer.data.len > 0:
@@ -44,7 +67,8 @@ proc generateNodes*(
sendSignedPeerRecord = false,
unsubscribeBackoff = 1.seconds,
maxMessageSize: int = 1024 * 1024,
enablePX: bool = false): seq[PubSub] =
enablePX: bool = false,
overheadRateLimit: Opt[tuple[bytes: int, interval: Duration]] = Opt.none(tuple[bytes: int, interval: Duration])): seq[PubSub] =
for i in 0..<num:
let switch = newStandardSwitch(secureManagers = secureManagers, sendSignedPeerRecord = sendSignedPeerRecord)
@@ -57,7 +81,7 @@ proc generateNodes*(
msgIdProvider = msgIdProvider,
anonymize = anonymize,
maxMessageSize = maxMessageSize,
parameters = (var p = GossipSubParams.init(); p.floodPublish = false; p.historyLength = 20; p.historyGossip = 20; p.unsubscribeBackoff = unsubscribeBackoff; p.enablePX = enablePX; p))
parameters = (var p = GossipSubParams.init(); p.floodPublish = false; p.historyLength = 20; p.historyGossip = 20; p.unsubscribeBackoff = unsubscribeBackoff; p.enablePX = enablePX; p.overheadRateLimit = overheadRateLimit; p))
# set some testing params, to enable scores
g.topicParams.mgetOrPut("foobar", TopicParams.init()).topicWeight = 1.0
g.topicParams.mgetOrPut("foo", TopicParams.init()).topicWeight = 1.0
@@ -104,7 +128,7 @@ proc subscribeRandom*(nodes: seq[PubSub]) {.async.} =
await dialer.switch.connect(node.peerInfo.peerId, node.peerInfo.addrs)
dialed.add(node.peerInfo.peerId)
proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
proc waitSub*(sender, receiver: auto; key: string) {.async.} =
if sender == receiver:
return
let timeout = Moment.now() + 5.seconds
@@ -124,7 +148,7 @@ proc waitSub*(sender, receiver: auto; key: string) {.async, gcsafe.} =
await sleepAsync(5.milliseconds)
doAssert Moment.now() < timeout, "waitSub timeout!"
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async, gcsafe.} =
proc waitSubGraph*(nodes: seq[PubSub], key: string) {.async.} =
let timeout = Moment.now() + 5.seconds
while true:
var

View File

@@ -24,7 +24,7 @@ type
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.gcsafe, async.}
dir = Direction.Out): Future[void] {.async.}
method connect*(
self: SwitchStub,
@@ -32,11 +32,11 @@ method connect*(
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out) {.async.} =
dir = Direction.Out) {.async.} =
if (self.connectStub != nil):
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.connectStub(self, peerId, addrs, forceDial, reuseConnection, dir)
else:
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)
proc new*(T: typedesc[SwitchStub], switch: Switch, connectStub: connectStubType = nil): T =
return SwitchStub(

View File

@@ -39,7 +39,7 @@ proc createAutonatSwitch(nameResolver: NameResolver = nil): Switch =
proc makeAutonatServicePrivate(): Switch =
var autonatProtocol = new LPProtocol
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async, gcsafe.} =
autonatProtocol.handler = proc (conn: Connection, proto: string) {.async.} =
discard await conn.readLp(1024)
await conn.writeLp(AutonatDialResponse(
status: DialError,

View File

@@ -87,7 +87,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
awaiter.complete()
@@ -131,7 +131,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Reachable
@@ -173,7 +173,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -213,7 +213,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and confidence.isSome() and confidence.get() >= 0.3:
if not awaiter.finished:
autonatClientStub.answer = Unknown
@@ -267,7 +267,7 @@ suite "Autonat Service":
let awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -302,12 +302,12 @@ suite "Autonat Service":
let awaiter2 = newFuture[void]()
let awaiter3 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler2(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter2.finished:
awaiter2.complete()
@@ -345,7 +345,7 @@ suite "Autonat Service":
let awaiter1 = newFuture[void]()
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler1(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter1.finished:
awaiter1.complete()
@@ -388,7 +388,7 @@ suite "Autonat Service":
var awaiter = newFuture[void]()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.Reachable and confidence.isSome() and confidence.get() == 1:
if not awaiter.finished:
awaiter.complete()
@@ -428,7 +428,7 @@ suite "Autonat Service":
let switch1 = createSwitch(autonatService)
let switch2 = createSwitch()
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.gcsafe, async.} =
proc statusAndConfidenceHandler(networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
fail()
check autonatService.networkReachability == NetworkReachability.Unknown

View File

@@ -32,7 +32,7 @@ method newStream*(
m: TestMuxer,
name: string = "",
lazy: bool = false):
Future[Connection] {.async, gcsafe.} =
Future[Connection] {.async.} =
result = Connection.new(m.peerId, Direction.Out, Opt.none(MultiAddress))
suite "Connection Manager":

View File

@@ -57,14 +57,15 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts, instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case, but the client
# dial will succeed.
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -83,8 +84,8 @@ suite "Dcutr":
body
checkExpiring:
# no connection will be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -95,7 +96,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectTimeoutProc)
@@ -114,7 +115,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
let behindNATSwitch = SwitchStub.new(newStandardSwitch(), connectErrorProc)
@@ -142,13 +143,16 @@ suite "Dcutr":
for t in behindNATSwitch.transports:
t.networkReachability = NetworkReachability.NotReachable
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
expect CatchableError:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts, instead of one. This dial is going to fail because the dcutr client is acting as the
# tcp simultaneous incoming upgrader in the dialer which works only in the simultaneous open case.
await DcutrClient.new().startSync(behindNATSwitch, publicSwitch.peerInfo.peerId, behindNATSwitch.peerInfo.addrs)
.wait(300.millis)
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialing will result
# in two connection attempts, instead of one. The server dial is going to fail, but the client dial will succeed.
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 2
# we still expect a new connection to be opened by the receiver peer acting as the dcutr server
behindNATSwitch.connManager.connCount(publicSwitch.peerInfo.peerId) == 1
await allFutures(behindNATSwitch.stop(), publicSwitch.stop())
@@ -159,7 +163,7 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
await sleepAsync(100.millis)
await ductrServerTest(connectProc)
@@ -171,7 +175,23 @@ suite "Dcutr":
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
raise newException(CatchableError, "error")
await ductrServerTest(connectProc)
test "should return valid TCP/IP and TCP/DNS addresses only":
let testAddrs = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet(),
MultiAddress.init("/ip4/198.51.100.42/udp/7890").tryGet()]
let expected = @[MultiAddress.init("/ip4/192.0.2.1/tcp/1234").tryGet(),
MultiAddress.init("/ip4/203.0.113.5/tcp/5678").tryGet(),
MultiAddress.init("/ip6/::1/tcp/9012").tryGet(),
MultiAddress.init("/dns4/example.com/tcp/3456").tryGet()]
let result = getHolePunchableAddrs(testAddrs)
check result == expected

View File

@@ -65,7 +65,7 @@ suite "Hole Punching":
let publicPeerSwitch = createSwitch(RelayClient.new())
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
proc addressMapper(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return @[MultiAddress.init("/dns4/localhost/").tryGet() & listenAddrs[0][1].tryGet()]
publicPeerSwitch.peerInfo.addressMappers.add(addressMapper)
await publicPeerSwitch.peerInfo.update()
@@ -193,38 +193,24 @@ suite "Hole Punching":
await privatePeerSwitch2.connect(privatePeerSwitch1.peerInfo.peerId, (await privatePeerRelayAddr1))
privatePeerSwitch2.connectStub = rcvConnectStub
checkExpiring:
# we can't hole punch when both peers are on the same machine. This means that the simultaneous dialings will result
# in two connection attempts, instead of one. The server dial is going to fail because it is acting as the
# tcp simultaneous incoming upgrader in the dialer, which works only in the simultaneous open case, but the client
# dial will succeed.
privatePeerSwitch1.connManager.connCount(privatePeerSwitch2.peerInfo.peerId) == 1 and
not isRelayed(privatePeerSwitch1.connManager.selectMuxer(privatePeerSwitch2.peerInfo.peerId).connection)
# wait for hole punching to finish in the background
await sleepAsync(600.millis)
await allFuturesThrowing(
privatePeerSwitch1.stop(), privatePeerSwitch2.stop(), switchRelay.stop(),
switchAux.stop(), switchAux2.stop(), switchAux3.stop(), switchAux4.stop())
asyncTest "Hole punching when peers addresses are private":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
await sleepAsync(100.millis) # avoid simultaneous dialing that causes address in use error
await self.switch.connect(peerId, addrs, forceDial, reuseConnection, upgradeDir)
await holePunchingTest(nil, connectStub, NotReachable)
await holePunchingTest(nil, nil, NotReachable)
asyncTest "Hole punching when there is an error during unilateral direct connection":
asyncTest "Hole punching when peers addresses are private and there is an error in the initiator side":
proc connectStub(self: SwitchStub,
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true,
upgradeDir = Direction.Out): Future[void] {.async.} =
dir = Direction.Out): Future[void] {.async.} =
self.connectStub = nil # this stub should be called only once
raise newException(CatchableError, "error")
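
Throughout these suites the last parameter of the connect stub is renamed from upgradeDir to dir. For reference, a forwarding stub written against the renamed signature would look roughly like the sketch below; it is hypothetical (the test above now passes nil instead of a stub), and SwitchStub is the wrapper helper from the test stubs, not imported here:

import chronos
import libp2p/peerid
import libp2p/multiaddress
import libp2p/stream/connection   # for Direction

proc forwardingConnectStub(self: SwitchStub,
                           peerId: PeerId,
                           addrs: seq[MultiAddress],
                           forceDial = false,
                           reuseConnection = true,
                           dir = Direction.Out): Future[void] {.async.} =
  self.connectStub = nil         # run the stub only once, as in the tests above
  await sleepAsync(100.millis)   # avoid simultaneous dialing on the same machine
  # forwarding dir assumes the wrapped switch's connect still takes a direction
  await self.switch.connect(peerId, addrs, forceDial, reuseConnection, dir)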


@@ -73,7 +73,7 @@ suite "Identify":
asyncTest "default agent version":
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -95,7 +95,7 @@ suite "Identify":
remotePeerInfo.agentVersion = customAgentVersion
msListen.addHandler(IdentifyCodec, identifyProto1)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -136,7 +136,7 @@ suite "Identify":
asyncTest "can send signed peer record":
msListen.addHandler(IdentifyCodec, identifyProto1)
identifyProto1.sendSignedPeerRecord = true
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)


@@ -97,7 +97,7 @@ suite "Mplex":
suite "channel half-closed":
asyncTest "(local close) - should close for write":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -112,7 +112,7 @@ suite "Mplex":
asyncTest "(local close) - should allow reads until remote closes":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -139,7 +139,7 @@ suite "Mplex":
asyncTest "(remote close) - channel should close for reading by remote":
let
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard,
)
chann = LPChannel.init(1, conn, true)
@@ -162,7 +162,7 @@ suite "Mplex":
let
testData = "Hello!".toBytes
conn = TestBufferStream.new(
proc (data: seq[byte]) {.gcsafe, async.} =
proc (data: seq[byte]) {.async.} =
discard
)
chann = LPChannel.init(1, conn, true)
@@ -175,7 +175,7 @@ suite "Mplex":
await conn.close()
asyncTest "should not allow pushing data to channel when remote end closed":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -192,7 +192,7 @@ suite "Mplex":
suite "channel reset":
asyncTest "channel should fail reading":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -205,7 +205,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete read":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -220,7 +220,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete pushData":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -239,7 +239,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -254,7 +254,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and pushes":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -279,7 +279,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete both read and push with cancel":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -293,7 +293,7 @@ suite "Mplex":
await conn.close()
asyncTest "should complete both read and push after reset":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -311,7 +311,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing push without reader":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -323,7 +323,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should complete ongoing read without a push":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -335,7 +335,7 @@ suite "Mplex":
await conn.close()
asyncTest "reset should allow all reads and pushes to complete":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -364,7 +364,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should fail writing":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(1, conn, true)
@@ -376,7 +376,7 @@ suite "Mplex":
await conn.close()
asyncTest "channel should reset on timeout":
proc writeHandler(data: seq[byte]) {.async, gcsafe.} = discard
proc writeHandler(data: seq[byte]) {.async.} = discard
let
conn = TestBufferStream.new(writeHandler)
chann = LPChannel.init(
@@ -392,11 +392,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -429,11 +429,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == "HELLO"
await stream.close()
@@ -473,12 +473,12 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
try:
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MaxMsgSize)
check msg == bigseq
trace "Bigseq check passed!"
@@ -520,11 +520,11 @@ suite "Mplex":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
await stream.writeLp("Hello from stream!")
await stream.close()
@@ -557,12 +557,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count}!"
count.inc
@@ -601,12 +601,12 @@ suite "Mplex":
let listenFut = transport1.start(ma)
let done = newFuture[void]()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var count = 1
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(1024)
check string.fromBytes(msg) == &"stream {count} from dialer!"
await stream.writeLp(&"stream {count} from listener!")
@@ -646,12 +646,12 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
try:
discard await stream.readLp(1024)
@@ -697,11 +697,11 @@ suite "Mplex":
var count = 0
var done = newFuture[void]()
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
count.inc()
if count == 10:
@@ -761,11 +761,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -805,11 +805,11 @@ suite "Mplex":
var mplexListen: Mplex
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -851,11 +851,11 @@ suite "Mplex":
var mplexHandle: Future[void]
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -896,11 +896,11 @@ suite "Mplex":
let transport1 = TcpTransport.new(upgrade = Upgrade())
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -943,11 +943,11 @@ suite "Mplex":
var listenConn: Connection
var listenStreams: seq[Connection]
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
listenConn = await transport1.accept()
let mplexListen = Mplex.new(listenConn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
listenStreams.add(stream)
await stream.join()
@@ -992,11 +992,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 1024
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
try:
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
@@ -1064,11 +1064,11 @@ suite "Mplex":
var complete = newFuture[void]()
const MsgSize = 512
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let mplexListen = Mplex.new(conn)
mplexListen.streamHandler = proc(stream: Connection)
{.async, gcsafe.} =
{.async.} =
let msg = await stream.readLp(MsgSize)
check msg.len == MsgSize
await stream.close()
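
Besides the gcsafe cleanup, nearly every case in this suite repeats the same listener-side setup: accept a raw connection, wrap it in Mplex, register a stream handler, and run the muxer loop. A condensed sketch of that pattern (the proc name is illustrative):

import chronos
import libp2p/muxers/mplex/mplex
import libp2p/stream/connection
import libp2p/transports/tcptransport

proc listenerSideSketch(transport1: TcpTransport) {.async.} =
  let conn = await transport1.accept()
  let mplexListen = Mplex.new(conn)
  mplexListen.streamHandler = proc(stream: Connection) {.async.} =
    # read one length-prefixed message from each incoming stream, then close it
    discard await stream.readLp(1024)
    await stream.close()
  await mplexListen.handle()   # runs until the underlying connection closes
  await conn.close()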


@@ -60,6 +60,7 @@ const
"/ip4/127.0.0.1/tcp/1234",
"/ip4/127.0.0.1/tcp/1234/",
"/ip4/127.0.0.1/udp/1234/quic",
"/ip4/192.168.80.3/udp/33422/quic-v1",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",
"/ip4/127.0.0.1/ipfs/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC/tcp/1234",
"/ip4/127.0.0.1/p2p/QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNKC",


@@ -34,7 +34,7 @@ type
method readOnce*(s: TestSelectStream,
pbytes: pointer,
nbytes: int): Future[int] {.async, gcsafe.} =
nbytes: int): Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -64,9 +64,9 @@ method readOnce*(s: TestSelectStream,
return "\0x3na\n".len()
method write*(s: TestSelectStream, msg: seq[byte]) {.async, gcsafe.} = discard
method write*(s: TestSelectStream, msg: seq[byte]) {.async.} = discard
method close(s: TestSelectStream) {.async, gcsafe.} =
method close(s: TestSelectStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -113,11 +113,11 @@ method readOnce*(s: TestLsStream,
copyMem(pbytes, addr buf[0], buf.len())
return buf.len()
method write*(s: TestLsStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestLsStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.ls(msg)
method close(s: TestLsStream) {.async, gcsafe.} =
method close(s: TestLsStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -137,7 +137,7 @@ type
method readOnce*(s: TestNaStream,
pbytes: pointer,
nbytes: int):
Future[int] {.async, gcsafe.} =
Future[int] {.async.} =
case s.step:
of 1:
var buf = newSeq[byte](1)
@@ -167,11 +167,11 @@ method readOnce*(s: TestNaStream,
return "\0x3na\n".len()
method write*(s: TestNaStream, msg: seq[byte]) {.async, gcsafe.} =
method write*(s: TestNaStream, msg: seq[byte]) {.async.} =
if s.step == 4:
await s.na(string.fromBytes(msg))
method close(s: TestNaStream) {.async, gcsafe.} =
method close(s: TestNaStream) {.async.} =
s.isClosed = true
s.isEof = true
@@ -197,7 +197,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.close()
@@ -210,7 +210,7 @@ suite "Multistream select":
var conn: Connection = nil
let done = newFuture[void]()
proc testLsHandler(proto: seq[byte]) {.async, gcsafe.} =
proc testLsHandler(proto: seq[byte]) {.async.} =
var strProto: string = string.fromBytes(proto)
check strProto == "\x26/test/proto1/1.0.0\n/test/proto2/1.0.0\n"
await conn.close()
@@ -218,7 +218,7 @@ suite "Multistream select":
conn = Connection(newTestLsStream(testLsHandler))
proc testHandler(conn: Connection, proto: string): Future[void]
{.async, gcsafe.} = discard
{.async.} = discard
var protocol: LPProtocol = new LPProtocol
protocol.handler = testHandler
ms.addHandler("/test/proto1/1.0.0", protocol)
@@ -230,7 +230,7 @@ suite "Multistream select":
let ms = MultistreamSelect.new()
var conn: Connection = nil
proc testNaHandler(msg: string): Future[void] {.async, gcsafe.} =
proc testNaHandler(msg: string): Future[void] {.async.} =
check msg == "\x03na\n"
await conn.close()
conn = newTestNaStream(testNaHandler)
@@ -238,7 +238,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} = discard
Future[void] {.async.} = discard
protocol.handler = testHandler
ms.addHandler("/unabvailable/proto/1.0.0", protocol)
@@ -250,7 +250,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -262,7 +262,7 @@ suite "Multistream select":
let transport1 = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
await conn.close()
@@ -293,7 +293,7 @@ suite "Multistream select":
# Unblock the 5 streams, check that we can open a new one
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await blocker
await conn.writeLp("Hello!")
await conn.close()
@@ -315,7 +315,7 @@ suite "Multistream select":
await msListen.handle(c)
await c.close()
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
while true:
let conn = await transport1.accept()
asyncSpawn acceptedOne(conn)
@@ -362,7 +362,7 @@ suite "Multistream select":
let msListen = MultistreamSelect.new()
var protocol: LPProtocol = new LPProtocol
protocol.handler = proc(conn: Connection, proto: string) {.async, gcsafe.} =
protocol.handler = proc(conn: Connection, proto: string) {.async.} =
# never reached
discard
@@ -379,7 +379,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
let listenFut = transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
try:
await msListen.handle(conn)
@@ -412,7 +412,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
check proto == "/test/proto/1.0.0"
await conn.writeLp("Hello!")
await conn.close()
@@ -424,7 +424,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
@@ -450,7 +450,7 @@ suite "Multistream select":
var protocol: LPProtocol = new LPProtocol
proc testHandler(conn: Connection,
proto: string):
Future[void] {.async, gcsafe.} =
Future[void] {.async.} =
await conn.writeLp(&"Hello from {proto}!")
await conn.close()
@@ -462,7 +462,7 @@ suite "Multistream select":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(ma)
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let conn = await transport1.accept()
await msListen.handle(conn)
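
The pattern these cases exercise on the listening side: register an LPProtocol handler on a MultistreamSelect instance, then feed it accepted connections. A condensed sketch (proc and codec names are illustrative):

import chronos
import libp2p/multistream
import libp2p/protocols/protocol
import libp2p/stream/connection

proc newListeningSelect(): MultistreamSelect =
  let ms = MultistreamSelect.new()
  var protocol: LPProtocol = new LPProtocol
  protocol.handler = proc(conn: Connection, proto: string) {.async.} =
    # reply on whatever protocol was negotiated, then close the stream
    await conn.writeLp("Hello from " & proto & "!")
    await conn.close()
  ms.addHandler("/test/proto/1.0.0", protocol)
  ms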


@@ -41,7 +41,7 @@ type
{.push raises: [].}
method init(p: TestProto) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let msg = string.fromBytes(await conn.readLp(1024))
check "Hello!" == msg
await conn.writeLp("Hello!")
@@ -72,7 +72,7 @@ proc createSwitch(ma: MultiAddress; outgoing: bool, secio: bool = false): (Switc
[Secure(Noise.new(rng, privateKey, outgoing = outgoing))]
connManager = ConnManager.new()
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(muxers, secureManagers, connManager, ms)
muxedUpgrade = MuxedUpgrade.new(muxers, secureManagers, ms)
transports = @[Transport(TcpTransport.new(upgrade = muxedUpgrade))]
let switch = newSwitch(
@@ -100,7 +100,7 @@ suite "Noise":
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
try:
await sconn.write("Hello!")
finally:
@@ -115,7 +115,7 @@ suite "Noise":
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
var msg = newSeq[byte](6)
await sconn.readExactly(addr msg[0], 6)
@@ -140,11 +140,11 @@ suite "Noise":
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
var conn: Connection
try:
conn = await transport1.accept()
discard await serverNoise.secure(conn, false, Opt.none(PeerId))
discard await serverNoise.secure(conn, Opt.none(PeerId))
except CatchableError:
discard
finally:
@@ -160,7 +160,7 @@ suite "Noise":
var sconn: Connection = nil
expect(NoiseDecryptTagError):
sconn = await clientNoise.secure(conn, true, Opt.some(conn.peerId))
sconn = await clientNoise.secure(conn, Opt.some(conn.peerId))
await conn.close()
await handlerWait
@@ -178,9 +178,9 @@ suite "Noise":
let transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
asyncSpawn transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
await conn.close()
@@ -196,7 +196,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.write("Hello!")
await acceptFut
@@ -221,9 +221,9 @@ suite "Noise":
transport1: TcpTransport = TcpTransport.new(upgrade = Upgrade())
listenFut = transport1.start(server)
proc acceptHandler() {.async, gcsafe.} =
proc acceptHandler() {.async.} =
let conn = await transport1.accept()
let sconn = await serverNoise.secure(conn, false, Opt.none(PeerId))
let sconn = await serverNoise.secure(conn, Opt.none(PeerId))
defer:
await sconn.close()
let msg = await sconn.readLp(1024*1024)
@@ -237,7 +237,7 @@ suite "Noise":
clientInfo = PeerInfo.new(clientPrivKey, transport1.addrs)
clientNoise = Noise.new(rng, clientPrivKey, outgoing = true)
conn = await transport2.dial(transport1.addrs[0])
let sconn = await clientNoise.secure(conn, true, Opt.some(serverInfo.peerId))
let sconn = await clientNoise.secure(conn, Opt.some(serverInfo.peerId))
await sconn.writeLp(hugePayload)
await readTask
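
The boolean initiator flag is gone from secure() in this changeset; presumably the handshake direction now comes from how each Noise instance was constructed (outgoing = true or false) rather than from the call site. A sketch of the two call sites with the new signature (proc names are illustrative):

import chronos
import stew/results
import libp2p/peerid
import libp2p/stream/connection
import libp2p/protocols/secure/noise

# listening side: no expected remote peer
proc secureInbound(serverNoise: Noise, conn: Connection): Future[Connection] {.async.} =
  return await serverNoise.secure(conn, Opt.none(PeerId))

# dialing side: pin the expected remote peer id
proc secureOutbound(clientNoise: Noise, conn: Connection,
                    remote: PeerId): Future[Connection] {.async.} =
  return await clientNoise.secure(conn, Opt.some(remote))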


@@ -42,7 +42,7 @@ suite "Ping":
transport1 = TcpTransport.new(upgrade = Upgrade())
transport2 = TcpTransport.new(upgrade = Upgrade())
proc handlePing(peer: PeerId) {.async, gcsafe, closure.} =
proc handlePing(peer: PeerId) {.async, closure.} =
inc pingReceivedCount
pingProto1 = Ping.new()
pingProto2 = Ping.new(handlePing)
@@ -63,7 +63,7 @@ suite "Ping":
asyncTest "simple ping":
msListen.addHandler(PingCodec, pingProto1)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
@@ -78,7 +78,7 @@ suite "Ping":
asyncTest "ping callback":
msDial.addHandler(PingCodec, pingProto2)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
discard await msListen.select(c, PingCodec)
discard await pingProto1.ping(c)
@@ -92,7 +92,7 @@ suite "Ping":
asyncTest "bad ping data ack":
type FakePing = ref object of LPProtocol
let fakePingProto = FakePing()
proc fakeHandle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc fakeHandle(conn: Connection, proto: string) {.async, closure.} =
var
buf: array[32, byte]
fakebuf: array[32, byte]
@@ -103,7 +103,7 @@ suite "Ping":
msListen.addHandler(PingCodec, fakePingProto)
serverFut = transport1.start(@[ma])
proc acceptHandler(): Future[void] {.async, gcsafe.} =
proc acceptHandler(): Future[void] {.async.} =
let c = await transport1.accept()
await msListen.handle(c)
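
For context, the round trip these cases exercise: the listening side installs a Ping instance (optionally with a callback, as with Ping.new(handlePing) above), and the dialing side negotiates PingCodec on a stream and calls ping(), which returns the measured round-trip time. A condensed sketch (the proc name is illustrative):

import chronos
import libp2p/stream/connection
import libp2p/protocols/ping

proc pingRoundTrip(conn: Connection) {.async.} =
  # conn is assumed to be a stream already negotiated for PingCodec;
  # the listening side would have installed its own Ping.new(handler) behind it
  let pingProto = Ping.new()
  let rtt = await pingProto.ping(conn)
  echo "round trip took ", rtt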

Some files were not shown because too many files have changed in this diff.