Compare commits

...

80 Commits

Author SHA1 Message Date
Vlado Pajić
4e90ad2056 test unused dependency 2025-04-22 17:17:22 +02:00
Gabriel Cruz
ffc114e8d9 chore: fix broken old status-im links (#1332) 2025-04-22 09:14:26 +00:00
Radosław Kamiński
f2be2d6ed5 test: include missing tests in testall (#1338) 2025-04-22 09:45:38 +01:00
Radosław Kamiński
ab690a06a6 test: combine tests (#1335) 2025-04-21 17:39:42 +01:00
Radosław Kamiński
10cdaf14c5 chore(ci): decouple examples from unit tests (#1334) 2025-04-21 16:31:50 +01:00
Radosław Kamiński
ebbfb63c17 chore(test): remove unused flags and simplify testpubsub (#1328) 2025-04-17 13:38:27 +02:00
Álex
ac25da6cea test(gossipsub): message propagation (#1184)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-14 15:49:13 +01:00
Gabriel Cruz
fb41972ba3 chore: rendezvous improvements (#1319) 2025-04-11 13:31:24 +00:00
Richard Ramos
504d1618af fix: bump nim-quic 2025-04-10 17:54:20 -04:00
richΛrd
0f91b23f12 fix: do not use while loop for quic transport errors (#1317) 2025-04-10 21:47:42 +00:00
vladopajic
5ddd62a8b9 chore(git): ignore auto generated test binaries (#1320) 2025-04-10 13:39:04 +00:00
vladopajic
e7f13a7e73 refactor: utilize single bridgedConnections (#1309) 2025-04-10 08:37:26 -04:00
vladopajic
89e825fb0d fix(quic): continue accept when client certificate is incorrect (#1312) 2025-04-08 21:03:47 +02:00
richΛrd
1b706e84fa chore: bump nim-quic (#1314) 2025-04-08 10:04:31 -04:00
richΛrd
5cafcb70dc chore: remove range checks from rendezvous (#1306) 2025-04-07 12:16:18 +00:00
vladopajic
8c71266058 chore(readme): add quic and memory transports (#1311) 2025-04-04 15:07:31 +00:00
vladopajic
9c986c5c13 feat(transport): add memory transport (#1304) 2025-04-04 15:43:34 +02:00
vladopajic
3d0451d7f2 chore(protocols): remove deprecated utilities (#1305) 2025-04-04 08:44:36 +00:00
richΛrd
b1f65c97ae fix: unsafe string usage (#1308) 2025-04-03 15:33:08 -04:00
vladopajic
5584809fca chore(certificate): update test vectors (#1294) 2025-04-01 17:15:26 +02:00
richΛrd
7586f17b15 fix: set peerId on incoming Quic connection (#1302) 2025-03-31 09:38:30 -04:00
richΛrd
0e16d873c8 feat: withQuicTransport (#1301) 2025-03-30 04:44:49 +00:00
richΛrd
b11acd2118 chore: update quic and expect exception in test (#1300)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-27 12:19:49 -04:00
vladopajic
1376f5b077 chore(quic): add tests with invalid certs (#1297) 2025-03-27 15:19:14 +01:00
richΛrd
340ea05ae5 feat: quic (#1265)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-26 10:17:15 -04:00
vladopajic
024ec51f66 feat(certificate): add date verification (#1299) 2025-03-25 11:50:25 +01:00
richΛrd
efe453df87 refactor: use openssl instead of mbedtls (#1298) 2025-03-24 10:22:52 -04:00
vladopajic
c0f4d903ba feat(certificate): set distinguishable issuer name with peer id (#1296) 2025-03-21 12:38:02 +00:00
vladopajic
28f2b268ae chore(certificate): cosmetics (#1293) 2025-03-19 17:02:14 +00:00
vladopajic
5abb6916b6 feat: X.509 certificate validation (#1292) 2025-03-19 15:40:14 +00:00
richΛrd
e6aec94c0c chore: use token per repo in autobump task (#1288) 2025-03-18 17:12:52 +00:00
vladopajic
9eddc7c662 chore: specify exceptions (#1284) 2025-03-17 13:09:18 +00:00
richΛrd
028c730a4f chore: remove python dependency (#1287) 2025-03-17 08:04:30 -04:00
richΛrd
3c93bdaf80 chore(version): update libp2p.nimble to 1.9.0 (#1282)
Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
2025-03-14 15:56:51 +00:00
vladopajic
037b99997e chore(ci): remove AppVeyor config (#1281) 2025-03-10 21:09:40 +00:00
vladopajic
e67744bf2a chore(connmanager): propagate CancelledError (#1276) 2025-03-05 17:31:46 +00:00
vladopajic
5843e6fb4f chore(protocol): handler to propagate CancelledError (#1275) 2025-03-05 15:04:29 +00:00
vladopajic
f0ff7e4c69 fix(ci): transport interoperability action (#1277) 2025-03-04 19:00:08 +01:00
diegomrsantos
24808ad534 feat(tls-certificate): generate and parse libp2p tls certificate (#1209)
Co-authored-by: Richard Ramos <info@richardramos.me>
2025-03-04 08:05:40 -04:00
vladopajic
c4bccef138 chore(relay): specify raised exceptions (#1274) 2025-03-03 15:18:27 +01:00
vladopajic
adf2345adb chore: specify raised exceptions in miscellaneous places (#1269) 2025-02-28 09:19:53 -04:00
vladopajic
f7daad91e6 chore(protocols): specify raised exceptions (part 2) (#1268) 2025-02-28 08:06:51 -04:00
vladopajic
65052d7b59 chore(transports): specify raised exceptions (#1266) 2025-02-27 10:42:05 +01:00
vladopajic
b07ec5c0c6 chore(protocol): list raised exceptions (#1260) 2025-02-25 17:33:24 -04:00
vladopajic
f4c94ddba1 chore(dialer): list raised exceptions (#1264) 2025-02-24 20:10:20 +01:00
vladopajic
a7ec485ca9 chore(transports): list raised exceptions (#1255) 2025-02-24 16:42:27 +01:00
vladopajic
86b6469e35 chore: list raised exceptions in switch services (#1251)
Co-authored-by: richΛrd <info@richardramos.me>
2025-02-24 15:33:06 +01:00
Ivan FB
3e16ca724d chore: add trace log to analyse remote stream reset (#1253) 2025-02-24 08:24:27 -04:00
vladopajic
93dd5a6768 chore(connmanager): specify raised exceptions (#1263) 2025-02-19 15:55:36 +00:00
richΛrd
ec43d0cb9f chore: refactors to remove .closure., .gcsafe for .async. procs, and added callback compatibility to daemonapi (#1240) 2025-02-19 15:00:44 +00:00
vladopajic
8469a750e7 chore: add description of public pragma (#1262) 2025-02-19 14:14:50 +01:00
vladopajic
fc6ac07ce8 ci: utilize github action for nph check (#1257) 2025-02-17 15:42:58 +00:00
vladopajic
79cdc31b37 ci: remove commit message check (#1256) 2025-02-17 12:35:42 +00:00
vladopajic
be33ad6ac7 chore: specify raising exceptions in daemon module (#1249) 2025-02-14 15:36:29 +01:00
vladopajic
a6e45d6157 chore: list raised exceptions in utils module (#1252)
Part of the https://github.com/vacp2p/nim-libp2p/issues/962 effort.
2025-02-13 11:27:29 -04:00
richΛrd
37e0f61679 chore: update maintainers (#1248) 2025-02-11 14:32:12 -04:00
vladopajic
5d382b6423 style: fix code style with nph (#1246) 2025-02-11 15:39:08 +00:00
richΛrd
78a4344054 refactor(pubsub): do not raise exceptions on publish (#1244) 2025-02-07 01:30:21 +00:00
Ivan FB
a4f0a638e7 chore: add isClosedRemotely public bool attribute in lpstream (#1242)
Also adds gcsafe to the getStreams method so that it can be invoked from async procs
2025-02-06 09:54:00 -04:00
richΛrd
c5aa3736f9 chore(version): update libp2p.nimble to 1.8.0 (#1236)
This PR's commit is planned to be tagged as v1.8.0
2025-01-22 08:45:56 -04:00
richΛrd
b0f83fd48c fix: use target repository branch as suffix for nim-libp2p-auto-bump- (#1238)
The branch `nim-libp2p-auto-bump-unstable` was not getting autobumped
because, at one point in time, the nim-libp2p `unstable` branch was renamed
to `master`. Since the workflow file depended on `GITHUB_REF`, it was
updating `nim-libp2p-auto-bump-master` instead of
`nim-libp2p-auto-bump-unstable`
2025-01-15 14:25:21 -04:00
richΛrd
d6e5094095 fix(pubsub): revert async: raises: [] annotation for TopicHandler and ValidatorHandler (#1237) 2025-01-15 03:35:24 +00:00
richΛrd
483e1d91ba feat: idontwant on publish (#1230)
Closes #1224 

The following changes were done:
1. Once a message is broadcast, if it exceeds the threshold for a large
message, an IDONTWANT message is sent before the message itself is
published

4f9c21f699/libp2p/protocols/pubsub/gossipsub.nim (L800-L801)
2. Extracts the logic to broadcast an IDONTWANT message and to verify
the size of a message into separate functions
3. Adds a template to filter values of a hashset without modifying the
original
2025-01-14 16:59:38 -04:00
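A minimal, self-contained Nim sketch of the publish-side rule in point 1; the names (`idontwantThreshold`, `sendIDontWant`, `publishToMesh`) are illustrative placeholders, not the actual gossipsub.nim identifiers:

```nim
type Message = object
  id: string
  data: seq[byte]

const idontwantThreshold = 1000 # assumed byte cutoff for a "large" message

proc sendIDontWant(peer, msgId: string) =
  echo "IDONTWANT ", msgId, " -> ", peer

proc sendMessage(peer: string, msg: Message) =
  echo "publish ", msg.id, " -> ", peer

proc publishToMesh(peers: seq[string], msg: Message) =
  # For large messages, announce IDONTWANT first so mesh peers
  # don't echo the payload back while it is still in flight.
  if msg.data.len > idontwantThreshold:
    for p in peers:
      sendIDontWant(p, msg.id)
  for p in peers:
    sendMessage(p, msg)

publishToMesh(@["peerA", "peerB"], Message(id: "m1", data: newSeq[byte](4096)))
```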
richΛrd
d215bb21e0 fix(multiaddress): don't re-export minprotobuf (#1235) 2025-01-14 15:47:06 -04:00
richΛrd
61ac0c5b95 feat(pubsub): add {.async: (raises).} annotations (#1233)
This PR adds `{.async: (raises).}` annotations to the pubsub package.
The cases in which a `raises: [CatchableError]` was added involve code that
is not part of the package and should probably be changed in a separate
PR
2025-01-14 17:01:02 +01:00
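For readers unfamiliar with the annotation, here is a minimal self-contained chronos sketch (not pubsub code) of what `{.async: (raises).}` enforces; the compiler rejects any code path that could raise an unlisted exception:

```nim
import chronos

proc fetch(): Future[int] {.async: (raises: [ValueError, CancelledError]).} =
  await sleepAsync(1.milliseconds)
  raise newException(ValueError, "example failure")

proc main() {.async: (raises: []).} =
  # With an empty raises list, every exception must be handled here.
  try:
    discard await fetch()
  except ValueError as e:
    echo "handled: ", e.msg
  except CancelledError:
    discard

waitFor main()
```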
diegomrsantos
1fa30f07e8 chore(ci): add arm64 for macOS (#1212)
This PR adds the macOS 14 GitHub runner, which uses the arm64 CPU.
2024-12-20 21:18:56 -04:00
richΛrd
39d0451a10 chore: validate PR titles and commits, and autoassign PRs (#1227) 2024-12-20 15:42:54 +01:00
richΛrd
4dc7a89f45 chore: use latest nimpng and nico (#1229) 2024-12-17 14:37:06 -04:00
richΛrd
fd26f93b80 fix(ci): use nim 2.0 branch (#1228) 2024-12-09 19:41:08 +00:00
Etan Kissling
dd2c74d413 feat(nameresolving): Add {.async: (raises).} annotations (#1214)
Modernize `nameresolving` modules to track `{.async: (raises).}`.

---------

Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
Co-authored-by: richΛrd <info@richardramos.me>
2024-12-09 12:43:21 +01:00
Eugene Kabanov
b7e0df127f Fix PeerStore missing remote endpoints of established connection. (#1226) 2024-11-27 14:49:41 +02:00
kaiserd
f591e692fc chore(version): update libp2p.nimble to 1.7.1 (#1223)
Minor version bump for the rendezvous fix.
2024-11-09 10:51:33 +07:00
NagyZoltanPeter
8855bce085 fix: missing raises pragma for one of RendezVous.new (#1222) 2024-11-08 11:11:51 +01:00
kaiserd
ed5670408b chore(version): update libp2p.nimble to 1.7.0 (#1213)
This commit is planned to be tagged with v1.7.0.
The only feature added in this version is the configurable min and max TTL
for rendezvous.

Co-authored-by: ksr <kaiserd@users.noreply.github.com>
2024-11-03 18:15:33 +00:00
Álex
97192a3c80 fix(ci): nim 2.0 dependency failure (#1216)
Broken due to an update to Nimble 0.16.2 for the version-2-0 branch.
This PR sets the ref to the previous commit, and also updates the key
naming so it fits better.

Nim's commit list: https://github.com/nim-lang/Nim/commits/version-2-0/

# Important Note
In this PR some required jobs' names were changed. They need to be
updated at the repo level before this PR can be merged.
2024-10-31 17:56:13 +00:00
Álex
294d06323c docs(test): handle IHAVE / IWANT tests (#1202)
Add documentation as requested.
2024-10-31 17:23:24 +00:00
Álex
a3b8729cbe fix(ci): Daily workflows report (#1200)
Fix daily workflows' green tick when jobs failed.

Based off of: https://stackoverflow.com/a/58859404

Closes:  https://github.com/vacp2p/nim-libp2p/issues/1197

---------

Co-authored-by: Diego <diego@status.im>
2024-10-10 12:10:49 +00:00
Álex
6c970911f2 fix(CI): free disk space on interop transport job (#1206)
Re-add the free disk space job, as space issues still happen when
building images.

---------

Co-authored-by: Diego <diego@status.im>
2024-10-10 09:42:25 +00:00
Álex
5d48776b02 chore(ci): Enable S3 caching for interop (#1193)
- Adds our S3 bucket for caching Docker images, as Protocol Labs shut
down their shared one.
- Removes the free disk space workaround that kept the jobs from
failing due to the images using too much space.

---------

Co-authored-by: diegomrsantos <diego@status.im>
2024-09-26 09:56:09 +00:00
Simon-Pierre Vivier
d389d96789 feat: rendezvous refactor (#1183)
Hello!

This PR aims to refactor the rendezvous code so that it is easier to
implement the Waku rendezvous strategy. The hardcoded min and max TTLs were
out of range for what we needed, and specifying which peers to interact
with is also needed since Waku deals with peers on multiple separate shards.

I tried to keep the changes to a minimum; specifically, I did not change
the name of any public procs, which results in less-than-descriptive names
in some cases. I also wanted to return results instead of raising
exceptions but didn't. Would it be acceptable to do so?

Please advise on best practices, thank you.

---------

Co-authored-by: Ludovic Chenut <ludovic@status.im>
2024-09-25 09:11:57 +00:00
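As a usage sketch of the configurable TTLs this refactor introduces (the parameter names `minDuration`/`maxDuration` are assumptions drawn from the PR descriptions, not verified signatures):

```nim
import chronos, libp2p, libp2p/protocols/rendezvous

# Hypothetical bounds a Waku-style deployment might pick in place of the
# previously hardcoded defaults.
let rdv = RendezVous.new(
  minDuration = 1.hours,  # smallest registration TTL accepted
  maxDuration = 72.hours, # largest registration TTL accepted
)
```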
149 changed files with 6105 additions and 2150 deletions

View File

@@ -1,52 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
- p2pdCache
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
- git submodule update --init --recursive
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
# set path for produced Go binaries
- MKDIR goblin
- CD goblin
- SET GOPATH=%CD%
- SET PATH=%GOPATH%\bin;%PATH%
- CD ..
# install and build go-libp2p-daemon
- bash scripts/build_p2pd.sh p2pdCache v0.3.0
build_script:
- nimble install -y --depsOnly
test_script:
- nimble test
- nimble examples_build
deploy: off

View File

@@ -6,7 +6,7 @@ inputs:
cpu:
description: "CPU to build for"
default: "amd64"
nim_branch:
nim_ref:
description: "Nim version"
default: "version-1-6"
shell:
@@ -88,6 +88,8 @@ runs:
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
PLATFORM=arm64
else
PLATFORM=x86
fi
@@ -117,7 +119,7 @@ runs:
uses: actions/cache@v4
with:
path: '${{ github.workspace }}/nim'
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_branch }}-cache-${{ env.cache_nonce }}
key: ${{ inputs.os }}-${{ inputs.cpu }}-nim-${{ inputs.nim_ref }}-cache-${{ env.cache_nonce }}
- name: Build Nim and Nimble
shell: ${{ inputs.shell }}
@@ -126,6 +128,6 @@ runs:
# We don't want partial matches of the cache restored
rm -rf nim
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_branch }} \
env MAKE="${MAKE_CMD} -j${ncpu}" ARCH_OVERRIDE=${PLATFORM} NIM_COMMIT=${{ inputs.nim_ref }} \
QUICK_AND_DIRTY_COMPILER=1 QUICK_AND_DIRTY_NIMBLE=1 CC=gcc \
bash build_nim.sh nim csources dist/nimble NimBinaries

12 .github/workflows/auto_assign_pr.yml vendored Normal file
View File

@@ -0,0 +1,12 @@
name: Auto Assign PR to Creator
on:
pull_request:
types:
- opened
jobs:
assign_creator:
runs-on: ubuntu-latest
steps:
- uses: toshimaru/auto-author-assign@v1.6.2

View File

@@ -27,12 +27,14 @@ jobs:
cpu: amd64
- os: macos
cpu: amd64
- os: macos-14
cpu: arm64
- os: windows
cpu: amd64
nim:
- branch: version-1-6
nim:
- ref: version-1-6
memory_management: refc
- branch: version-2-0
- ref: version-2-0
memory_management: refc
include:
- platform:
@@ -47,6 +49,10 @@ jobs:
os: macos
builder: macos-13
shell: bash
- platform:
os: macos-14
builder: macos-14
shell: bash
- platform:
os: windows
builder: windows-2022
@@ -56,7 +62,7 @@ jobs:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.branch }})'
name: '${{ matrix.platform.os }}-${{ matrix.platform.cpu }} (Nim ${{ matrix.nim.ref }})'
runs-on: ${{ matrix.builder }}
steps:
- name: Checkout
@@ -70,12 +76,12 @@ jobs:
os: ${{ matrix.platform.os }}
cpu: ${{ matrix.platform.cpu }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.nim.branch }}
nim_ref: ${{ matrix.nim.ref }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
@@ -86,9 +92,9 @@ jobs:
uses: actions/cache@v3
with:
path: nimbledeps
# Using nim.branch as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0.
key: nimbledeps-${{ matrix.nim.branch }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
# Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
@@ -109,5 +115,6 @@ jobs:
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test

View File

@@ -10,5 +10,5 @@ jobs:
name: Daily amd64
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"

View File

@@ -7,7 +7,7 @@ on:
nim:
description: 'Nim Configuration'
required: true
type: string # Following this format: [{"branch": ..., "memory_management": ...}, ...]
type: string # Following this format: [{"ref": ..., "memory_management": ...}, ...]
cpu:
description: 'CPU'
required: true
@@ -58,9 +58,8 @@ jobs:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.branch }})'
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.nim.ref }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.nim.branch == 'devel' || matrix.nim.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
@@ -70,13 +69,13 @@ jobs:
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.nim.branch }}
nim_ref: ${{ matrix.nim.ref }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0'
cache: false
- name: Install p2pd

View File

@@ -10,5 +10,5 @@ jobs:
name: Daily Nim Devel
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'devel', 'memory_management': 'orc'}]"
nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['amd64']"

View File

@@ -10,6 +10,6 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-1-6', 'memory_management': 'refc'}, {'branch': 'version-2-0', 'memory_management': 'refc'}, {'branch': 'devel', 'memory_management': 'orc'}]"
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"

View File

@@ -10,6 +10,6 @@ jobs:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'branch': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true

View File

@@ -17,10 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_NIMBUS_ETH2 }}
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NWAKU }}
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_NIM_CODEX }}
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
token: ${{ matrix.target.token }}
- name: Checkout this ref in target repository
run: |
@@ -44,7 +47,7 @@ jobs:
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}

60 .github/workflows/examples.yml vendored Normal file
View File

@@ -0,0 +1,60 @@
name: Examples
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
examples:
timeout-minutes: 30
strategy:
fail-fast: false
defaults:
run:
shell: bash
name: "Build Examples"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
shell: bash
os: linux
cpu: amd64
nim_ref: version-1-6
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Build and run examples
run: |
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble examples

View File

@@ -17,8 +17,9 @@ jobs:
name: Run transport interoperability tests
runs-on: ubuntu-22.04
steps:
- name: Free Disk Space (Ubuntu)
# For some reason the original job (libp2p/test-plans) has enough disk space, but this one doesn't.
- name: Free Disk Space
# For some reason we have space issues while running this action. Likely while building the image.
# This action will free up some space to avoid the issue.
uses: jlumbroso/free-disk-space@v1.3.1
with:
tool-cache: true
@@ -31,7 +32,14 @@ jobs:
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-hole-punching-interop:
name: Run hole-punching interoperability tests
@@ -46,3 +54,7 @@ jobs:
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

View File

@@ -18,17 +18,10 @@ jobs:
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
- name: Setup NPH
# Pin nph to a specific version to avoid sudden style differences.
# Updating nph version should be accompanied with running the new version on the fluffy directory.
run: |
VERSION="v0.5.1"
ARCHIVE="nph-linux_x64.tar.gz"
curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
tar -xzf ${ARCHIVE}
- name: Check style
run: |
shopt -s extglob # Enable extended globbing
./nph examples libp2p tests tools *.@(nim|nims|nimble)
git diff --exit-code
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests tools *.nim*"
fail: true
suggest: true

35 .github/workflows/pr_lint.yml vendored Normal file
View File

@@ -0,0 +1,35 @@
name: "Conventional Commits"
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: amannn/action-semantic-pull-request@v5
id: lint_pr_title
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: marocchino/sticky-pull-request-comment@v2
# When the previous steps fails, the workflow would stop. By adding this
# condition you can continue the execution with the populated error message.
if: always() && (steps.lint_pr_title.outputs.error_message != null)
with:
header: pr-title-lint-error
message: |
Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)
# Delete a previous comment when the issue has been resolved
- if: ${{ steps.lint_pr_title.outputs.error_message == null }}
uses: marocchino/sticky-pull-request-comment@v2
with:
header: pr-title-lint-error
delete: true

7 .gitignore vendored
View File

@@ -17,3 +17,10 @@ examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests),
# by ignoring anything that does not have following file name scheme:
# has extension or is Dockerfile...
/tests/*
!/tests/*.*
!/tests/Dockerfile

View File

@@ -2,13 +2,12 @@ bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
quic;https://github.com/status-im/nim-quic.git@#d54e8f0f2e454604b767fadeae243d95c30c383f
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35

View File

@@ -70,6 +70,8 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -145,9 +147,8 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
</tr>
</tbody>
</table>

View File

@@ -26,15 +26,22 @@ proc main() {.async.} =
let customProtoCodec = "/test"
var proto = new LPProtocol
proto.codec = customProtoCodec
proto.handler = proc(conn: Connection, proto: string) {.async.} =
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
proto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
let
relay = Relay.new()

View File

@@ -43,12 +43,17 @@ proc new(T: typedesc[ChatProto], c: Chat): T =
let chatproto = T()
# create handler for incoming connection
proc handle(stream: Connection, proto: string) {.async.} =
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
await stream.close()
else:
await c.handlePeer(stream)
proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
else:
await c.handlePeer(stream)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await stream.close()
# assign the new handler

View File

@@ -0,0 +1,3 @@
{.used.}
import directchat, tutorial_6_game

View File

@@ -0,0 +1,5 @@
{.used.}
import
helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery
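This aggregator file uses the standard Nim pattern for combining tests: importing a module runs its top-level code (the test suites), and the `{.used.}` pragma marks the module as used so importers don't trigger unused-import warnings. A sketch of the same pattern with hypothetical module names:

```nim
# alltests.nim (hypothetical): compiling and running this one file
# executes every suite defined in the imported test modules.
{.used.}
import testfoo, testbar
```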

View File

@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except:
echo getCurrentException().msg
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData

View File

@@ -13,7 +13,7 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.15.15`.
Go with version `1.16.0`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
@@ -21,7 +21,7 @@ Follow one of the methods below:
## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
We recommend using `1.16.0`, as previously stated.
```sh
./scripts/build_p2pd.sh
```

View File

@@ -11,12 +11,17 @@ type TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will be in handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
# We must close the connections ourselves when we're done with it
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)

View File

@@ -25,12 +25,17 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will in be handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)

View File

@@ -108,12 +108,18 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb

View File

@@ -79,8 +79,7 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr:
return ValidationResult.Reject
return ValidationResult.Accept
,
return ValidationResult.Accept,
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
@@ -93,7 +92,8 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
node.gossip.subscribe(
"metrics",
proc(topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
let m = MetricList.decode(data).expect("metric can be decoded")
echo m
,
)
else:
@@ -158,8 +158,8 @@ waitFor(main())
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)

View File

@@ -34,9 +34,15 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)

View File

@@ -152,21 +152,26 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
defer:
await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
try:
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
@@ -214,8 +219,7 @@ proc networking(g: Game) {.async.} =
# We are "player 2"
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard
,
discard,
)
await switch.start()
@@ -268,14 +272,11 @@ nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(
proc() =
discard
,
discard,
proc(dt: float32) =
game.update(dt)
,
game.update(dt),
proc() =
game.draw()
,
game.draw(),
)
waitFor(netFut.cancelAndWait())

View File

@@ -17,7 +17,7 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.6.0"
version = "1.9.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -11,7 +11,7 @@ requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
"websock", "unittest2",
"https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc"
"https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,12 +25,8 @@ let cfg =
import hashes, strutils
proc runTest(
filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
proc runTest(filename: string, moreoptions: string = "") =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
@@ -60,51 +56,16 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest(
"pubsub/testpubsub",
sign = false,
verify = false,
moreoptions = "-d:libp2p_pubsub_anonymize=true",
)
task testpubsub_slim, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testgossipinternal")
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest(
"testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
)
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
exec "nimble testnative"
exec "nimble testpubsub"
exec "nimble testdaemon"
exec "nimble testinterop"
runTest("testall")
exec "nimble testfilter"
exec "nimble examples_build"
task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testnative"
exec "nimble testpubsub_slim"
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
@@ -116,19 +77,12 @@ task website, "Build the website":
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD"
# this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico@#af99dd60bf2b395038ece815ea1012330a80d6e6"
buildSample("tutorial_6_game", false, "--styleCheck:off")
task examples, "Build and run examples":
exec "nimble install -y nimpng"
exec "nimble install -y nico --passNim=--skipParentCfg"
buildSample("examples_build", false, "--styleCheck:off") # build only
buildSample("examples_run", true)
# pin system
# while nimble lockfile

View File

@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport],
transports/[transport, tcptransport, quictransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -37,8 +37,11 @@ import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
TransportProvider* {.public.} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise
@@ -151,7 +154,7 @@ proc withTransport*(
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
@@ -162,10 +165,22 @@ proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
MemoryTransport.new(upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
@@ -270,7 +285,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports.add(tProvider(muxedUpgrade, seckey))
transports
if b.secureManagers.len == 0:

View File

@@ -42,8 +42,9 @@ type
else:
discard
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe, raises: [].}
ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
PeerEventKind* {.pure.} = enum
Left
@@ -57,8 +58,9 @@ type
else:
discard
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
@@ -123,7 +125,9 @@ proc removeConnEventHandler*(
) =
c.connEvents[kind].excl(handler)
proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async.} =
proc triggerConnEvent*(
c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -154,7 +158,9 @@ proc removePeerEventHandler*(
) =
c.peerEvents[kind].excl(handler)
proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.async.} =
proc triggerPeerEvents*(
c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
@@ -174,7 +180,7 @@ proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.asyn
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
@@ -183,7 +189,7 @@ proc expectConnection*(
"Already expecting an incoming connection from that peer",
)
let future = newFuture[Muxer]()
let future = Future[Muxer].Raising([CancelledError]).init()
c.expectedConnectionsOverLimit[key] = future
try:
@@ -205,18 +211,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
let conn = muxer.connection
return muxer in c.muxed.getOrDefault(conn.peerId)
proc closeMuxer(muxer: Muxer) {.async.} =
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
trace "Cleaning up muxer", m = muxer
await muxer.close()
if not (isNil(muxer.handler)):
try:
await muxer.handler # TODO noraises?
await muxer.handler
except CatchableError as exc:
trace "Exception in close muxer handler", description = exc.msg
trace "Cleaned up muxer", m = muxer
proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
try:
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
@@ -238,7 +244,7 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler", mux, description = exc.msg
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
## connection close even handler
##
## triggers the connections resource cleanup
@@ -272,7 +278,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
trace "connection not found", peerId
return mux
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
## store the connection and muxer
##
@@ -324,7 +330,9 @@ proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
proc getIncomingSlot*(
c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
@@ -339,25 +347,21 @@ proc getOutgoingSlot*(
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
return if dir == In: c.inSema else: c.outSema
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
return semaphore(c, dir).count
proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
else:
cs.connManager.outSema.release()
semaphore(cs.connManager, cs.direction).release()
proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
if isNil(conn):
cs.release()
return
proc semaphoreMonitor() {.async.} =
proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
try:
await conn.join()
except CatchableError as exc:
@@ -373,14 +377,18 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
return
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager, muxer: Muxer): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed muxer
##
if not (isNil(muxer)):
return await muxer.newStream()
proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from any connection
##
@@ -388,13 +396,13 @@ proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from a connection with `dir`
##
return await c.getStream(c.selectMuxer(peerId, dir))
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
@@ -405,7 +413,7 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
trace "Peer dropped", peerId
proc close*(c: ConnManager) {.async.} =
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
## cleanup resources for the connection
## manager
##

View File

@@ -28,8 +28,7 @@ proc hkdf*[T: sha256, len: static int](
if salt.len > 0:
unsafeAddr salt[0]
else:
nil
,
nil,
csize_t(salt.len),
)
hkdfInject(
@@ -37,8 +36,7 @@ proc hkdf*[T: sha256, len: static int](
if ikm.len > 0:
unsafeAddr ikm[0]
else:
nil
,
nil,
csize_t(ikm.len),
)
hkdfFlip(ctx)
@@ -48,8 +46,7 @@ proc hkdf*[T: sha256, len: static int](
if info.len > 0:
unsafeAddr info[0]
else:
nil
,
nil,
csize_t(info.len),
addr outputs[i][0],
csize_t(outputs[i].len),

View File

@@ -146,7 +146,7 @@ type
PubsubTicket* = ref object
topic*: string
handler*: P2PPubSubCallback
handler*: P2PPubSubCallback2
transp*: StreamTransport
PubSubMessage* = object
@@ -158,12 +158,14 @@ type
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI, stream: P2PStream): Future[void] {.
gcsafe, raises: [CatchableError]
gcsafe, async: (raises: [CatchableError])
.}
P2PPubSubCallback* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback2* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
DaemonLocalError* = object of DaemonError
@@ -485,7 +487,11 @@ proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalErro
if initProtoBuffer(error).getRequiredField(1, result).isErr():
raise newException(DaemonLocalError, "Error message is missing!")
proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
proc recvMessage(
conn: StreamTransport
): Future[seq[byte]] {.
async: (raises: [TransportIncompleteError, TransportError, CancelledError])
.} =
var
size: uint
length: int
@@ -508,13 +514,19 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport] {.raises: [LPError].} =
result = connect(api.address)
proc newConnection*(
api: DaemonAPI
): Future[StreamTransport] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
await connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
result = transp.closeWait()
proc closeConnection*(
api: DaemonAPI, transp: StreamTransport
): Future[void] {.async: (raises: [CancelledError]).} =
await transp.closeWait()
proc socketExists(address: MultiAddress): Future[bool] {.async.} =
proc socketExists(address: MultiAddress): Future[bool] {.async: (raises: []).} =
try:
var transp = await connect(address)
await transp.closeWait()
@@ -534,7 +546,9 @@ else:
proc getProcessId(): int =
result = int(posix.getpid())
proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.} =
proc getSocket(
pattern: string, count: ptr int
): Future[MultiAddress] {.async: (raises: [ValueError, LPError]).} =
var sockname = ""
var pid = $getProcessId()
sockname = pattern % [pid, $(count[])]
@@ -562,7 +576,35 @@ proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.}
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.}
template exceptionToAssert(body: untyped): untyped =
block:
var res: type(body)
when defined(nimHasWarnBareExcept):
{.push warning[BareExcept]: off.}
try:
res = body
except OSError as exc:
raise exc
except IOError as exc:
raise exc
except Defect as exc:
raise exc
except Exception as exc:
raiseAssert exc.msg
when defined(nimHasWarnBareExcept):
{.pop.}
res
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -586,7 +628,14 @@ proc newDaemonApi*(
peersRequired = 2,
logFile = "",
logLevel = IpfsLogLevel.Debug,
): Future[DaemonAPI] {.async.} =
): Future[DaemonAPI] {.
async: (
raises: [
ValueError, DaemonLocalError, CancelledError, LPError, OSError, IOError,
AsyncError,
]
)
.} =
## Initialize connection to `go-libp2p-daemon` control socket.
##
## ``flags`` - set of P2PDaemonFlags.
@@ -780,7 +829,7 @@ proc newDaemonApi*(
result = api
proc close*(stream: P2PStream) {.async.} =
proc close*(stream: P2PStream) {.async: (raises: [DaemonLocalError]).} =
## Close ``stream``.
if P2PStreamFlags.Closed notin stream.flags:
await stream.transp.closeWait()
@@ -789,7 +838,9 @@ proc close*(stream: P2PStream) {.async.} =
else:
raise newException(DaemonLocalError, "Stream is already closed!")
proc close*(api: DaemonAPI) {.async.} =
proc close*(
api: DaemonAPI
) {.async: (raises: [TransportOsError, LPError, ValueError, OSError, CancelledError]).} =
## Shutdown connections to `go-libp2p-daemon` control socket.
# await api.pool.close()
# Closing all pending servers.
@@ -827,7 +878,9 @@ template withMessage(m, body: untyped): untyped =
proc transactMessage(
transp: StreamTransport, pb: ProtoBuffer
): Future[ProtoBuffer] {.async.} =
): Future[ProtoBuffer] {.
async: (raises: [DaemonLocalError, TransportError, CancelledError])
.} =
let length = pb.getLen()
let res = await transp.write(pb.getPtr(), length)
if res != length:
@@ -845,7 +898,11 @@ proc getPeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =
discard pb.getRepeatedField(2, result.addresses)
proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc identity*(
api: DaemonAPI
): Future[PeerInfo] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
## Get Node identity information
var transp = await api.newConnection()
try:
@@ -860,7 +917,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc connect*(
api: DaemonAPI, peer: PeerId, addresses: seq[MultiAddress], timeout = 0
) {.async.} =
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
var transp = await api.newConnection()
try:
@@ -870,7 +927,9 @@ proc connect*(
except CatchableError:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc disconnect*(
api: DaemonAPI, peer: PeerId
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Disconnect from remote peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -882,7 +941,12 @@ proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc openStream*(
api: DaemonAPI, peer: PeerId, protocols: seq[string], timeout = 0
): Future[P2PStream] {.async.} =
): Future[P2PStream] {.
async: (
raises:
[MaInvalidAddress, TransportError, CancelledError, LPError, DaemonLocalError]
)
.} =
## Open new stream to peer ``peer`` using one of the protocols in
## ``protocols``. Returns ``StreamTransport`` for the stream.
var transp = await api.newConnection()
@@ -903,11 +967,12 @@ proc openStream*(
stream.flags.incl(Outbound)
stream.transp = transp
result = stream
except CatchableError as exc:
except ResultError[ProtoError]:
await api.closeConnection(transp)
raise exc
raise newException(DaemonLocalError, "Wrong message type!")
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
# must not specify raised exceptions as this is StreamCallback from chronos
var api = getUserData[DaemonAPI](server)
var message = await transp.recvMessage()
var pb = initProtoBuffer(message)
@@ -927,11 +992,28 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
proc addHandler*(
api: DaemonAPI, protocols: seq[string], handler: P2PStreamCallback
) {.async, raises: [LPError].} =
) {.
async: (
raises: [
MaInvalidAddress, DaemonLocalError, TransportError, CancelledError, LPError,
ValueError,
]
)
.} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
var server = createStreamServer(maddress, streamHandler, udata = api)
var removeHandler = proc(): Future[void] {.
async: (raises: [CancelledError, TransportError])
.} =
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
try:
for item in protocols:
api.handlers[item] = handler
@@ -939,17 +1021,28 @@ proc addHandler*(
var pb = await transp.transactMessage(requestStreamHandler(maddress, protocols))
pb.withMessage:
api.servers.add(P2PServer(server: server, address: maddress))
except CatchableError as exc:
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
raise exc
except DaemonLocalError as e:
await removeHandler()
raise e
except TransportError as e:
await removeHandler()
raise e
except CancelledError as e:
await removeHandler()
raise e
finally:
await api.closeConnection(transp)
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.} =
## Get list of remote peers to which we are currently connected.
var transp = await api.newConnection()
try:
@@ -964,7 +1057,14 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
finally:
await api.closeConnection(transp)
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.} =
proc cmTagPeer*(
api: DaemonAPI, peer: PeerId, tag: string, weight: int
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
var transp = await api.newConnection()
try:
@@ -974,7 +1074,14 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.
finally:
await api.closeConnection(transp)
proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
proc cmUntagPeer*(
api: DaemonAPI, peer: PeerId, tag: string
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Remove tag ``tag`` from peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -984,7 +1091,14 @@ proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
finally:
await api.closeConnection(transp)
proc cmTrimPeers*(api: DaemonAPI) {.async.} =
proc cmTrimPeers*(
api: DaemonAPI
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Trim all connections.
var transp = await api.newConnection()
try:
@@ -1058,7 +1172,12 @@ proc getDhtMessageType(
proc dhtFindPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PeerInfo] {.async.} =
): Future[PeerInfo] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1073,7 +1192,12 @@ proc dhtFindPeer*(
proc dhtGetPublicKey*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PublicKey] {.async.} =
): Future[PublicKey] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get peer's public key from peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1088,7 +1212,12 @@ proc dhtGetPublicKey*(
proc dhtGetValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[byte]] {.async.} =
): Future[seq[byte]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get value associated with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1103,7 +1232,12 @@ proc dhtGetValue*(
proc dhtPutValue*(
api: DaemonAPI, key: string, value: seq[byte], timeout = 0
) {.async.} =
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Associate ``value`` with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1116,7 +1250,14 @@ proc dhtPutValue*(
finally:
await api.closeConnection(transp)
proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtProvide*(
api: DaemonAPI, cid: Cid, timeout = 0
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Provide content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1131,7 +1272,12 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtFindPeersConnectedToPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peers which are connected to peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1157,7 +1303,12 @@ proc dhtFindPeersConnectedToPeer*(
proc dhtGetClosestPeers*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[PeerId]] {.async.} =
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get closest peers for ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1183,7 +1334,12 @@ proc dhtGetClosestPeers*(
proc dhtFindProviders*(
api: DaemonAPI, cid: Cid, count: uint32, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get ``count`` providers for content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1209,7 +1365,12 @@ proc dhtFindProviders*(
proc dhtSearchValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[seq[byte]]] {.async.} =
): Future[seq[seq[byte]]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Search for value with ``key``, return list of values found.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1232,7 +1393,14 @@ proc dhtSearchValue*(
finally:
await api.closeConnection(transp)
proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
proc pubsubGetTopics*(
api: DaemonAPI
): Future[seq[string]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of topics this node is subscribed to.
var transp = await api.newConnection()
try:
@@ -1245,7 +1413,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
finally:
await api.closeConnection(transp)
proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.async.} =
proc pubsubListPeers*(
api: DaemonAPI, topic: string
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of peers we are connected to which are also subscribed to topic
## ``topic``.
var transp = await api.newConnection()
@@ -1260,7 +1435,14 @@ proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.asyn
finally:
await api.closeConnection(transp)
proc pubsubPublish*(api: DaemonAPI, topic: string, value: seq[byte]) {.async.} =
proc pubsubPublish*(
api: DaemonAPI, topic: string, value: seq[byte]
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Publish message ``value`` to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1280,7 +1462,13 @@ proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
discard pb.getField(5, result.signature)
discard pb.getField(6, result.key)
proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
proc pubsubLoop(
api: DaemonAPI, ticket: PubsubTicket
) {.
async: (
raises: [TransportIncompleteError, TransportError, CancelledError, CatchableError]
)
.} =
while true:
var pbmessage = await ticket.transp.recvMessage()
if len(pbmessage) == 0:
@@ -1295,8 +1483,13 @@ proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
break
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.async.} =
api: DaemonAPI, topic: string, handler: P2PPubSubCallback2
): Future[PubsubTicket] {.
async: (
raises:
[MaInvalidAddress, TransportError, LPError, CancelledError, DaemonLocalError]
)
.} =
## Subscribe to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1308,9 +1501,27 @@ proc pubsubSubscribe*(
ticket.transp = transp
asyncSpawn pubsubLoop(api, ticket)
result = ticket
except CatchableError as exc:
except DaemonLocalError as exc:
await api.closeConnection(transp)
raise exc
except TransportError as exc:
await api.closeConnection(transp)
raise exc
except CancelledError as exc:
await api.closeConnection(transp)
raise exc
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.
async: (raises: [CatchableError]), deprecated: "Use P2PPubSubCallback2 instead"
.} =
proc wrap(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
await handler(api, ticket, message)
await pubsubSubscribe(api, topic, wrap)
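A hedged usage sketch of the non-deprecated overload, assuming ``P2PPubSubCallback2`` is the ``Future[bool]``-returning handler type used above and that ``data`` is a field of this module's PubSubMessage; returning ``false`` ends the subscription loop:

  proc onMessage(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async: (raises: [CatchableError]).} =
    echo "received ", message.data.len, " bytes"
    return true # returning false would close the subscription

  let ticket = await api.pubsubSubscribe("demo-topic", onMessage)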
proc shortLog*(pinfo: PeerInfo): string =
## Get string representation of ``PeerInfo`` object.

View File

@@ -55,7 +55,7 @@ proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
@@ -80,7 +80,9 @@ proc newPool*(
pool.state = Connected
result = pool
proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
## Acquire a non-busy connection from pool ``pool``.
var transp: StreamTransport
if pool.state in {Connected}:
@@ -102,7 +104,9 @@ proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(pool: TransportPool, transp: StreamTransport) =
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
## Release connection ``transp`` back to pool ``pool``.
if pool.state in {Connected, Closing}:
var found = false
@@ -118,7 +122,9 @@ proc release*(pool: TransportPool, transp: StreamTransport) =
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(pool: TransportPool) {.async.} =
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Wait for all connections to become available.
if pool.state in {Connected, Closing}:
while true:
@@ -130,7 +136,9 @@ proc join*(pool: TransportPool) {.async.} =
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(pool: TransportPool) {.async.} =
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Close the transport pool ``pool`` and release all resources.
if pool.state == Connected:
pool.state = Closing

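A short sketch of the acquire/release cycle under the new annotations; note that ``release`` is now asynchronous, so callers must await it (the address and payload are illustrative):

  let pool = await newPool(initTAddress("127.0.0.1:9080"))
  let transp = await pool.acquire()
  try:
    discard await transp.write("ping") # illustrative payload
  finally:
    await pool.release(transp) # release is now awaitable
  await pool.close()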
View File

@@ -15,7 +15,9 @@ import peerid, stream/connection, transports/transport
export results
type Dial* = ref object of RootObj
type
Dial* = ref object of RootObj
DialFailedError* = object of LPError
method connect*(
self: Dial,
@@ -24,28 +26,28 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async, base.} =
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method connect*(
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async, base.} =
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## Connect to a peer and retrieve its PeerId
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method dial*(
self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dial*(
self: Dial,
@@ -53,17 +55,19 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")
): Future[Opt[MultiAddress]] {.
base, async: (raises: [DialFailedError, CancelledError])
.} =
doAssert(false, "[Dial.tryDial] abstract method not implemented!")

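What the tightened signatures buy the caller: every ``Dial`` method now either completes, raises ``DialFailedError``, or is cancelled, so call sites can be exhaustive. A sketch, assuming a concrete ``dialer: Dial``, an illustrative protocol id, and chronicles' ``debug`` in scope:

  try:
    let conn = await dialer.dial(peerId, @["/my/proto/1.0.0"])
    # ... use the negotiated stream ...
    await conn.close()
  except DialFailedError as exc:
    debug "dial failed", description = exc.msg
  except CancelledError as exc:
    raise exc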
View File

@@ -36,16 +36,13 @@ declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
type
DialFailedError* = object of LPError
Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
type Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
@@ -53,7 +50,7 @@ proc dialAndUpgrade(
hostname: string,
address: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
@@ -105,7 +102,9 @@ proc dialAndUpgrade(
proc expandDnsAddr(
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
if not DNSADDR.matchPartial(address):
return @[(address, peerId)]
if isNil(self.nameResolver):
@@ -115,7 +114,10 @@ proc expandDnsAddr(
let
toResolve =
if peerId.isSome:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
try:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
except ResultError[void]:
raiseAssert "checked with if"
else:
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
@@ -132,7 +134,9 @@ proc expandDnsAddr(
proc dialAndUpgrade(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.async.} =
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
for rawAddress in addrs:
@@ -169,47 +173,59 @@ proc internalConnect(
forceDial: bool,
reuseConnection = true,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
raise newException(DialFailedError, "can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
try:
await lock.acquire()
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
await lock.acquire()
defer:
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", description = exc.msg
await muxed.close()
raise exc
return muxed
finally:
if lock.locked():
lock.release()
except AsyncLockError:
raiseAssert "lock must have been acquired in line above"
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot =
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(DialFailedError, exc.msg)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CancelledError as exc:
slot.release()
raise exc
except CatchableError as exc:
slot.release()
raise newException(DialFailedError, exc.msg)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
return muxed
except CancelledError as exc:
await muxed.close()
raise exc
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
method connect*(
self: Dialer,
@@ -218,7 +234,7 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async.} =
) {.async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
@@ -231,7 +247,7 @@ method connect*(
method connect*(
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async.} =
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
## Connect to a peer and retrieve its PeerId
parseFullAddress(address).toOpt().withValue(fullAddress):
@@ -249,7 +265,7 @@ method connect*(
proc negotiateStream(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CatchableError]).} =
trace "Negotiating stream", conn, protos
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
@@ -260,7 +276,7 @@ proc negotiateStream(
method tryDial*(
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async.} =
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.
@@ -280,17 +296,24 @@ method tryDial*(
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
trace "Dialing (existing)", peerId, protos
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled"
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, exc.msg)
method dial*(
self: Dialer,
@@ -298,7 +321,7 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
@@ -307,7 +330,7 @@ method dial*(
conn: Muxer
stream: Connection
proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
if not (isNil(stream)):
await stream.closeWithEOF()
@@ -331,7 +354,7 @@ method dial*(
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
await cleanup()
raise exc
raise newException(DialFailedError, exc.msg)
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t

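The restructured ``internalConnect`` above follows the exception discipline used throughout this change set: re-raise ``CancelledError`` untouched and narrow everything else into the typed error. Condensed, the idiom looks like this (``attemptDial`` is a hypothetical stand-in for ``dialAndUpgrade`` and friends):

  proc connectOnce(): Future[Muxer] {.
      async: (raises: [DialFailedError, CancelledError])
  .} =
    try:
      await attemptDial()
    except CancelledError as exc:
      raise exc # cancellation passes through untouched
    except CatchableError as exc:
      raise newException(DialFailedError, exc.msg) # everything else is narrowed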
View File

@@ -38,8 +38,7 @@ proc add*[T](pa: var PeerAttributes, value: T) =
Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
,
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
)
)
@@ -60,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr:
raise newException(KeyError, "Attritute not found")
raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -80,16 +79,21 @@ type
advertisementUpdated*: AsyncEvent
advertiseLoop*: Future[void]
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
doAssert(false, "Not implemented!")
method advertise*(self: DiscoveryInterface) {.async, base.} =
doAssert(false, "Not implemented!")
type
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
AdvertiseError* = object of DiscoveryError
method request*(
self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
method advertise*(
self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
type
DiscoveryQuery* = ref object
attr: PeerAttributes
peers: AsyncQueue[PeerAttributes]
@@ -138,7 +142,9 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
## peer attributes are available through the variable
## `peer`
proc forEachInternal(q: DiscoveryQuery) {.async.} =
proc forEachInternal(
q: DiscoveryQuery
) {.async: (raises: [CancelledError, DiscoveryError]).} =
while true:
let peer {.inject.} =
try:
@@ -163,7 +169,11 @@ proc stop*(dm: DiscoveryManager) =
continue
i.advertiseLoop.cancel()
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
proc getPeer*(
query: DiscoveryQuery
): Future[PeerAttributes] {.
async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
.} =
let getter = query.peers.popFirst()
try:

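A hedged sketch of consuming a query with the ``forEach`` template documented above; it assumes a ``query: DiscoveryQuery`` already obtained from a DiscoveryManager with at least one interface attached:

  query.forEach:
    # `peer` is injected by the template; lookups return Opt values
    peer{PeerId}.withValue(pid):
      echo "discovered peer ", pid

  # or pull results one at a time, now with typed failure modes:
  let attrs = await query.getPeer()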
View File

@@ -23,15 +23,17 @@ type
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
var namespace = ""
method request*(
self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
var namespace = Opt.none(string)
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = string attr.to(RdvNamespace)
namespace = Opt.some(string attr.to(RdvNamespace))
elif attr.ofType(DiscoveryService):
namespace = string attr.to(DiscoveryService)
namespace = Opt.some(string attr.to(DiscoveryService))
elif attr.ofType(PeerId):
namespace = $attr.to(PeerId)
namespace = Opt.some($attr.to(PeerId))
else:
# unhandled type
return
@@ -42,13 +44,15 @@ method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
peer.add(DiscoveryService(namespace.get()))
peer.add(RdvNamespace(namespace.get()))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)
method advertise*(self: RendezVousInterface) {.async.} =
method advertise*(
self: RendezVousInterface
) {.async: (raises: [CancelledError, AdvertiseError]).} =
while true:
var toAdvertise: seq[string]
for attr in self.toAdvertise:

View File

@@ -26,7 +26,7 @@ import
errors,
utility
import stew/[base58, base32, endians2]
export results, minprotobuf, vbuffer, errors, utility
export results, vbuffer, errors, utility
logScope:
topics = "libp2p multiaddress"
@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
## IPv6 validateBuffer() implementation.
pathValidateBufferNoSlash(vb)
proc memoryStB(s: string, vb: var VBuffer): bool =
## Memory stringToBuffer() implementation.
pathStringToBuffer(s, vb)
proc memoryBtS(vb: var VBuffer, s: var string): bool =
## Memory bufferToString() implementation.
pathBufferToString(vb, s)
proc memoryVB(vb: var VBuffer): bool =
## Memory validateBuffer() implementation.
pathValidateBuffer(vb)
proc portStB(s: string, vb: var VBuffer): bool =
## Port number stringToBuffer() implementation.
var port: array[2, byte]
@@ -355,6 +367,10 @@ const
)
TranscoderDNS* =
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
TranscoderMemory* = Transcoder(
stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
)
ProtocolsList = [
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
@@ -393,6 +409,9 @@ const
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
MAProtocol(
mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
),
]
DNSANY* = mapEq("dns")
@@ -453,6 +472,8 @@ const
CircuitRelay* = mapEq("p2p-circuit")
Memory* = mapEq("memory")
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
for item in ProtocolsList:
result[item.mcodec] = item

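With the transcoder, codec entry, and ``Memory`` matcher in place, memory addresses parse like any other path-style protocol; a small sketch (the concrete address string is illustrative):

  let ma = MultiAddress.init("/memory/addr-1").tryGet()
  doAssert Memory.match(ma)
  doAssert $ma == "/memory/addr-1"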
View File

@@ -396,6 +396,7 @@ const MultiCodecList = [
("onion3", 0x01BD),
("p2p-circuit", 0x0122),
("libp2p-peer-record", 0x0301),
("memory", 0x0309),
("dns", 0x35),
("dns4", 0x36),
("dns6", 0x37),

View File

@@ -169,6 +169,7 @@ method readOnce*(
## channel must not be done from within a callback / read handler of another
## or the reads will lock each other.
if s.remoteReset:
trace "reset stream in readOnce", s
raise newLPStreamResetError()
if s.localReset:
raise newLPStreamClosedError()
@@ -201,6 +202,7 @@ proc prepareWrite(
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
trace "stream is reset when prepareWrite", s
raise newLPStreamResetError()
if s.closedLocal:
raise newLPStreamClosedError()

View File

@@ -247,7 +247,7 @@ method close*(m: Mplex) {.async: (raises: []).} =
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
for c in m.channels[false].values:
result.add(c)
for c in m.channels[true].values:

View File

@@ -52,7 +52,7 @@ method newStream*(
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("Not implemented!")
raiseAssert("[Muxer.newStream] abstract method not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
@@ -67,5 +67,5 @@ proc new*(
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
raiseAssert("[Muxer.getStreams] abstract method not implemented!")

View File

@@ -82,8 +82,7 @@ proc `$`(header: YamuxHeader): string =
if a != "":
a & ", " & $b
else:
$b
,
$b,
"",
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
"}"
@@ -176,8 +175,7 @@ proc `$`(channel: YamuxChannel): string =
if a != "":
a & ", " & b
else:
b
,
b,
"",
) & "}"
@@ -205,6 +203,7 @@ proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.isSet():
channel.closedRemotely.fire()
await channel.actuallyClose()
channel.isClosedRemotely = true
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
@@ -270,10 +269,13 @@ method readOnce*(
if channel.isReset:
raise
if channel.remoteReset:
trace "stream is remote reset when readOnce", channel = $channel
newLPStreamResetError()
elif channel.closedLocally:
trace "stream is closed locally when readOnce", channel = $channel
newLPStreamClosedError()
else:
trace "stream is down when readOnce", channel = $channel
newLPStreamConnDownError()
if channel.isEof:
raise newLPStreamRemoteClosedError()
@@ -397,6 +399,7 @@ method write*(
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
trace "stream is reset when write", channel = $channel
result.fail(newLPStreamResetError())
return result
if channel.closedLocally or channel.isReset:
@@ -632,7 +635,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
for c in m.channels.values:
result.add(c)

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,7 +10,7 @@
{.push raises: [].}
import
std/[streams, strutils, sets, sequtils],
std/[streams, sets, sequtils],
chronos,
chronicles,
stew/byteutils,
@@ -39,24 +39,32 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
var buf = newSeq[byte](dataLen)
discard requestStream.readData(addr buf[0], dataLen)
return buf
except CatchableError as exc:
buf
except IOError as exc:
info "Failed to created DNS buffer", description = exc.msg
return newSeq[byte](0)
newSeq[byte](0)
except OSError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
except ValueError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.async.} =
): Future[Response] {.
async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
.} =
var sendBuf = questionToBuf(address, kind)
if sendBuf.len == 0:
raise newException(ValueError, "Incorrect DNS query")
let receivedDataFuture = newFuture[void]()
let receivedDataFuture = Future[void].Raising([CancelledError]).init()
proc datagramDataReceived(
transp: DatagramTransport, raddr: TransportAddress
): Future[void] {.async, closure.} =
): Future[void] {.async: (raises: []).} =
receivedDataFuture.complete()
let sock =
@@ -68,27 +76,41 @@ proc getDnsResponse(
try:
await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)
await receivedDataFuture or sleepAsync(5.seconds) #unix default
if not receivedDataFuture.finished:
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError:
raise newException(IOError, "DNS server timeout")
let rawResponse = sock.getMessage()
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return exceptionToAssert:
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise exc
except OSError as exc:
raise exc
except ValueError as exc:
raise exc
except Exception as exc:
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert exc.msg
finally:
await sock.closeWait()
method resolveIp*(
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
var responseFutures: seq[Future[Response]]
var responseFutures: seq[
Future[Response].Raising(
[CancelledError, IOError, OSError, TransportError, ValueError]
)
]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
responseFutures.add(getDnsResponse(server, address, A))
@@ -103,23 +125,32 @@ method resolveIp*(
var
resolvedAddresses: OrderedSet[string]
resolveFailed = false
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
for fut in responseFutures:
try:
let resp = await fut
for answer in resp.answers:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(exceptionToAssert(answer.toString()))
resolvedAddresses.incl(answer.toString())
except CancelledError as e:
raise e
except ValueError as e:
info "Invalid DNS query", address, error = e.msg
return @[]
except CatchableError as e:
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: answer.toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
if resolveFailed:
self.nameServers.add(self.nameServers[0])
@@ -132,27 +163,39 @@ method resolveIp*(
debug "Failed to resolve address, returning empty set"
return @[]
method resolveTxt*(self: DnsResolver, address: string): Future[seq[string]] {.async.} =
method resolveTxt*(
self: DnsResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
try:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
let response = await getDnsResponse(server, address, TXT)
return exceptionToAssert:
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except CatchableError as e:
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
try:
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
return response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except ValueError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
debug "Failed to resolve TXT, returning empty set"
return @[]

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -25,17 +25,25 @@ type MockResolver* = ref object of NameResolver
method resolveIp*(
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
var res: seq[TransportAddress]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, false)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, true)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
method resolveTxt*(self: MockResolver, address: string): Future[seq[string]] {.async.} =
return self.txtResponses.getOrDefault(address)
res
method resolveTxt*(
self: MockResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
self.txtResponses.getOrDefault(address)
proc new*(T: typedesc[MockResolver]): T =
T()

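A short sketch of wiring the mock into a test; the ``ipResponses``/``txtResponses`` field shapes are taken from the lookups above (the boolean in the key selects IPv6):

  let resolver = MockResolver.new()
  resolver.txtResponses["_dnsaddr.example.com"] =
    @["dnsaddr=/dns4/example.com/tcp/4001"]
  resolver.ipResponses[("example.com", false)] = @["93.184.216.34"] # IPv4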
View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,7 +9,7 @@
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import std/[sets, sequtils, strutils]
import chronos, chronicles, stew/endians2
import ".."/[multiaddress, multicodec]
@@ -20,19 +20,17 @@ type NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver, address: string
): Future[seq[string]] {.async, base.} =
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
## Get TXT record
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
method resolveIp*(
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async, base.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError]), base
.} =
## Resolve the specified address
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
proc getHostname*(ma: MultiAddress): string =
let
@@ -46,30 +44,40 @@ proc getHostname*(ma: MultiAddress): string =
proc resolveOneAddress(
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.async.} =
#Resolve a single address
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
# Resolve a single address
let portPart = ma[1].valueOr:
raise maErr error
var pbuf: array[2, byte]
var dnsval = getHostname(ma)
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
raise newException(MaError, "Incorrect port number")
let plen = portPart.protoArgument(pbuf).valueOr:
raise maErr error
if plen == 0:
raise maErr "Incorrect port number"
let
port = Port(fromBytesBE(uint16, pbuf))
dnsval = getHostname(ma)
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.tryGet()):
continue
createdAddress &= part.tryGet()
createdAddress
resolvedAddresses.mapIt:
let address = MultiAddress.init(it).valueOr:
raise maErr error
var createdAddress = address[0].valueOr:
raise maErr error
for part in ma:
let part = part.valueOr:
raise maErr error
if DNS.match(part):
continue
createdAddress &= part
createdAddress
proc resolveDnsAddr*(
self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
@@ -78,54 +86,67 @@ proc resolveDnsAddr*(
info "Stopping DNSADDR recursion, probably malicious", ma
return @[]
var dnsval = getHostname(ma)
let txt = await self.resolveTxt("_dnsaddr." & dnsval)
let
dnsval = getHostname(ma)
txt = await self.resolveTxt("_dnsaddr." & dnsval)
trace "txt entries", txt
var result: seq[MultiAddress]
const codec = multiCodec("p2p")
let maCodec = block:
let hasCodec = ma.contains(codec).valueOr:
raise maErr error
if hasCodec:
ma[codec]
else:
(static(default(MaResult[MultiAddress])))
var res: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="):
continue
let entryValue = MultiAddress.init(entry[8 ..^ 1]).tryGet()
if entryValue.contains(multiCodec("p2p")).tryGet() and
ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
let
entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
raise maErr error
entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
raise maErr error
if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
continue
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
result.add(r)
res.add(r)
if result.len == 0:
if res.len == 0:
debug "Failed to resolve a DNSADDR", ma
return @[]
return result
res
proc resolveMAddress*(
self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):
res.incl(address)
else:
let code = address[0].tryGet().protoCode().tryGet()
let seq =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
assert false
@[address]
for ad in seq:
let
firstPart = address[0].valueOr:
raise maErr error
code = firstPart.protoCode().valueOr:
raise maErr error
ads =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
raise maErr("Unsupported codec " & $code)
for ad in ads:
res.incl(ad)
return res.toSeq
res.toSeq

View File

@@ -22,7 +22,7 @@ type
PeerInfoError* = object of LPError
AddressMapper* = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.
gcsafe, raises: []
gcsafe, async: (raises: [CancelledError])
.} ## A proc that is expected to resolve the listen addresses into dialable addresses
PeerInfo* {.public.} = ref object
@@ -52,7 +52,7 @@ func shortLog*(p: PeerInfo): auto =
chronicles.formatIt(PeerInfo):
shortLog(it)
proc update*(p: PeerInfo) {.async.} =
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
# The p.addrs.len == 0 check means addrs is overridden only the first time update runs, or when the field is empty.
# p.addressMappers.len == 0 is for when all addressMappers have been removed,
# and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.

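Under the new ``AddressMapper`` type, mappers are async procs that may raise at most ``CancelledError``; a sketch of registering one (the mapping itself is hypothetical):

  proc mapToPublic(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
    # hypothetical mapping: substitute externally observed addresses here
    return listenAddrs

  peerInfo.addressMappers.add(mapToPublic)
  await peerInfo.update() # recomputes peerInfo.addrs through all mappers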
View File

@@ -63,6 +63,7 @@ type
KeyBook* {.public.} = ref object of PeerBook[PublicKey]
AgentBook* {.public.} = ref object of PeerBook[string]
LastSeenBook* {.public.} = ref object of PeerBook[Opt[MultiAddress]]
ProtoVersionBook* {.public.} = ref object of PeerBook[string]
SPRBook* {.public.} = ref object of PeerBook[Envelope]
@@ -145,10 +146,16 @@ proc del*(peerStore: PeerStore, peerId: PeerId) {.public.} =
for _, book in peerStore.books:
book.deletor(peerId)
proc updatePeerInfo*(peerStore: PeerStore, info: IdentifyInfo) =
if info.addrs.len > 0:
proc updatePeerInfo*(
peerStore: PeerStore,
info: IdentifyInfo,
observedAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
) =
if len(info.addrs) > 0:
peerStore[AddressBook][info.peerId] = info.addrs
peerStore[LastSeenBook][info.peerId] = observedAddr
info.pubkey.withValue(pubkey):
peerStore[KeyBook][info.peerId] = pubkey
@@ -181,7 +188,16 @@ proc cleanup*(peerStore: PeerStore, peerId: PeerId) =
peerStore.del(peerStore.toClean[0])
peerStore.toClean.delete(0)
proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
proc identify*(
peerStore: PeerStore, muxer: Muxer
) {.
async: (
raises: [
CancelledError, IdentityNoMatchError, IdentityInvalidMsgError, MultiStreamError,
LPStreamError, MuxerError,
]
)
.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
@@ -200,7 +216,7 @@ proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
peerStore.updatePeerInfo(info, stream.observedAddr)
finally:
await stream.closeWithEOF()

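With ``observedAddr`` now recorded, the address observed during identify is retrievable afterwards from the new ``LastSeenBook``; a sketch, mirroring the ``AddressBook`` accessor usage above:

  await peerStore.identify(muxer)
  # the observed address, if one was reported, now sits in the LastSeenBook
  let lastSeen = peerStore[LastSeenBook][muxer.connection.peerId]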
View File

@@ -16,8 +16,6 @@ export results, utility
{.push public.}
const MaxMessageSize = 1'u shl 22
type
ProtoFieldKind* = enum
## Protobuf's field types enum
@@ -39,7 +37,6 @@ type
buffer*: seq[byte]
offset*: int
length*: int
maxSize*: uint
ProtoHeader* = object
wire*: ProtoFieldKind
@@ -63,7 +60,6 @@ type
VarintDecode
MessageIncomplete
BufferOverflow
MessageTooBig
BadWireType
IncorrectBlob
RequiredFieldMissing
@@ -99,11 +95,14 @@ template getProtoHeader*(field: ProtoField): uint64 =
template toOpenArray*(pb: ProtoBuffer): untyped =
toOpenArray(pb.buffer, pb.offset, len(pb.buffer) - 1)
template lenu64*(x: untyped): untyped =
uint64(len(x))
template isEmpty*(pb: ProtoBuffer): bool =
len(pb.buffer) - pb.offset <= 0
template isEnough*(pb: ProtoBuffer, length: int): bool =
len(pb.buffer) - pb.offset - length >= 0
template isEnough*(pb: ProtoBuffer, length: uint64): bool =
pb.offset <= len(pb.buffer) and length <= uint64(len(pb.buffer) - pb.offset)
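The switch to a ``uint64`` overload keeps the bounds check in unsigned arithmetic, so an attacker-supplied 64-bit length is rejected before any conversion to ``int`` can overflow. A small sketch of the behaviour this change assumes:

  var pb = initProtoBuffer(@[byte 0x0A, 0x02, 0x61, 0x62]) # field 1, length 2, "ab"
  doAssert pb.isEnough(4'u64)            # the whole buffer remains readable
  doAssert not pb.isEnough(high(uint64)) # a huge declared length fails cleanly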
template getPtr*(pb: ProtoBuffer): pointer =
cast[pointer](unsafeAddr pb.buffer[pb.offset])
@@ -127,33 +126,25 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
0
proc initProtoBuffer*(
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with shallow copy of ``data``.
result.buffer = data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
data: openArray[byte],
offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize,
data: openArray[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
## Initialize ProtoBuffer with a new empty sequence.
result.buffer = newSeq[byte]()
result.options = options
result.maxSize = maxSize
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
@@ -194,12 +185,12 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -242,12 +233,12 @@ proc writePacked*[T: ProtoScalar](
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -268,7 +259,7 @@ proc write*[T: byte | char](pb: var ProtoBuffer, field: int, value: openArray[T]
doAssert(lres.isOk())
pb.offset += length
if len(value) > 0:
doAssert(pb.isEnough(len(value)))
doAssert(pb.isEnough(value.lenu64))
copyMem(addr pb.buffer[pb.offset], unsafeAddr value[0], len(value))
pb.offset += len(value)
@@ -327,13 +318,13 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed32:
if data.isEnough(sizeof(uint32)):
if data.isEnough(uint64(sizeof(uint32))):
data.offset += sizeof(uint32)
ok()
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed64:
if data.isEnough(sizeof(uint64)):
if data.isEnough(uint64(sizeof(uint64))):
data.offset += sizeof(uint64)
ok()
else:
@@ -343,14 +334,11 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
var bsize = 0'u64
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.StartGroup, ProtoFieldKind.EndGroup:
@@ -382,7 +370,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.VarintDecode)
elif T is float32:
doAssert(header.wire == ProtoFieldKind.Fixed32)
if data.isEnough(sizeof(float32)):
if data.isEnough(uint64(sizeof(float32))):
outval = cast[float32](fromBytesLE(uint32, data.toOpenArray()))
data.offset += sizeof(float32)
ok()
@@ -390,7 +378,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.MessageIncomplete)
elif T is float64:
doAssert(header.wire == ProtoFieldKind.Fixed64)
if data.isEnough(sizeof(float64)):
if data.isEnough(uint64(sizeof(float64))):
outval = cast[float64](fromBytesLE(uint64, data.toOpenArray()))
data.offset += sizeof(float64)
ok()
@@ -410,22 +398,19 @@ proc getValue[T: byte | char](
outLength = 0
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
if data.isEnough(bsize):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
@@ -439,17 +424,14 @@ proc getValue[T: seq[byte] | string](
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)

View File

@@ -19,7 +19,9 @@ logScope:
type AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -30,7 +32,9 @@ method dialMe*(
switch: Switch,
pid: PeerId,
addrs: seq[MultiAddress] = newSeq[MultiAddress](),
): Future[MultiAddress] {.base, async.} =
): Future[MultiAddress] {.
base, async: (raises: [AutonatError, AutonatUnreachableError, CancelledError])
.} =
proc getResponseOrRaise(
autonatMsg: Opt[AutonatMsg]
): AutonatDialResponse {.raises: [AutonatError].} =
@@ -47,7 +51,9 @@ method dialMe*(
await switch.dial(pid, @[AutonatCodec])
else:
await switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
except CancelledError as err:
raise err
except DialFailedError as err:
raise
newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
@@ -61,14 +67,37 @@ method dialMe*(
incomingConnection.cancel()
# Safer to always try to cancel cause we aren't sure if the peer dialled us or not
if incomingConnection.completed():
await (await incomingConnection).connection.close()
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
try:
await (await incomingConnection).connection.close()
except AlreadyExpectingConnectionError as e:
# this err is already handled above and could not happen later
error "Unexpected error", description = e.msg
try:
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "Sending dial failed", e)
var respBytes: seq[byte]
try:
respBytes = await conn.readLp(1024)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "read Dial response failed", e)
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
return
case response.status
of ResponseStatus.Ok:
response.ma.tryGet()
try:
response.ma.tryGet()
except ResultError[void]:
raiseAssert("checked with if")
of ResponseStatus.DialError:
raise newException(
AutonatUnreachableError, "Peer could not dial us back: " & response.text.get("")

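Caller-side, the narrowed raises list means a reachability probe can be handled exhaustively; a sketch, assuming an ``AutonatClient`` instance ``client`` and chronicles' ``info`` in scope:

  try:
    let observed = await client.dialMe(switch, remotePeerId)
    info "publicly reachable", observed
  except AutonatUnreachableError:
    info "peer could not dial us back"
  except AutonatError as exc:
    info "autonat probe failed", description = exc.msg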
View File

@@ -12,6 +12,7 @@
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf
logScope:
topics = "libp2p autonat"

View File

@@ -32,7 +32,9 @@ type Autonat* = ref object of LPProtocol
switch*: Switch
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [LPStreamError, CancelledError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -40,28 +42,37 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: status,
text:
if text == "":
Opt.none(string)
else:
Opt.some(text)
,
Opt.some(text),
ma: Opt.none(MultiAddress),
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response error", description = exc.msg, conn
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
proc sendResponseOk(
conn: Connection, ma: MultiAddress
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok, text: Opt.some("Ok"), ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response ok", description = exc.msg, conn
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
proc tryDial(
autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]
) {.async: (raises: [DialFailedError, CancelledError]).} =
await autonat.sem.acquire()
var futs: seq[Future[Opt[MultiAddress]]]
var futs: seq[Future[Opt[MultiAddress]].Raising([DialFailedError, CancelledError])]
try:
# This is to bypass the per peer max connections limit
let outgoingConnection =
@@ -72,7 +83,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
return
# Safer to always try to cancel because we aren't sure if the connection was established
defer:
outgoingConnection.cancel()
outgoingConnection.cancelSoon()
# tryDial is to bypass the global max connections limit
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
@@ -89,9 +101,6 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
except AsyncTimeoutError as exc:
debug "Dial timeout", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Dial timeout")
except CatchableError as exc:
debug "Unexpected error", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Unexpected error")
finally:
autonat.sem.release()
for f in futs:
@@ -155,7 +164,9 @@ proc new*(
): T =
let autonat =
T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
@@ -163,6 +174,7 @@ proc new*(
raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
trace "cancelled autonat handler"
raise exc
except CatchableError as exc:
debug "exception in autonat handler", description = exc.msg, conn

View File
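
The tryDial hunk above also switches its future list to chronos's Raising helper, so each element's possible errors are tracked in the type. A small sketch of the construct, with illustrative names:

import chronos

proc attempt(i: int): Future[int] {.async: (raises: [CancelledError, ValueError]).} =
  if i < 0:
    raise newException(ValueError, "negative input")
  return i

# the element type records the full raise set, as with futs above
var futs: seq[Future[int].Raising([CancelledError, ValueError])]
for i in 0 .. 2:
  futs.add attempt(i)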

@@ -50,7 +50,7 @@ type
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
T: typedesc[AutonatService],
@@ -79,7 +79,7 @@ proc new*(
enableAddressMapper: enableAddressMapper,
)
proc callHandler(self: AutonatService) {.async.} =
proc callHandler(self: AutonatService) {.async: (raises: [CancelledError]).} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
@@ -92,7 +92,7 @@ proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
proc handleAnswer(
self: AutonatService, ans: NetworkReachability
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
if ans == Unknown:
return
@@ -127,7 +127,7 @@ proc handleAnswer(
proc askPeer(
self: AutonatService, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async.} =
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
logScope:
peerId = $peerId
@@ -160,7 +160,9 @@ proc askPeer(
await switch.peerInfo.update()
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
proc askConnectedPeers(
self: AutonatService, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
@@ -175,13 +177,15 @@ proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
proc schedule(
service: AutonatService, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
heartbeat "Scheduling AutonatService run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatService, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -198,10 +202,12 @@ proc addressMapper(
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: AutonatService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
@@ -210,22 +216,30 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
method run*(
self: AutonatService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
self: AutonatService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
info "Stopping AutonatService"
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:

View File
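
StatusAndConfidenceHandler callbacks now carry the async raises annotation too; a sketch of a conforming handler, where the import path follows the module layout above and the commented registration call is an assumption about the service API:

import chronos
import libp2p/protocols/connectivity/autonat/service

proc statusHandler(
    reachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
  discard confidence # set once enough peers have answered
  echo "autonat reachability is now ", reachability

# registration (setter name assumed from the service API):
# autonatService.statusAndConfidenceHandler(statusHandler)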

@@ -34,7 +34,7 @@ proc new*(
proc startSync*(
self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]
) {.async.} =
) {.async: (raises: [DcutrError, CancelledError]).} =
logScope:
peerId = switch.peerInfo.peerId

View File

@@ -15,6 +15,7 @@ import chronos
import stew/objects
import ../../../multiaddress, ../../../errors, ../../../stream/connection
import ../../../protobuf/minprotobuf
export multiaddress
@@ -49,7 +50,9 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrEr
raise newException(DcutrError, "Received malformed message")
return dcutrMsg
proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
proc send*(
conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)

View File

@@ -30,7 +30,9 @@ proc new*(
connectTimeout = 15.seconds,
maxDialableAddrs = 8,
): T =
proc handleStream(stream: Connection, proto: string) {.async.} =
proc handleStream(
stream: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -77,6 +79,7 @@ proc new*(
for fut in futs:
fut.cancel()
except CancelledError as err:
trace "cancelled Dcutr receiver"
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, " &

View File

@@ -29,11 +29,12 @@ const RelayClientMsgSize = 4096
type
RelayClientError* = object of LPError
ReservationError* = object of RelayClientError
RelayV1DialError* = object of RelayClientError
RelayV2DialError* = object of RelayClientError
RelayDialError* = object of DialFailedError
RelayV1DialError* = object of RelayDialError
RelayV2DialError* = object of RelayDialError
RelayClientAddConn* = proc(
conn: Connection, duration: uint32, data: uint64
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -45,14 +46,21 @@ type
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
proc sendStopError(
conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError]).} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
try:
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "failed to send stop status", description = e.msg
proc handleRelayedConnect(
cl: RelayClient, conn: Connection, msg: StopMessage
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
@@ -80,7 +88,7 @@ proc handleRelayedConnect(
proc reserve*(
cl: RelayClient, peerId: PeerId, addrs: seq[MultiAddress] = @[]
): Future[Rsvp] {.async.} =
): Future[Rsvp] {.async: (raises: [ReservationError, DialFailedError, CancelledError]).} =
let conn = await cl.switch.dial(peerId, addrs, RelayV2HopCodec)
defer:
await conn.close()
@@ -121,7 +129,7 @@ proc reserve*(
proc dialPeerV1*(
cl: RelayClient, conn: Connection, dstPeerId: PeerId, dstAddrs: seq[MultiAddress]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CancelledError, RelayV1DialError]).} =
var
msg = RelayMessage(
msgType: Opt.some(RelayType.Hop),
@@ -138,19 +146,19 @@ proc dialPeerV1*(
await conn.writeLp(pb.buffer)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error writing hop request", description = exc.msg
raise exc
raise newException(RelayV1DialError, "error writing hop request", exc)
let msgRcvFromRelayOpt =
try:
RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error reading stop response", description = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise newException(RelayV1DialError, "error reading stop response", exc)
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -176,7 +184,7 @@ proc dialPeerV2*(
conn: RelayConnection,
dstPeerId: PeerId,
dstAddrs: seq[MultiAddress],
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [RelayV2DialError, CancelledError]).} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
@@ -202,7 +210,9 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
proc handleStopStreamV2(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +224,9 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
trace "Unexpected client / relayv2 handshake", msgType = msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
proc handleStop(
cl: RelayClient, conn: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -241,7 +253,9 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.}
else:
await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
proc handleStreamV1(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -290,7 +304,9 @@ proc new*(
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV1Codec:
@@ -300,6 +316,7 @@ proc new*(
of RelayV2HopCodec:
await cl.handleHopStreamV2(conn)
except CancelledError as exc:
trace "cancelled client handler"
raise exc
except CatchableError as exc:
trace "exception in client handler", description = exc.msg, conn

View File

@@ -12,6 +12,7 @@
import macros
import stew/[objects, results]
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf
# Circuit Relay V1 Message

View File

@@ -112,7 +112,9 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async.} =
proc handleReserve(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -133,7 +135,9 @@ proc handleReserve(r: Relay, conn: Connection) {.async.} =
r.rsvp[pid] = expire
await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
proc handleConnect(
r: Relay, connSrc: Connection, msg: HopMessage
) {.async: (raises: [CancelledError, LPStreamError]).} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -166,14 +170,14 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await r.switch.dial(dst, RelayV2StopCodec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendHopStatus(connSrc, ConnectionFailed)
return
defer:
await connDst.close()
proc sendStopMsg() {.async.} =
proc sendStopMsg() {.async: (raises: [SendStopError, CancelledError, LPStreamError]).} =
let stopMsg = StopMessage(
msgType: StopMessageType.Connect,
peer: Opt.some(Peer(peerId: src, addrs: @[])),
@@ -209,7 +213,9 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
proc handleHopStreamV2*(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -225,7 +231,9 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
proc handleHop*(
r: Relay, connSrc: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
r.streamCount.inc()
defer:
r.streamCount.dec()
@@ -271,7 +279,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
await r.switch.dial(dst.peerId, RelayV1Codec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendStatus(connSrc, StatusV1.HopCantDialDst)
return
@@ -309,7 +317,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc handleStreamV1(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -333,9 +343,8 @@ proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc setup*(r: Relay, switch: Switch) =
r.switch = switch
r.switch.addPeerEventHandler(
proc(peerId: PeerId, event: PeerEvent) {.async.} =
r.rsvp.del(peerId)
,
proc(peerId: PeerId, event: PeerEvent) {.async: (raises: [CancelledError]).} =
r.rsvp.del(peerId),
Left,
)
@@ -360,7 +369,9 @@ proc new*(
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV2HopCodec:
@@ -368,6 +379,7 @@ proc new*(
of RelayV1Codec:
await r.handleStreamV1(conn)
except CancelledError as exc:
trace "cancelled relayv2 handler"
raise exc
except CatchableError as exc:
debug "exception in relayv2 handler", description = exc.msg, conn
@@ -383,12 +395,15 @@ proc new*(
r.handler = handleStream
r
proc deletesReservation(r: Relay) {.async.} =
proc deletesReservation(r: Relay) {.async: (raises: [CancelledError]).} =
heartbeat "Reservation timeout", r.heartbeatSleepTime.seconds():
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
try:
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
except KeyError:
raiseAssert "checked with in"
method start*(r: Relay): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()

View File

@@ -29,71 +29,99 @@ type RelayTransport* = ref object of Transport
queue: AsyncQueue[Connection]
selfRunning: bool
method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
method start*(
self: RelayTransport, ma: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
if self.selfRunning:
trace "Relay transport already running"
return
self.client.onNewConnection = proc(
conn: Connection, duration: uint32 = 0, data: uint64 = 0
) {.async.} =
) {.async: (raises: [CancelledError]).} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async.} =
method stop*(self: RelayTransport) {.async: (raises: []).} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
try:
await self.queue.popFirstNoWait().close()
except AsyncQueueEmptyError:
continue # checked with self.queue.empty()
method accept*(self: RelayTransport): Future[Connection] {.async.} =
method accept*(
self: RelayTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
proc dial*(
self: RelayTransport, ma: MultiAddress
): Future[Connection] {.async: (raises: [RelayDialError, CancelledError]).} =
var
relayAddrs: MultiAddress
relayPeerId: PeerId
dstPeerId: PeerId
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Destination doesn't exist")
try:
let sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Destination doesn't exist")
except RelayDialError as e:
raise e
except CatchableError:
raise newException(RelayDialError, "dial address not valid")
trace "Dial", relayPeerId, dstPeerId
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
var rc: RelayConnection
try:
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
case conn.protocol
of RelayV1Codec:
return await self.client.dialPeerV1(conn, dstPeerId, @[])
of RelayV2HopCodec:
rc = RelayConnection.new(conn, 0, 0)
return await self.client.dialPeerV2(rc, dstPeerId, @[])
except CancelledError as exc:
raise exc
except CatchableError as exc:
if not rc.isNil:
await rc.close()
raise exc
except CancelledError as e:
safeClose(rc)
raise e
except DialFailedError as e:
safeClose(rc)
raise newException(RelayDialError, "dial relay peer failed", e)
except RelayV1DialError as e:
safeClose(rc)
raise e
except RelayV2DialError as e:
safeClose(rc)
raise e
method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
try:
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(transport.TransportDialError, e.msg, e)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
try:

View File
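
With RelayV1DialError and RelayV2DialError now both derived from RelayDialError, which itself extends DialFailedError, one except branch can cover both relay versions. A sketch, assuming the relay client and transport modules are imported and rt/ma are in scope:

import chronos, chronicles

proc dialThroughRelay(
    rt: RelayTransport, ma: MultiAddress
): Future[Opt[Connection]] {.async: (raises: [CancelledError]).} =
  try:
    return Opt.some(await rt.dial(ma))
  except RelayDialError as e:
    # one branch covers RelayV1DialError and RelayV2DialError; since
    # RelayDialError is a DialFailedError, generic dial handling higher
    # up keeps working unchanged
    trace "relay dial failed", description = e.msg
    return Opt.none(Connection)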

@@ -22,12 +22,17 @@ const
proc sendStatus*(
conn: Connection, code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
) {.async: (raises: [CancelledError]).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
conn.writeLp(pb.buffer)
try:
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "error sending relay status", description = e.msg
proc sendHopStatus*(
conn: Connection, code: StatusV2

View File

@@ -148,12 +148,13 @@ proc new*(
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
await conn.writeLp(pb.buffer)
except CancelledError as exc:
trace "cancelled identify handler"
raise exc
except CatchableError as exc:
trace "exception in identify handler", description = exc.msg, conn
@@ -166,7 +167,12 @@ method init*(p: Identify) =
proc identify*(
self: Identify, conn: Connection, remotePeerId: PeerId
): Future[IdentifyInfo] {.async.} =
): Future[IdentifyInfo] {.
async: (
raises:
[IdentityInvalidMsgError, IdentityNoMatchError, LPStreamError, CancelledError]
)
.} =
trace "initiating identify", conn
var message = await conn.readLp(64 * 1024)
if len(message) == 0:
@@ -205,7 +211,7 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64 * 1024)
@@ -224,6 +230,7 @@ proc init*(p: IdentifyPush) =
if not isNil(p.identifyHandler):
await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
trace "cancelled identify push handler"
raise exc
except CatchableError as exc:
info "exception in identify push handler", description = exc.msg, conn
@@ -234,7 +241,9 @@ proc init*(p: IdentifyPush) =
p.handler = handle
p.codec = IdentifyPushCodec
proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, public.} =
proc push*(
p: IdentifyPush, peerInfo: PeerInfo, conn: Connection
) {.public, async: (raises: [CancelledError, LPStreamError]).} =
## Send new `peerInfo`s to a connection
var pb = encodeMsg(peerInfo, conn.observedAddr, true)
await conn.writeLp(pb.buffer)

View File

@@ -23,7 +23,7 @@ proc perf*(
conn: Connection,
sizeToWrite: uint64 = 0,
sizeToRead: uint64 = 0,
): Future[Duration] {.async, public.} =
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
var
size = sizeToWrite
buf: array[PerfSize, byte]

View File

@@ -24,7 +24,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
@@ -47,10 +47,12 @@ proc new*(T: typedesc[Perf]): T {.public.} =
await conn.write(buf[0 ..< toWrite])
size -= toWrite
except CancelledError as exc:
trace "cancelled perf handler"
raise exc
except CatchableError as exc:
trace "exception in perf handler", description = exc.msg, conn
await conn.close()
finally:
await conn.close()
p.handler = handle
p.codec = PerfCodec

View File

@@ -51,7 +51,7 @@ proc new*(
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -61,6 +61,7 @@ method init*(p: Ping) =
if not isNil(p.pingHandler):
await p.pingHandler(conn.peerId)
except CancelledError as exc:
trace "cancelled ping handler"
raise exc
except CatchableError as exc:
trace "exception in ping handler", description = exc.msg, conn
@@ -68,7 +69,11 @@ method init*(p: Ping) =
p.handler = handle
p.codec = PingCodec
proc ping*(p: Ping, conn: Connection): Future[Duration] {.async, public.} =
proc ping*(
p: Ping, conn: Connection
): Future[Duration] {.
public, async: (raises: [CancelledError, LPStreamError, WrongPingAckError])
.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn

View File
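
ping's declared raises list makes the failure modes explicit at the call site. A sketch of a caller under those constraints, assuming the ping module is imported (returning ZeroDuration as a fallback is illustrative):

import chronos, chronicles

proc measureRtt(
    pingProto: Ping, conn: Connection
): Future[Duration] {.async: (raises: [CancelledError]).} =
  try:
    return await pingProto.ping(conn)
  except WrongPingAckError:
    trace "peer returned a mismatched ping payload"
    return ZeroDuration
  except LPStreamError as e:
    trace "ping stream failed", description = e.msg
    return ZeroDuration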

@@ -17,7 +17,9 @@ export results
const DefaultMaxIncomingStreams* = 10
type
LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.async.}
LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.
async: (raises: [CancelledError])
.}
LPProtocol* = ref object of RootObj
codecs*: seq[string]
@@ -28,13 +30,13 @@ type
method init*(p: LPProtocol) {.base, gcsafe.} =
discard
method start*(p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
method start*(p: LPProtocol) {.base, async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
p.started = true
fut
method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
method stop*(p: LPProtocol) {.base, async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
p.started = false
@@ -64,21 +66,6 @@ template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
p.handlerImpl = handler
# Callbacks that are annotated with `{.async: (raises).}` explicitly
# document the types of errors that they may raise, but are not compatible
# with `LPProtoHandler` and need to use a custom `proc` type.
# They are internally wrapped into a `LPProtoHandler`, but still allow the
# compiler to check that their `{.async: (raises).}` annotation is correct.
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
p: LPProtocol,
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
) =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
p.handlerImpl = wrap
proc new*(
T: type LPProtocol,
codecs: seq[string],
@@ -92,17 +79,5 @@ proc new*(
when maxIncomingStreams is int:
Opt.some(maxIncomingStreams)
else:
maxIncomingStreams
,
maxIncomingStreams,
)
proc new*[E](
T: type LPProtocol,
codecs: seq[string],
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
T.new(codec, wrap, maxIncomingStreams)

View File
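
Dropping the InternalRaisesFuture wrapper overloads works because LPProtoHandler itself is now declared with async: (raises: [CancelledError]), so annotated handlers assign directly. A sketch, assuming the new* signature shown above (the codec string is illustrative):

proc myHandler(
    conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
  discard # protocol logic goes here

let lp = LPProtocol.new(codecs = @["/my/proto/1.0.0"], handler = myHandler)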

@@ -101,10 +101,12 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
procCall PubSub(f).unsubscribePeer(peer)
method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
f: FloodSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
raise newException(CatchableError, "Peer msg couldn't be decoded")
raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")
trace "decoded msg from peer", peer, payload = rpcMsg.shortLog
# trigger hooks
@@ -175,24 +177,23 @@ method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
f.updateMetrics(rpcMsg)
method init*(f: FloodSub) =
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/floodsub/1.0.0``, etc...
##
try:
await f.handleConn(conn, proto)
except CancelledError:
# This is a top-level procedure which runs as a separate task, so it
# does not need to propagate CancelledError.
except CancelledError as exc:
trace "Unexpected cancellation in floodsub handler", conn
except CatchableError as exc:
trace "FloodSub handler leaks an error", description = exc.msg, conn
raise exc
f.handler = handler
f.codec = FloodSubCodec
method publish*(f: FloodSub, topic: string, data: seq[byte]): Future[int] {.async.} =
method publish*(
f: FloodSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
# base returns always 0
discard await procCall PubSub(f).publish(topic, data)

View File

@@ -102,6 +102,7 @@ proc init*(
overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit = false,
maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue,
sendIDontWantOnPublish = false,
): GossipSubParams =
GossipSubParams(
explicit: true,
@@ -139,6 +140,7 @@ proc init*(
overheadRateLimit: overheadRateLimit,
disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
sendIDontWantOnPublish: sendIDontWantOnPublish,
)
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -208,19 +210,16 @@ proc validateParameters*(parameters: TopicParams): Result[void, cstring] =
ok()
method init*(g: GossipSub) =
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/floodsub/1.0.0``, etc...
##
try:
await g.handleConn(conn, proto)
except CancelledError:
# This is a top-level procedure which runs as a separate task, so it
# does not need to propagate CancelledError.
except CancelledError as exc:
trace "Unexpected cancellation in gossipsub handler", conn
except CatchableError as exc:
trace "GossipSub handler leaks an error", description = exc.msg, conn
raise exc
g.handler = handler
g.codecs &= GossipSubCodec_12
@@ -383,9 +382,43 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
trace "sending iwant reply messages", peer
g.send(peer, RPCMsg(messages: messages), isHighPriority = false)
proc sendIDontWant(
g: GossipSub,
msg: Message,
msgId: MessageId,
peersToSendIDontWant: HashSet[PubSubPeer],
) =
# If the message is "large enough", let the mesh know that we do not want
# any more copies of it, regardless if it is valid or not.
#
# In the case that it is not valid, this leads to some redundancy
# (since the other peer should not send us an invalid message regardless),
# but the expectation is that this is rare (due to such peers getting
# descored) and that the savings from honest peers are greater than the
# cost a dishonest peer can incur in short time (since the IDONTWANT is
# small).
# IDONTWANT is only supported by >= GossipSubCodec_12
let peers = peersToSendIDontWant.filterIt(
it.codec != GossipSubCodec_10 and it.codec != GossipSubCodec_11
)
g.broadcast(
peers,
RPCMsg(
control: some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
),
isHighPriority = true,
)
const iDontWantMessageSizeThreshold* = 512
proc isLargeMessage(msg: Message, msgId: MessageId): bool =
msg.data.len > max(iDontWantMessageSizeThreshold, msgId.len * 10)
proc validateAndRelay(
g: GossipSub, msg: Message, msgId: MessageId, saltedId: SaltedId, peer: PubSubPeer
) {.async.} =
) {.async: (raises: []).} =
try:
template topic(): string =
msg.topic
@@ -399,29 +432,10 @@ proc validateAndRelay(
toSendPeers.incl(peers[])
toSendPeers.excl(peer)
if msg.data.len > max(512, msgId.len * 10):
# If the message is "large enough", let the mesh know that we do not want
# any more copies of it, regardless if it is valid or not.
#
# In the case that it is not valid, this leads to some redundancy
# (since the other peer should not send us an invalid message regardless),
# but the expectation is that this is rare (due to such peers getting
# descored) and that the savings from honest peers are greater than the
# cost a dishonest peer can incur in short time (since the IDONTWANT is
# small).
if isLargeMessage(msg, msgId):
var peersToSendIDontWant = HashSet[PubSubPeer]()
addToSendPeers(peersToSendIDontWant)
peersToSendIDontWant.exclIfIt(
it.codec == GossipSubCodec_10 or it.codec == GossipSubCodec_11
)
g.broadcast(
peersToSendIDontWant,
RPCMsg(
control:
some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
),
isHighPriority = true,
)
g.sendIDontWant(msg, msgId, peersToSendIDontWant)
let validation = await g.validate(msg)
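
The extracted isLargeMessage helper makes the IDONTWANT trigger testable in isolation; a worked check of the threshold arithmetic (mirroring the helper, with names local to the sketch):

const iDontWantMessageSizeThreshold = 512

proc isLarge(dataLen, msgIdLen: int): bool = # mirrors isLargeMessage
  dataLen > max(iDontWantMessageSizeThreshold, msgIdLen * 10)

assert isLarge(600, 20)      # max(512, 200) = 512 and 600 > 512
assert not isLarge(400, 20)  # 400 <= 512
assert not isLarge(600, 64)  # max(512, 640) = 640 and 600 <= 640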
@@ -490,7 +504,9 @@ proc validateAndRelay(
)
await handleData(g, topic, msg.data)
except CatchableError as exc:
except CancelledError as exc:
info "validateAndRelay failed", description = exc.msg
except PeerRateLimitError as exc:
info "validateAndRelay failed", description = exc.msg
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
@@ -511,7 +527,9 @@ proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
msgSize - payloadSize - controlSize
proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
proc rateLimit*(
g: GossipSub, peer: PubSubPeer, overhead: int
) {.async: (raises: [PeerRateLimitError]).} =
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(overhead):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()])
@@ -524,7 +542,9 @@ proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
PeerRateLimitError, "Peer disconnected because it's above rate limit."
)
method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
g: GossipSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
let msgSize = data.len
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
@@ -534,7 +554,7 @@ method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
# TODO evaluate behaviour penalty values
peer.behaviourPenalty += 0.1
raise newException(CatchableError, "Peer msg couldn't be decoded")
raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")
when defined(libp2p_expensive_metrics):
for m in rpcMsg.messages:
@@ -682,7 +702,9 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.async.} =
method publish*(
g: GossipSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
logScope:
topic
@@ -782,6 +804,9 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
if g.knownTopics.contains(topic):
@@ -792,7 +817,9 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
trace "Published message to peers", peers = peers.len
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc maintainDirectPeer(
g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
if id notin g.peers:
trace "Attempting to dial a direct peer", peer = id
if g.switch.isConnected(id):
@@ -805,14 +832,16 @@ proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.as
except CancelledError as exc:
trace "Direct peer dial canceled"
raise exc
except CatchableError as exc:
except DialFailedError as exc:
debug "Direct peer error dialing", description = exc.msg
proc addDirectPeer*(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc addDirectPeer*(
g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
g.parameters.directPeers[id] = addrs
await g.maintainDirectPeer(id, addrs)
proc maintainDirectPeers(g: GossipSub) {.async.} =
proc maintainDirectPeers(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "GossipSub DirectPeers", 1.minutes:
for id, addrs in g.parameters.directPeers:
await g.addDirectPeer(id, addrs)
@@ -846,9 +875,9 @@ method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
return fut
# stop heartbeat interval
g.directPeersLoop.cancel()
g.scoringHeartbeatFut.cancel()
g.heartbeatFut.cancel()
g.directPeersLoop.cancelSoon()
g.scoringHeartbeatFut.cancelSoon()
g.heartbeatFut.cancelSoon()
g.heartbeatFut = nil
fut

View File
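
The stop hunk above swaps cancel() for cancelSoon(), which only schedules cancellation instead of awaiting or raising, so it is callable from a raises: [] context. A minimal sketch, assuming chronos v4:

import chronos

proc looping() {.async.} =
  while true:
    await sleepAsync(1.seconds)

proc shutdown(fut: Future[void]) {.raises: [].} =
  fut.cancelSoon() # schedules cancellation; never awaits, never raises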

@@ -126,8 +126,7 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
if x.peerId in sprBook:
sprBook[x.peerId].encode().get(default(seq[byte]))
else:
default(seq[byte])
,
default(seq[byte]),
)
proc handleGraft*(
@@ -770,7 +769,7 @@ proc onHeartbeat(g: GossipSub) =
g.mcache.shift() # shift the cache
proc heartbeat*(g: GossipSub) {.async.} =
proc heartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:
trace "running heartbeat", instance = cast[int](g)
g.onHeartbeat()

View File

@@ -131,7 +131,7 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
else:
0.0
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async: (raises: []).} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
@@ -313,12 +313,14 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updated scores", peers = g.peers.len
proc scoringHeartbeat*(g: GossipSub) {.async.} =
proc scoringHeartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "Gossipsub scoring", g.parameters.decayInterval:
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
proc punishInvalidMessage*(
g: GossipSub, peer: PubSubPeer, msg: Message
) {.async: (raises: [PeerRateLimitError]).} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):

View File

@@ -154,6 +154,9 @@ type
# Max number of elements allowed in the non-priority queue. When this limit has been reached, the peer will be disconnected.
maxNumElementsInNonPriorityQueue*: int
# Broadcast an IDONTWANT message automatically when the message exceeds the IDONTWANT message size threshold
sendIDontWantOnPublish*: bool
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]

View File

@@ -125,6 +125,8 @@ declarePublicCounter(
type
InitializationError* = object of LPError
PeerMessageDecodeError* = object of CatchableError
TopicHandler* {.public.} =
proc(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
@@ -327,7 +329,9 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(
p: PubSub, peer: PubSubPeer, data: seq[byte]
): Future[void] {.base, async.} =
): Future[void] {.
base, async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError])
.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
@@ -355,8 +359,15 @@ method getOrCreatePeer*(
peer[].codec = protoNegotiated
return peer[]
proc getConn(): Future[Connection] {.async.} =
return await p.switch.dial(peerId, protosToDial)
proc getConn(): Future[Connection] {.
async: (raises: [CancelledError, GetConnDialError])
.} =
try:
return await p.switch.dial(peerId, protosToDial)
except CancelledError as exc:
raise exc
except DialFailedError as e:
raise (ref GetConnDialError)(parent: e)
proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
p.onPubSubPeerEvent(peer, event)
@@ -376,7 +387,9 @@ method getOrCreatePeer*(
return pubSubPeer
proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
proc handleData*(
p: PubSub, topic: string, data: seq[byte]
): Future[void] {.async: (raises: [], raw: true).} =
# Start work on all data handlers without copying data into closure like
# happens on {.async.} transformation
p.topics.withValue(topic, handlers):
@@ -389,7 +402,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
futs.add(fut)
if futs.len() > 0:
proc waiter(): Future[void] {.async.} =
proc waiter(): Future[void] {.async: (raises: []).} =
# slow path - we have to wait for the handlers to complete
try:
futs = await allFinished(futs)
@@ -397,12 +410,12 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
# propagate cancellation
for fut in futs:
if not (fut.finished):
fut.cancel()
fut.cancelSoon()
# check for errors in futures
for fut in futs:
if fut.failed:
let err = fut.readError()
let err = fut.error()
warn "Error in topic handler", description = err.msg
return waiter()
@@ -412,7 +425,9 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
res.complete()
return res
method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
method handleConn*(
p: PubSub, conn: Connection, proto: string
) {.base, async: (raises: [CancelledError]).} =
## handle incoming connections
##
## this proc will:
@@ -424,7 +439,9 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
## that we're interested in
##
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
proc handler(
peer: PubSubPeer, data: seq[byte]
): Future[void] {.async: (raises: []).} =
# call pubsub rpc handler
p.rpcHandler(peer, data)
@@ -436,7 +453,7 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
trace "pubsub peer handler ended", conn
except CancelledError as exc:
raise exc
except CatchableError as exc:
except PeerMessageDecodeError as exc:
trace "exception ocurred in pubsub handle", description = exc.msg, conn
finally:
await conn.closeWithEOF()
@@ -542,7 +559,7 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
method publish*(
p: PubSub, topic: string, data: seq[byte]
): Future[int] {.base, async, public.} =
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##
## The return value is the number of neighbours that we attempted to send the
@@ -581,7 +598,7 @@ method removeValidator*(
method validate*(
p: PubSub, message: Message
): Future[ValidationResult] {.async, base.} =
): Future[ValidationResult] {.async: (raises: [CancelledError]), base.} =
var pending: seq[Future[ValidationResult]]
trace "about to validate message"
let topic = message.topic
@@ -589,22 +606,27 @@ method validate*(
topic = topic, registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topic = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))
result = ValidationResult.Accept
p.validators.withValue(topic, validators):
for validator in validators[]:
pending.add(validator(topic, message))
var valResult = ValidationResult.Accept
let futs = await allFinished(pending)
for fut in futs:
if fut.failed:
result = ValidationResult.Reject
valResult = ValidationResult.Reject
break
let res = fut.read()
if res != ValidationResult.Accept:
result = res
if res == ValidationResult.Reject:
break
try:
let res = fut.read()
if res != ValidationResult.Accept:
valResult = res
if res == ValidationResult.Reject:
break
except CatchableError as e:
trace "validator for message could not be executed, ignoring",
topic = topic, err = e.msg
valResult = ValidationResult.Ignore
case result
case valResult
of ValidationResult.Accept:
libp2p_pubsub_validation_success.inc()
of ValidationResult.Reject:
@@ -612,6 +634,8 @@ method validate*(
of ValidationResult.Ignore:
libp2p_pubsub_validation_ignore.inc()
valResult
proc init*[PubParams: object | bool](
P: typedesc[PubSub],
switch: Switch,
@@ -656,7 +680,9 @@ proc init*[PubParams: object | bool](
topicsHigh: int.high,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
pubsub.subscribePeer(peerId)
else:

View File
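
validate now folds validator outcomes into an explicit valResult rather than the implicit result variable: a failed validator future forces Reject, an unreadable result downgrades to Ignore (both per the hunk above), and among successful outcomes Reject short-circuits while the last non-Accept result otherwise stands. That core folding can be sketched synchronously (VResult stands in for ValidationResult):

type VResult = enum # stands in for ValidationResult
  Accept
  Reject
  Ignore

proc fold(outcomes: seq[VResult]): VResult =
  var valResult = Accept
  for res in outcomes:
    if res != Accept:
      valResult = res
      if res == Reject:
        break # Reject short-circuits
  valResult

assert fold(@[Accept, Accept]) == Accept
assert fold(@[Accept, Ignore, Accept]) == Ignore
assert fold(@[Ignore, Reject]) == Reject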

@@ -64,6 +64,8 @@ const DefaultMaxNumElementsInNonPriorityQueue* = 1024
type
PeerRateLimitError* = object of CatchableError
GetConnDialError* = object of CatchableError
PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
@@ -79,7 +81,8 @@ type
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
GetConn* =
proc(): Future[Connection] {.async: (raises: [CancelledError, GetConnDialError]).}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].}
# have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
@@ -122,7 +125,7 @@ type
disconnected: bool
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -190,7 +193,7 @@ proc sendObservers(p: PubSubPeer, msg: var RPCMsg) =
if not (isNil(obs.onSend)):
obs.onSend(p, msg)
proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
debug "starting pubsub read loop", conn, peer = p, closed = conn.closed
try:
try:
@@ -206,7 +209,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while",
conn, peer = p, description = exc.msg
except CatchableError as exc:
except LPStreamError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
@@ -215,13 +218,12 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
# This is a top-level procedure which runs as a separate task, so it
# does not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
except CatchableError as exc:
trace "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
proc closeSendConn(
p: PubSubPeer, event: PubSubPeerEventKind
) {.async: (raises: [CancelledError]).} =
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
@@ -235,17 +237,20 @@ proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
p.onEvent(p, PubSubPeerEvent(kind: event))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", description = exc.msg
# don't cleanup p.address else we leak some gossip stat table
proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
proc connectOnce(
p: PubSubPeer
): Future[void] {.async: (raises: [CancelledError, GetConnDialError, LPError]).} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")
let newConn =
try:
await p.getConn().wait(5.seconds)
except AsyncTimeoutError as error:
trace "getConn timed out", description = error.msg
raise (ref LPError)(msg: "Cannot establish send connection")
# When the send channel goes up, subscriptions need to be sent to the
# remote peer - if we had multiple channels up and one goes down, all
@@ -271,7 +276,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
finally:
await p.closeSendConn(PubSubPeerEventKind.StreamClosed)
proc connectImpl(p: PubSubPeer) {.async.} =
proc connectImpl(p: PubSubPeer) {.async: (raises: []).} =
try:
# Keep trying to establish a connection while it's possible to do so - the
# send connection might get disconnected due to a timeout or an unrelated
@@ -282,7 +287,11 @@ proc connectImpl(p: PubSubPeer) {.async.} =
p.connectedFut.complete()
return
await connectOnce(p)
except CatchableError as exc: # never cancelled
except CancelledError as exc:
debug "Could not establish send connection", description = exc.msg
except LPError as exc:
debug "Could not establish send connection", description = exc.msg
except GetConnDialError as exc:
debug "Could not establish send connection", description = exc.msg
proc connect*(p: PubSubPeer) =
@@ -317,7 +326,7 @@ proc clearSendPriorityQueue(p: PubSubPeer) =
value = p.rpcmessagequeue.sendPriorityQueue.len.int64, labelValues = [$p.peerId]
)
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async: (raises: []).} =
# Continuation for a pending `sendMsg` future from below
try:
await msgFut
@@ -331,7 +340,7 @@ proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
await conn.close() # This will clean up the send connection
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledError]).} =
# Slow path of `sendMsg` where msg is held in memory while send connection is
# being set up
if p.sendConn == nil:
@@ -347,7 +356,7 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
@@ -493,7 +502,7 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
proc sendNonPriorityTask(p: PubSubPeer) {.async: (raises: [CancelledError]).} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()

View File

@@ -65,7 +65,10 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true,
): Message {.gcsafe, raises: [LPError].} =
): Message {.gcsafe, raises: [].} =
if sign and peer.isNone():
doAssert(false, "Cannot sign message without peer info")
var msg = Message(data: data, topic: topic)
# order matters, we want to include seqno in the signature
@@ -81,9 +84,6 @@ proc init*(
.expect("Invalid private key!")
.getBytes()
.expect("Couldn't get public key bytes!")
else:
if sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
msg

View File

@@ -303,15 +303,14 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
if ?pb.getRepeatedField(2, msgpbs):
trace "decodeMessages: read messages", count = len(msgpbs)
for item in msgpbs:
# size is constrained at the network level
msgs.add(?decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
msgs.add(?decodeMessage(initProtoBuffer(item)))
else:
trace "decodeMessages: no messages found"
ok(msgs)
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
trace "encodeRpcMsg: encoding message", payload = msg.shortLog()
var pb = initProtoBuffer(maxSize = uint.high)
var pb = initProtoBuffer()
for item in msg.subscriptions:
pb.write(1, item)
for item in msg.messages:
@@ -330,7 +329,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", payload = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var pb = initProtoBuffer(msg)
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ?pb.decodeMessages())
assign(rpcMsg.subscriptions, ?pb.decodeSubscriptions())

View File

@@ -11,15 +11,17 @@
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects, results]
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protocol,
../protobuf/minprotobuf,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore
../utils/semaphore,
../discovery/discoverymngr
export chronicles
@@ -35,8 +37,9 @@ const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
MaximumDuration = 72.hours
MinimumTTL = MinimumDuration.seconds.uint64
MaximumTTL = MaximumDuration.seconds.uint64
MaximumMessageLen = 1 shl 22 # 4MB
MinimumNamespaceLen = 1
MaximumNamespaceLen = 255
RegistrationLimitPerPeer = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
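
The fixed MinimumTTL/MaximumTTL constants become per-instance minTTL/maxTTL fields (see the type hunk further down), still derived from durations; the default conversions work out as:

import chronos

let
  minDuration = 2.hours  # MinimumDuration
  maxDuration = 72.hours # the non-exported maximum above
assert minDuration.seconds.uint64 == 7_200'u64    # default minTTL
assert maxDuration.seconds.uint64 == 259_200'u64  # default maxTTL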
@@ -61,7 +64,7 @@ type
Cookie = object
offset: uint64
ns: string
ns: Opt[string]
Register = object
ns: string
@@ -77,7 +80,7 @@ type
ns: string
Discover = object
ns: string
ns: Opt[string]
limit: Opt[uint64]
cookie: Opt[seq[byte]]
@@ -98,7 +101,8 @@ type
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
result.write(2, c.ns)
if c.ns.isSome():
result.write(2, c.ns.get())
result.finish()
proc encode(r: Register): ProtoBuffer =
@@ -125,7 +129,8 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.ns.isSome():
result.write(1, d.ns.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
@@ -159,13 +164,17 @@ proc encode(msg: Message): ProtoBuffer =
result.finish()
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
var
c: Cookie
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
r2 = pb.getField(2, ns)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
if r2.get(false):
c.ns = Opt.some(ns)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
@@ -217,13 +226,16 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
d: Discover
limit: uint64
cookie: seq[byte]
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, d.ns)
r1 = pb.getField(1, ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r1.get(false):
d.ns = Opt.some(ns)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
@@ -296,7 +308,7 @@ proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
Opt.some(msg)
type
RendezVousError* = object of LPError
RendezVousError* = object of DiscoveryError
RegisteredData = object
expiration: Moment
peerId: PeerId
@@ -320,6 +332,10 @@ type
peers: seq[PeerId]
cookiesSaved: Table[PeerId, Table[string, seq[byte]]]
switch: Switch
minDuration: Duration
maxDuration: Duration
minTTL: uint64
maxTTL: uint64
proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
if spr.len == 0:
@@ -329,7 +345,9 @@ proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(conn: Connection, ttl: uint64) {.async.} =
proc sendRegisterResponse(
conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
@@ -340,7 +358,7 @@ proc sendRegisterResponse(conn: Connection, ttl: uint64) {.async.} =
proc sendRegisterResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
@@ -351,7 +369,7 @@ proc sendRegisterResponseError(
proc sendDiscoverResponse(
conn: Connection, s: seq[Register], cookie: Cookie
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
@@ -366,7 +384,7 @@ proc sendDiscoverResponse(
proc sendDiscoverResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
@@ -395,7 +413,7 @@ proc save(
rdv.registered.add(
RegisteredData(
peerId: peerId,
expiration: Moment.now() + r.ttl.get(MinimumTTL).int64.seconds,
expiration: Moment.now() + r.ttl.get(rdv.minTTL).int64.seconds,
data: r,
)
)
@@ -407,10 +425,10 @@ proc save(
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1 .. 255:
if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
if ttl notin MinimumTTL .. MaximumTTL:
let ttl = r.ttl.get(rdv.minTTL)
if ttl < rdv.minTTL or ttl > rdv.maxTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
@@ -433,10 +451,12 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
except KeyError:
return
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
proc discover(
rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0 .. 255:
if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
@@ -449,20 +469,19 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
return
else:
Cookie(offset: rdv.registered.low().uint64 - 1)
if cookie.ns != d.ns or
cookie.offset notin rdv.registered.low().uint64 .. rdv.registered.high().uint64:
if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get() or
cookie.offset < rdv.registered.low().uint64 or
cookie.offset > rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let
nsSalted = d.ns & rdv.salt
namespaces =
if d.ns != "":
try:
rdv.namespaces[nsSalted]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(cookie.offset.int .. rdv.registered.high())
let namespaces =
if d.ns.isSome():
try:
rdv.namespaces[d.ns.get() & rdv.salt]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
@@ -482,8 +501,10 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
proc advertiseWrap() {.async.} =
proc advertisePeer(
rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
proc advertiseWrap() {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
@@ -504,26 +525,37 @@ proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
rdv.sema.release()
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
await advertiseWrap()
proc advertise*(
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async: (raises: [CancelledError, AdvertiseError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
if ttl < rdv.minDuration or ttl > rdv.maxDuration:
raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = MinimumDuration
) {.async, base.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1 .. 255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration .. MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
raise newException(AdvertiseError, "Wrong Signed Peer Record")
let
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let fut = collect(newSeq()):
for peer in rdv.peers:
let futs = collect(newSeq()):
for peer in peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer)
await allFutures(fut)
rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
await allFutures(futs)
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
let
@@ -540,34 +572,39 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
@[]
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async.} =
let nsSalted = ns & rdv.salt
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
var
s: Table[PeerId, (PeerRecord, Register)]
limit: uint64
d = Discover(ns: ns)
if l <= 0 or l > DiscoverLimit.int:
raise newException(RendezVousError, "Invalid limit")
if ns.len notin 0 .. 255:
raise newException(RendezVousError, "Invalid namespace")
raise newException(AdvertiseError, "Invalid limit")
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
limit = l.uint64
proc requestPeer(peer: PeerId) {.async.} =
proc requestPeer(
peer: PeerId
) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
try:
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
if ns.isSome():
try:
Opt.some(rdv.cookiesSaved[peer][ns.get()])
except KeyError, CatchableError:
Opt.none(seq[byte])
else:
Opt.none(seq[byte])
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(65536)
buf = await conn.readLp(MaximumMessageLen)
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
@@ -581,32 +618,40 @@ proc request*(
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
rdv.cookiesSaved[peer][ns] = cookie
if ns.isSome:
let namespace = ns.get()
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][namespace] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
for r in resp.registrations:
if limit == 0:
return
let ttl = r.ttl.get(MaximumTTL + 1)
if ttl > MaximumTTL:
let ttl = r.ttl.get(rdv.maxTTL + 1)
if ttl > rdv.maxTTL:
continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr:
continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
let (prSaved, rSaved) =
try:
s[pr.peerId]
except KeyError:
raiseAssert "checked with hasKey"
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
limit.dec()
for (_, r) in s.values():
rdv.save(ns, peer, r, false)
if ns.isSome():
for (_, r) in s.values():
rdv.save(ns.get(), peer, r, false)
# copy to avoid resizes during the loop
let peers = rdv.peers
for peer in peers:
if limit == 0:
break
@@ -615,12 +660,24 @@ proc request*(
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception catch in request", description = exc.msg
except CancelledError as e:
raise e
except DialFailedError as e:
trace "failed to dial a peer", description = e.msg
except LPStreamError as e:
trace "failed to communicate with a peer", description = e.msg
return toSeq(s.values()).mapIt(it[0])
proc request*(
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(ns, l, rdv.peers)
proc request*(
rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
@@ -630,16 +687,17 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
except KeyError:
return
proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
# TODO: find a way to improve this, maybe something similar to the advertise
if ns.len notin 1 .. 255:
proc unsubscribe*(
rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(RendezVousError, "Invalid namespace")
rdv.unsubscribeLocally(ns)
let msg = encode(
Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
)
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
proc unsubscribePeer(peerId: PeerId) {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer:
@@ -648,12 +706,24 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
except CatchableError as exc:
trace "exception while unsubscribing", description = exc.msg
for peer in rdv.peers:
discard await rdv.unsubscribePeer(peer).withTimeout(5.seconds)
let futs = collect(newSeq()):
for peer in peerIds:
unsubscribePeer(peer)
await allFutures(futs)
proc unsubscribe*(
rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
rdv.unsubscribeLocally(ns)
await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch = switch
proc handlePeer(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeer(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
@@ -662,7 +732,25 @@ proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch.addPeerEventHandler(handlePeer, Joined)
rdv.switch.addPeerEventHandler(handlePeer, Left)
proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
proc new*(
T: typedesc[RendezVous],
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
if minDuration < 1.minutes:
raise newException(RendezVousError, "TTL too short: 1 minute minimum")
if maxDuration > 72.hours:
raise newException(RendezVousError, "TTL too long: 72 hours maximum")
if minDuration >= maxDuration:
raise newException(RendezVousError, "Minimum TTL longer than maximum")
let
minTTL = minDuration.seconds.uint64
maxTTL = maxDuration.seconds.uint64
let rdv = T(
rng: rng,
salt: string.fromBytes(generateBytes(rng[], 8)),
@@ -670,10 +758,16 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
defaultDT: Moment.now() - 1.days,
#registerEvent: newAsyncEvent(),
sema: newAsyncSemaphore(SemaphoreDefaultSize),
minDuration: minDuration,
maxDuration: maxDuration,
minTTL: minTTL,
maxTTL: maxTTL,
)
logScope:
topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let
buf = await conn.readLp(4096)
@@ -690,6 +784,7 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
trace "cancelled rendezvous handler"
raise exc
except CatchableError as exc:
trace "exception in rendezvous handler", description = exc.msg
@@ -701,14 +796,20 @@ proc new*(T: typedesc[RendezVous], rng: ref HmacDrbgContext = newRng()): T =
return rdv
proc new*(
T: typedesc[RendezVous], switch: Switch, rng: ref HmacDrbgContext = newRng()
): T =
let rdv = T.new(rng)
T: typedesc[RendezVous],
switch: Switch,
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T {.raises: [RendezVousError].} =
let rdv = T.new(rng, minDuration, maxDuration)
rdv.setup(switch)
return rdv
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
proc deletesRegister(
rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", interval:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
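Taken together, a hedged sketch of the reworked rendezvous API, assuming an already-started switch and a known set of rendezvous peers; durations and namespace are illustrative.

# assuming: import chronos, libp2p, libp2p/protocols/rendezvous
let rdv = RendezVous.new(switch, minDuration = 5.minutes, maxDuration = 24.hours)
# advertise under a namespace, to an explicit peer set
await rdv.advertise("my-app", 1.hours, peers)
# namespace is now optional when discovering
let records = await rdv.request(Opt.some("my-app"))
let everything = await rdv.request() # no namespace filter
await rdv.unsubscribe("my-app", peers)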


@@ -17,7 +17,7 @@ const PlainTextCodec* = "/plaintext/1.0.0"
type PlainText* = ref object of Secure
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## plain text doesn't do anything
discard


@@ -82,7 +82,7 @@ method readMessage*(
): Future[seq[byte]] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[SecureConn.readMessage] abstract method not implemented!")
method getWrapped*(s: SecureConn): Connection =
s.stream
@@ -92,7 +92,7 @@ method handshake*(
): Future[SecureConn] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[Secure.handshake] abstract method not implemented!")
proc handleConn(
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
@@ -143,7 +143,7 @@ proc handleConn(
method init*(s: Secure) =
procCall LPProtocol(s).init()
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
trace "handling connection upgrade", proto, conn
try:
# We don't need the result but we
@@ -152,10 +152,10 @@ method init*(s: Secure) =
trace "connection secured", conn
except CancelledError as exc:
warn "securing connection canceled", conn
await conn.close()
raise exc
except LPStreamError as exc:
warn "securing connection failed", description = exc.msg, conn
finally:
await conn.close()
s.handler = handle


@@ -36,12 +36,14 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: []).} =
return concat(toSeq(self.relayAddresses.values)) & listenAddrs
proc reserveAndUpdate(
self: AutoRelayService, relayPid: PeerId, switch: Switch
) {.async.} =
) {.async: (raises: [CatchableError]).} =
# CatchableError is used to simplify the raised errors here: many different
# errors can be raised, and the caller doesn't really care about the cause
while self.running:
let
rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
@@ -58,20 +60,26 @@ proc reserveAndUpdate(
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: AutoRelayService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
if hasBeenSetUp:
proc handlePeerIdentified(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeerIdentified(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "Peer Identified", peerId
if self.relayPeers.len < self.maxNumRelays:
self.peerAvailable.fire()
proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeerLeft(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "Peer Left", peerId
self.relayPeers.withValue(peerId, future):
future[].cancel()
@@ -82,24 +90,31 @@ method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
await self.run(switch)
return hasBeenSetUp
proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
proc manageBackedOff(
self: AutoRelayService, pid: PeerId
) {.async: (raises: [CancelledError]).} =
await sleepAsync(chronos.seconds(5))
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
proc innerRun(
self: AutoRelayService, switch: Switch
) {.async: (raises: [CancelledError]).} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
for k in peers:
if self.relayPeers[k].finished():
self.relayPeers.del(k)
self.relayAddresses.del(k)
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
# To avoid ddosing our peers in certain conditions
self.backingOff.add(k)
asyncSpawn self.manageBackedOff(k)
try:
if self.relayPeers[k].finished():
self.relayPeers.del(k)
self.relayAddresses.del(k)
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
# To avoid ddosing our peers in certain conditions
self.backingOff.add(k)
asyncSpawn self.manageBackedOff(k)
except KeyError:
raiseAssert "checked with in"
# Get all connected relayPeers
self.peerAvailable.clear()
@@ -116,18 +131,25 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)
if self.relayPeers.len() > 0:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
try:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
except ValueError:
raiseAssert "checked with relayPeers.len()"
else:
await self.peerAvailable.wait()
method run*(self: AutoRelayService, switch: Switch) {.async.} =
method run*(
self: AutoRelayService, switch: Switch
) {.async: (raises: [CancelledError]).} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
method stop*(
self: AutoRelayService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false


@@ -39,8 +39,10 @@ proc new*(
proc tryStartingDirectConn(
self: HPService, switch: Switch, peerId: PeerId
): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
proc tryConnect(
address: MultiAddress
): Future[bool] {.async: (raises: [DialFailedError, CancelledError]).} =
debug "Trying to create direct connection", peerId, address
await switch.connect(peerId, @[address], true, false)
debug "Direct connection created."
@@ -57,13 +59,13 @@ proc tryStartingDirectConn(
continue
return false
proc closeRelayConn(relayedConn: Connection) {.async.} =
proc closeRelayConn(relayedConn: Connection) {.async: (raises: [CancelledError]).} =
await sleepAsync(2000.milliseconds) # grace period before closing relayed connection
await relayedConn.close()
proc newConnectedPeerHandler(
self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent
) {.async.} =
) {.async: (raises: [CancelledError]).} =
try:
# Get all connections to the peer. If there is at least one non-relayed connection, return.
let connections = switch.connManager.getConnections()[peerId].mapIt(it.connection)
@@ -86,18 +88,27 @@ proc newConnectedPeerHandler(
switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
await dcutrClient.startSync(switch, peerId, natAddrs)
await closeRelayConn(relayedConn)
except CancelledError as err:
raise err
except CatchableError as err:
debug "Hole punching failed during dcutr", err = err.msg
method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: HPService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
var hasBeenSetup = await procCall Service(self).setup(switch)
hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch)
if hasBeenSetup:
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
try:
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
except LPError as err:
error "Failed to mount Dcutr", err = err.msg
self.newConnectedPeerHandler = proc(peerId: PeerId, event: PeerEvent) {.async.} =
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
await newConnectedPeerHandler(self, switch, peerId, event)
switch.connManager.addPeerEventHandler(
@@ -106,7 +117,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
self.onNewStatusHandler = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
if networkReachability == NetworkReachability.NotReachable and
not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
@@ -121,10 +132,14 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler)
return hasBeenSetup
method run*(self: HPService, switch: Switch) {.async, public.} =
method run*(
self: HPService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
await self.autonatService.run(switch)
method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
self: HPService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
discard await self.autonatService.stop(switch)
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(


@@ -148,7 +148,7 @@ proc expandWildcardAddresses(
method setup*(
self: WildcardAddressResolverService, switch: Switch
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
## Sets up the `WildcardAddressResolverService`.
##
## This method adds the address mapper to the peer's list of address mappers.
@@ -161,7 +161,7 @@ method setup*(
## - A `Future[bool]` that resolves to `true` if the setup was successful, otherwise `false`.
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return expandWildcardAddresses(self.networkInterfaceProvider, listenAddrs)
debug "Setting up WildcardAddressResolverService"
@@ -170,7 +170,9 @@ method setup*(
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: WildcardAddressResolverService, switch: Switch) {.async, public.} =
method run*(
self: WildcardAddressResolverService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
## Runs the WildcardAddressResolverService for a given switch.
##
## It updates the peer information for the provided switch by running the registered address mapper. Any other
@@ -181,7 +183,7 @@ method run*(self: WildcardAddressResolverService, switch: Switch) {.async, publi
method stop*(
self: WildcardAddressResolverService, switch: Switch
): Future[bool] {.async, public.} =
): Future[bool] {.public, async: (raises: [CancelledError]).} =
## Stops the WildcardAddressResolverService.
##
## Handles the shutdown process of the WildcardAddressResolverService for a given switch.


@@ -0,0 +1,63 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import pkg/chronos
import connection, bufferstream
export connection
type
WriteHandler = proc(data: seq[byte]): Future[void] {.
async: (raises: [CancelledError, LPStreamError])
.}
BridgeStream* = ref object of BufferStream
writeHandler: WriteHandler
closeHandler: proc(): Future[void] {.async: (raises: []).}
method write*(
s: BridgeStream, msg: seq[byte]
): Future[void] {.public, async: (raises: [CancelledError, LPStreamError], raw: true).} =
s.writeHandler(msg)
method closeImpl*(s: BridgeStream): Future[void] {.async: (raises: [], raw: true).} =
if not isNil(s.closeHandler):
discard s.closeHandler()
procCall BufferStream(s).closeImpl()
method getWrapped*(s: BridgeStream): Connection =
nil
proc bridgedConnections*(
closeTogether: bool = true, dirA = Direction.In, dirB = Direction.In
): (BridgeStream, BridgeStream) =
let connA = BridgeStream()
let connB = BridgeStream()
connA.dir = dirA
connB.dir = dirB
connA.initStream()
connB.initStream()
connA.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connB.pushData(data)
connB.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connA.pushData(data)
if closeTogether:
connA.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connB.close()
connB.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connA.close()
return (connA, connB)
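A small usage sketch of the new helper: the two ends behave like an in-memory duplex pipe, and with the default closeTogether = true, closing one end closes the other.

# assuming: import chronos, libp2p/stream/bridgestream
let (connA, connB) = bridgedConnections()
await connA.write(@[1'u8, 2, 3]) # surfaces on connB
var buf = newSeq[byte](3)
await connB.readExactly(addr buf[0], 3)
await connA.close() # also closes connB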


@@ -44,8 +44,7 @@ chronicles.formatIt(BufferStream):
proc len*(s: BufferStream): int =
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
else: 0
)
else: 0)
method initStream*(s: BufferStream) =
if s.objName.len == 0:


@@ -124,7 +124,7 @@ proc timeoutMonitor(s: Connection) {.async: (raises: []).} =
return
method getWrapped*(s: Connection): Connection {.base.} =
raiseAssert("Not implemented!")
raiseAssert("[Connection.getWrapped] abstract method not implemented!")
when defined(libp2p_agents_metrics):
proc setShortAgent*(s: Connection, shortAgent: string) =


@@ -43,6 +43,7 @@ type
oid*: Oid
dir*: Direction
closedWithEOF: bool # prevent concurrent calls
isClosedRemotely*: bool
LPStreamError* = object of LPError
LPStreamIncompleteError* = object of LPStreamError
@@ -132,7 +133,7 @@ method readOnce*(
## Reads whatever is available in the stream,
## up to `nbytes`. Will block if nothing is
## available
raiseAssert("Not implemented!")
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
proc readExactly*(
s: LPStream, pbytes: pointer, nbytes: int
@@ -241,7 +242,7 @@ method write*(
async: (raises: [CancelledError, LPStreamError], raw: true), base, public
.} =
# Write `msg` to stream, waiting for the write to be finished
raiseAssert("Not implemented!")
raiseAssert("[LPStream.write] abstract method not implemented!")
proc writeLp*(
s: LPStream, msg: openArray[byte]


@@ -27,6 +27,7 @@ import
protocols/secure/secure,
peerinfo,
utils/semaphore,
./muxers/muxer,
connmanager,
nameresolving/nameresolver,
peerid,
@@ -61,20 +62,26 @@ type
started: bool
services*: seq[Service]
UpgradeError* = object of LPError
Service* = ref object of RootObj
inUse: bool
method setup*(self: Service, switch: Switch): Future[bool] {.base, async.} =
method setup*(
self: Service, switch: Switch
): Future[bool] {.base, async: (raises: [CancelledError]).} =
if self.inUse:
warn "service setup has already been called"
return false
self.inUse = true
return true
method run*(self: Service, switch: Switch) {.base, async.} =
doAssert(false, "Not implemented!")
method run*(self: Service, switch: Switch) {.base, async: (raises: [CancelledError]).} =
doAssert(false, "[Service.run] abstract method not implemented!")
method stop*(self: Service, switch: Switch): Future[bool] {.base, async.} =
method stop*(
self: Service, switch: Switch
): Future[bool] {.base, async: (raises: [CancelledError]).} =
if not self.inUse:
warn "service is already stopped"
return false
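Custom services now override the annotated base methods; a minimal hedged subclass sketch (the name and logging body are illustrative only):

# assuming: import chronos, chronicles, libp2p/switch
type TickService = ref object of Service

method run*(
    self: TickService, switch: Switch
) {.async: (raises: [CancelledError]).} =
  # illustrative body; a real service would do periodic work here
  trace "service tick", peer = switch.peerInfo.peerId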
@@ -135,14 +142,16 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
): Future[void] {.public.} =
): Future[void] {.
public, async: (raises: [DialFailedError, CancelledError], raw: true)
.} =
## Connects to a peer without opening a stream to it
s.dialer.connect(peerId, addrs, forceDial, reuseConnection, dir)
method connect*(
s: Switch, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] =
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError], raw: true).} =
## Connects to a peer and retrieves its PeerId
##
## If the P2P part is missing from the MA and `allowUnknownPeerId` is set
@@ -153,12 +162,18 @@ method connect*(
method dial*(
s: Switch, peerId: PeerId, protos: seq[string]
): Future[Connection] {.public.} =
): Future[Connection] {.
public, async: (raises: [DialFailedError, CancelledError], raw: true)
.} =
## Open a stream to a connected peer with the specified `protos`
s.dialer.dial(peerId, protos)
proc dial*(s: Switch, peerId: PeerId, proto: string): Future[Connection] {.public.} =
proc dial*(
s: Switch, peerId: PeerId, proto: string
): Future[Connection] {.
public, async: (raises: [DialFailedError, CancelledError], raw: true)
.} =
## Open a stream to a connected peer with the specified `proto`
dial(s, peerId, @[proto])
@@ -169,7 +184,9 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.public.} =
): Future[Connection] {.
public, async: (raises: [DialFailedError, CancelledError], raw: true)
.} =
## Connect to a peer and open a stream
## with the specified `protos`
@@ -177,7 +194,9 @@ method dial*(
proc dial*(
s: Switch, peerId: PeerId, addrs: seq[MultiAddress], proto: string
): Future[Connection] {.public.} =
): Future[Connection] {.
public, async: (raises: [DialFailedError, CancelledError], raw: true)
.} =
## Connect to a peer and open a stream
## with the specified `proto`
@@ -200,30 +219,44 @@ proc mount*[T: LPProtocol](
s.ms.addHandler(proto.codecs, proto, matcher)
s.peerInfo.protocols.add(proto.codec)
proc upgrader(switch: Switch, trans: Transport, conn: Connection) {.async.} =
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
await switch.connManager.triggerPeerEvents(
muxed.connection.peerId, PeerEvent(kind: PeerEventKind.Identified, initiator: false)
)
trace "Connection upgrade succeeded"
proc upgrader(
switch: Switch, trans: Transport, conn: Connection
) {.async: (raises: [CancelledError, UpgradeError]).} =
try:
let muxed = await trans.upgrade(conn, Opt.none(PeerId))
switch.connManager.storeMuxer(muxed)
await switch.peerStore.identify(muxed)
await switch.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: false),
)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(UpgradeError, e.msg, e)
proc upgradeMonitor(
switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
) {.async.} =
) {.async: (raises: []).} =
var upgradeSuccessful = false
try:
await switch.upgrader(trans, conn).wait(30.seconds)
except CatchableError as exc:
if exc isnot CancelledError:
libp2p_failed_upgrades_incoming.inc()
if not isNil(conn):
await conn.close()
trace "Exception awaiting connection upgrade", description = exc.msg, conn
trace "Connection upgrade succeeded"
upgradeSuccessful = true
except CancelledError:
trace "Connection upgrade cancelled", conn
except AsyncTimeoutError:
trace "Connection upgrade timeout", conn
libp2p_failed_upgrades_incoming.inc()
except UpgradeError as e:
trace "Connection upgrade failed", description = e.msg, conn
libp2p_failed_upgrades_incoming.inc()
finally:
if (not upgradeSuccessful) and (not isNil(conn)):
await conn.close()
upgrades.release()
proc accept(s: Switch, transport: Transport) {.async.} = # noraises
proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
## switch accept loop, ran for every transport
##
@@ -270,7 +303,7 @@ proc accept(s: Switch, transport: Transport) {.async.} = # noraises
await conn.close()
return
proc stop*(s: Switch) {.async, public.} =
proc stop*(s: Switch) {.public, async: (raises: [CancelledError]).} =
## Stop listening on every transport, and
## close every active connections
@@ -302,7 +335,7 @@ proc stop*(s: Switch) {.async, public.} =
trace "Switch stopped"
proc start*(s: Switch) {.async, public.} =
proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
## Start listening on every transport
if s.started:
@@ -324,7 +357,7 @@ proc start*(s: Switch) {.async, public.} =
for fut in startFuts:
if fut.failed:
await s.stop()
raise fut.error
raise newException(LPError, "starting transports failed", fut.error)
for t in s.transports: # for each transport
if t.addrs.len > 0 or t.running:
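With start now annotated, callers can rely on the declared raises list instead of catching arbitrary transport failures; a hedged sketch:

# assuming: import chronos, chronicles, libp2p
try:
  await switch.start()
except LPError as e:
  error "failed to start transports", description = e.msg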


@@ -0,0 +1,125 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import locks
import tables
import std/sequtils
import stew/byteutils
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../stream/bridgestream
import ../muxers/muxer
type
MemoryTransportError* = object of transport.TransportError
MemoryTransportAcceptStopped* = object of MemoryTransportError
type MemoryListener* = object
address: string
accept: Future[Connection]
onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].}
proc init(
_: type[MemoryListener],
address: string,
onListenerEnd: proc(address: string) {.closure, gcsafe, raises: [].},
): MemoryListener =
return MemoryListener(
accept: newFuture[Connection]("MemoryListener.accept"),
address: address,
onListenerEnd: onListenerEnd,
)
proc close*(self: MemoryListener) =
if not (self.accept.finished):
self.accept.fail(newException(MemoryTransportAcceptStopped, "Listener closed"))
self.onListenerEnd(self.address)
proc accept*(
self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
return self.accept
proc dial*(
self: MemoryListener
): Future[Connection] {.gcsafe, raises: [CatchableError].} =
let (connA, connB) = bridgedConnections()
self.onListenerEnd(self.address)
self.accept.complete(connA)
let dFut = newFuture[Connection]("MemoryListener.dial")
dFut.complete(connB)
return dFut
type memoryConnManager = ref object
listeners: Table[string, MemoryListener]
connections: Table[string, Connection]
lock: Lock
proc init(_: type[memoryConnManager]): memoryConnManager =
var m = memoryConnManager()
initLock(m.lock)
return m
proc onListenerEnd(
self: memoryConnManager
): proc(address: string) {.closure, gcsafe, raises: [].} =
proc cb(address: string) {.closure, gcsafe, raises: [].} =
acquire(self.lock)
defer:
release(self.lock)
try:
if address in self.listeners:
self.listeners.del(address)
except KeyError:
raiseAssert "checked with if"
return cb
proc accept*(
self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
acquire(self.lock)
defer:
release(self.lock)
if address in self.listeners:
raise newException(MemoryTransportError, "Memory address already in use")
let listener = MemoryListener.init(address, self.onListenerEnd())
self.listeners[address] = listener
return listener
proc dial*(
self: memoryConnManager, address: string
): MemoryListener {.raises: [MemoryTransportError].} =
acquire(self.lock)
defer:
release(self.lock)
if address notin self.listeners:
raise newException(MemoryTransportError, "No memory listener found")
try:
return self.listeners[address]
except KeyError:
raiseAssert "checked with if"
let instance: memoryConnManager = memoryConnManager.init()
proc getInstance*(): memoryConnManager {.gcsafe.} =
{.gcsafe.}:
instance


@@ -0,0 +1,128 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## Memory transport implementation
import std/sequtils
import pkg/chronos
import pkg/chronicles
import ./transport
import ../multiaddress
import ../stream/connection
import ../crypto/crypto
import ../upgrademngrs/upgrade
import ../muxers/muxer
import ./memorymanager
export connection
export MemoryTransportError, MemoryTransportAcceptStopped
const MemoryAutoAddress* = "/memory/*"
logScope:
topics = "libp2p memorytransport"
type MemoryTransport* = ref object of Transport
rng: ref HmacDrbgContext
connections: seq[Connection]
listener: Opt[MemoryListener]
proc new*(
T: typedesc[MemoryTransport],
upgrade: Upgrade = Upgrade(),
rng: ref HmacDrbgContext = newRng(),
): T =
T(upgrader: upgrade, rng: rng)
proc listenAddress(self: MemoryTransport, ma: MultiAddress): MultiAddress =
if $ma != MemoryAutoAddress:
return ma
# when the special address `/memory/*` is used, pick any free address.
# here we assume that any randomly generated address will be free.
var randomBuf: array[10, byte]
hmacDrbgGenerate(self.rng[], randomBuf)
return MultiAddress.init("/memory/" & toHex(randomBuf)).get()
method start*(
self: MemoryTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
if self.running:
return
trace "starting memory transport on addrs", address = $addrs
self.addrs = addrs.mapIt(self.listenAddress(it))
self.running = true
method stop*(self: MemoryTransport) {.async: (raises: []).} =
if not self.running:
return
trace "stopping memory transport", address = $self.addrs
self.running = false
# closing the listener raises an interruption error in the caller of accept()
let listener = self.listener
if listener.isSome:
listener.get().close()
# end all connections
await noCancel allFutures(self.connections.mapIt(it.close()))
method accept*(
self: MemoryTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
if not self.running:
raise newException(MemoryTransportError, "Transport closed, no more connections!")
var listener: MemoryListener
try:
listener = getInstance().accept($self.addrs[0])
self.listener = Opt.some(listener)
let conn = await listener.accept()
self.connections.add(conn)
self.listener = Opt.none(MemoryListener)
return conn
except CancelledError as e:
listener.close()
raise e
except MemoryTransportError as e:
raise e
except CatchableError:
raiseAssert "should never happen"
method dial*(
self: MemoryTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
try:
let listener = getInstance().dial($ma)
let conn = await listener.dial()
self.connections.add(conn)
return conn
except CancelledError as e:
raise e
except MemoryTransportError as e:
raise e
except CatchableError:
raiseAssert "should never happen"
proc dial*(
self: MemoryTransport, ma: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)
): Future[Connection] {.gcsafe.} =
self.dial("", ma)
method handles*(self: MemoryTransport, ma: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(self).handles(ma):
if ma.protocols.isOk:
return Memory.match(ma)
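A hedged end-to-end sketch of the memory transport (names are illustrative): the wildcard address resolves to a random free in-memory address at start.

# assuming: import chronos, libp2p/transports/memorytransport,
#           libp2p/upgrademngrs/upgrade
let listener = MemoryTransport.new(Upgrade())
await listener.start(@[MultiAddress.init(MemoryAutoAddress).tryGet()])
let acceptFut = listener.accept()
let dialer = MemoryTransport.new(Upgrade())
let clientConn = await dialer.dial(listener.addrs[0]) # resolved address
let serverConn = await acceptFut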


@@ -2,6 +2,7 @@ import std/sequtils
import pkg/chronos
import pkg/chronicles
import pkg/quic
import results
import ../multiaddress
import ../multicodec
import ../stream/connection
@@ -9,6 +10,7 @@ import ../wire
import ../muxers/muxer
import ../upgrademngrs/upgrade
import ./transport
import tls/certificate
export multiaddress
export multicodec
@@ -21,6 +23,11 @@ logScope:
type
P2PConnection = connection.Connection
QuicConnection = quic.Connection
QuicTransportError* = object of transport.TransportError
QuicTransportDialError* = object of transport.TransportDialError
QuicTransportAcceptStopped* = object of QuicTransportError
const alpn = "libp2p"
# Stream
type QuicStream* = ref object of P2PConnection
@@ -73,21 +80,25 @@ method closeImpl*(stream: QuicStream) {.async: (raises: []).} =
type QuicSession* = ref object of P2PConnection
connection: QuicConnection
method close*(session: QuicSession) {.async, base.} =
await session.connection.close()
method close*(session: QuicSession) {.async: (raises: []).} =
safeClose(session.connection)
await procCall P2PConnection(session).close()
proc getStream*(
session: QuicSession, direction = Direction.In
): Future[QuicStream] {.async.} =
var stream: Stream
case direction
of Direction.In:
stream = await session.connection.incomingStream()
of Direction.Out:
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
return QuicStream.new(stream, session.observedAddr, session.peerId)
): Future[QuicStream] {.async: (raises: [QuicTransportError]).} =
try:
var stream: Stream
case direction
of Direction.In:
stream = await session.connection.incomingStream()
of Direction.Out:
stream = await session.connection.openStream()
await stream.write(@[]) # QUIC streams do not exist until data is sent
return QuicStream.new(stream, session.observedAddr, session.peerId)
except CatchableError as exc:
# TODO: incomingStream is using {.async.} with no raises
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
@@ -107,7 +118,7 @@ method newStream*(
except CatchableError as exc:
raise newException(MuxerError, exc.msg, exc)
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async.} =
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
## call the muxer stream handler for this channel
##
try:
@@ -129,84 +140,194 @@ method handle*(m: QuicMuxer): Future[void] {.async: (raises: []).} =
method close*(m: QuicMuxer) {.async: (raises: []).} =
try:
await m.quicSession.close()
m.handleFut.cancel()
m.handleFut.cancelSoon()
except CatchableError as exc:
discard
# Transport
type QuicUpgrade = ref object of Upgrade
type CertGenerator =
proc(kp: KeyPair): CertificateX509 {.gcsafe, raises: [TLSCertificateError].}
type QuicTransport* = ref object of Transport
listener: Listener
client: QuicClient
privateKey: PrivateKey
connections: seq[P2PConnection]
rng: ref HmacDrbgContext
certGenerator: CertGenerator
func new*(_: type QuicTransport, u: Upgrade): QuicTransport =
QuicTransport(upgrader: QuicUpgrade(ms: u.ms))
proc makeCertificateVerifier(): CertificateVerifier =
proc certificateVerifier(serverName: string, certificatesDer: seq[seq[byte]]): bool =
if certificatesDer.len != 1:
trace "CertificateVerifier: expected one certificate in the chain",
cert_count = certificatesDer.len
return false
method handles*(transport: QuicTransport, address: MultiAddress): bool =
let cert =
try:
parse(certificatesDer[0])
except CertificateParsingError as e:
trace "CertificateVerifier: failed to parse certificate", msg = e.msg
return false
return cert.verify()
return CustomCertificateVerifier.init(certificateVerifier)
proc defaultCertGenerator(
kp: KeyPair
): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
return generateX509(kp, encodingFormat = EncodingFormat.PEM)
proc new*(_: type QuicTransport, u: Upgrade, privateKey: PrivateKey): QuicTransport =
return QuicTransport(
upgrader: QuicUpgrade(ms: u.ms),
privateKey: privateKey,
certGenerator: defaultCertGenerator,
)
proc new*(
_: type QuicTransport,
u: Upgrade,
privateKey: PrivateKey,
certGenerator: CertGenerator,
): QuicTransport =
return QuicTransport(
upgrader: QuicUpgrade(ms: u.ms),
privateKey: privateKey,
certGenerator: certGenerator,
)
method handles*(transport: QuicTransport, address: MultiAddress): bool {.raises: [].} =
if not procCall Transport(transport).handles(address):
return false
QUIC_V1.match(address)
method start*(transport: QuicTransport, addrs: seq[MultiAddress]) {.async.} =
doAssert transport.listener.isNil, "start() already called"
method start*(
self: QuicTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
doAssert self.listener.isNil, "start() already called"
#TODO handle multiple addr
transport.listener = listen(initTAddress(addrs[0]).tryGet)
await procCall Transport(transport).start(addrs)
transport.addrs[0] =
MultiAddress.init(transport.listener.localAddress(), IPPROTO_UDP).tryGet() &
MultiAddress.init("/quic-v1").get()
transport.running = true
method stop*(transport: QuicTransport) {.async.} =
let pubkey = self.privateKey.getPublicKey().valueOr:
doAssert false, "could not obtain public key"
return
try:
if self.rng.isNil:
self.rng = newRng()
let cert = self.certGenerator(KeyPair(seckey: self.privateKey, pubkey: pubkey))
let tlsConfig = TLSConfig.init(
cert.certificate, cert.privateKey, @[alpn], Opt.some(makeCertificateVerifier())
)
self.client = QuicClient.init(tlsConfig, rng = self.rng)
self.listener =
QuicServer.init(tlsConfig, rng = self.rng).listen(initTAddress(addrs[0]).tryGet)
await procCall Transport(self).start(addrs)
self.addrs[0] =
MultiAddress.init(self.listener.localAddress(), IPPROTO_UDP).tryGet() &
MultiAddress.init("/quic-v1").get()
except QuicConfigError as exc:
doAssert false, "invalid quic setup: " & $exc.msg
except TLSCertificateError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
except QuicError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
except TransportOsError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
self.running = true
method stop*(transport: QuicTransport) {.async: (raises: []).} =
if transport.running:
for c in transport.connections:
await c.close()
await procCall Transport(transport).stop()
await transport.listener.stop()
try:
await transport.listener.stop()
except CatchableError as exc:
trace "Error shutting down Quic transport", description = exc.msg
transport.listener.destroy()
transport.running = false
transport.listener = nil
proc wrapConnection(
transport: QuicTransport, connection: QuicConnection
): P2PConnection {.raises: [Defect, TransportOsError, LPError].} =
): QuicSession {.raises: [TransportOsError, MaError].} =
let
remoteAddr = connection.remoteAddress()
observedAddr =
MultiAddress.init(remoteAddr, IPPROTO_UDP).get() &
MultiAddress.init("/quic-v1").get()
conres = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
conres.initStream()
session = QuicSession(connection: connection, observedAddr: Opt.some(observedAddr))
transport.connections.add(conres)
proc onClose() {.async.} =
await conres.join()
transport.connections.keepItIf(it != conres)
session.initStream()
transport.connections.add(session)
proc onClose() {.async: (raises: []).} =
await noCancel session.join()
transport.connections.keepItIf(it != session)
trace "Cleaned up client"
asyncSpawn onClose()
return conres
method accept*(transport: QuicTransport): Future[P2PConnection] {.async.} =
doAssert not transport.listener.isNil, "call start() before calling accept()"
let connection = await transport.listener.accept()
return transport.wrapConnection(connection)
return session
method accept*(
self: QuicTransport
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
doAssert not self.listener.isNil, "call start() before calling accept()"
if not self.running:
# stop accepting only when the transport is stopped (not when an error occurs)
raise newException(QuicTransportAcceptStopped, "Quic transport stopped")
try:
let connection = await self.listener.accept()
return self.wrapConnection(connection)
except CancelledError as exc:
raise exc
except QuicError as exc:
debug "Quic Error", description = exc.msg
except MaError as exc:
debug "Multiaddr Error", description = exc.msg
except CatchableError as exc: # TODO: removing this requires async/raises in nim-quic
info "Unexpected error accepting quic connection", description = exc.msg
except TransportOsError as exc:
debug "OS Error", description = exc.msg
method dial*(
transport: QuicTransport,
self: QuicTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[P2PConnection] {.async, gcsafe.} =
let connection = await dial(initTAddress(address).tryGet)
return transport.wrapConnection(connection)
): Future[connection.Connection] {.
async: (raises: [transport.TransportError, CancelledError])
.} =
try:
let quicConnection = await self.client.dial(initTAddress(address).tryGet)
return self.wrapConnection(quicConnection)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(QuicTransportDialError, e.msg, e)
method upgrade*(
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]
): Future[Muxer] {.async: (raises: [CancelledError, LPError]).} =
let qs = QuicSession(conn)
if peerId.isSome:
qs.peerId = peerId.get()
qs.peerId =
if peerId.isSome:
peerId.get()
else:
let certificates = qs.connection.certificates()
let cert = parse(certificates[0])
cert.peerId()
let muxer = QuicMuxer(quicSession: qs, connection: conn)
muxer.streamHandler = proc(conn: P2PConnection) {.async: (raises: []).} =
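A hedged construction sketch reflecting the new certificate plumbing, assuming a peer PrivateKey is at hand (the random-key call is assumed from the library's crypto module); the custom certificate generator variant is optional.

# assuming: import libp2p/crypto/crypto, libp2p/transports/quictransport,
#           libp2p/upgrademngrs/upgrade
let privateKey = PrivateKey.random(ECDSA, newRng()[]).tryGet()
let quicTransport = QuicTransport.new(Upgrade(), privateKey)

# optional: supply a custom certificate generator matching CertGenerator
proc myCertGen(
    kp: KeyPair
): CertificateX509 {.gcsafe, raises: [TLSCertificateError].} =
  generateX509(kp, encodingFormat = EncodingFormat.PEM)

let customTransport = QuicTransport.new(Upgrade(), privateKey, myCertGen)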


@@ -99,122 +99,107 @@ proc new*(
if ServerFlags.TcpNoDelay in flags:
{SocketFlags.TcpNoDelay}
else:
default(set[SocketFlags])
,
default(set[SocketFlags]),
upgrader: upgrade,
networkReachability: NetworkReachability.Unknown,
connectionsTimeout: connectionsTimeout,
)
method start*(self: TcpTransport, addrs: seq[MultiAddress]): Future[void] =
method start*(
self: TcpTransport, addrs: seq[MultiAddress]
): Future[void] {.async: (raises: [LPError, transport.TransportError]).} =
## Start transport listening to the given addresses - for dial-only transports,
## start with an empty list
# TODO remove `impl` indirection throughout when `raises` is added to base
if self.running:
warn "TCP transport already running"
return
proc impl(
self: TcpTransport, addrs: seq[MultiAddress]
): Future[void] {.async: (raises: [transport.TransportError, CancelledError]).} =
if self.running:
warn "TCP transport already running"
return
trace "Starting TCP transport"
trace "Starting TCP transport"
self.flags.incl(ServerFlags.ReusePort)
self.flags.incl(ServerFlags.ReusePort)
var supported: seq[MultiAddress]
var initialized = false
try:
for i, ma in addrs:
if not self.handles(ma):
trace "Invalid address detected, skipping!", address = ma
continue
var supported: seq[MultiAddress]
var initialized = false
try:
for i, ma in addrs:
if not self.handles(ma):
trace "Invalid address detected, skipping!", address = ma
continue
let
ta = initTAddress(ma).expect("valid address per handles check above")
server =
try:
createStreamServer(ta, flags = self.flags)
except common.TransportError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
let
ta = initTAddress(ma).expect("valid address per handles check above")
server =
try:
createStreamServer(ta, flags = self.flags)
except common.TransportError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
self.servers &= server
self.servers &= server
trace "Listening on", address = ma
supported.add(
MultiAddress.init(server.sock.getLocalAddress()).expect(
"Can init from local address"
)
trace "Listening on", address = ma
supported.add(
MultiAddress.init(server.sock.getLocalAddress()).expect(
"Can init from local address"
)
initialized = true
finally:
if not initialized:
# Clean up partial success on exception
await noCancel allFutures(self.servers.mapIt(it.closeWait()))
reset(self.servers)
try:
await procCall Transport(self).start(supported)
except CatchableError:
raiseAssert "Base method does not raise"
trackCounter(TcpTransportTrackerName)
impl(self, addrs)
method stop*(self: TcpTransport): Future[void] =
## Stop the transport and close all connections it created
proc impl(self: TcpTransport) {.async: (raises: []).} =
trace "Stopping TCP transport"
self.stopping = true
defer:
self.stopping = false
if self.running:
# Reset the running flag
try:
await noCancel procCall Transport(self).stop()
except CatchableError: # TODO remove when `accept` is annotated with raises
raiseAssert "doesn't actually raise"
# Stop each server by closing the socket - this will cause all accept loops
# to fail - since the running flag has been reset, it's also safe to close
# all known clients since no more of them will be added
await noCancel allFutures(
self.servers.mapIt(it.closeWait()) &
self.clients[Direction.In].mapIt(it.closeWait()) &
self.clients[Direction.Out].mapIt(it.closeWait())
)
self.servers = @[]
initialized = true
finally:
if not initialized:
# Clean up partial success on exception
await noCancel allFutures(self.servers.mapIt(it.closeWait()))
reset(self.servers)
for acceptFut in self.acceptFuts:
if acceptFut.completed():
await acceptFut.value().closeWait()
self.acceptFuts = @[]
await procCall Transport(self).start(supported)
if self.clients[Direction.In].len != 0 or self.clients[Direction.Out].len != 0:
# Future updates could consider turning this warn into an assert since
# it should never happen if the shutdown code is correct
warn "Couldn't clean up clients",
len = self.clients[Direction.In].len + self.clients[Direction.Out].len
trackCounter(TcpTransportTrackerName)
trace "Transport stopped"
untrackCounter(TcpTransportTrackerName)
else:
# For legacy reasons, `stop` on a transport that wasn't started is
# expected to close outgoing connections created by the transport
warn "TCP transport already stopped"
method stop*(self: TcpTransport): Future[void] {.async: (raises: []).} =
  ## Stop the transport and close all connections it created
  trace "Stopping TCP transport"
  self.stopping = true
  defer:
    self.stopping = false

  if self.running:
    # Reset the running flag
    await noCancel procCall Transport(self).stop()
    # Stop each server by closing the socket - this will cause all accept loops
    # to fail - since the running flag has been reset, it's also safe to close
    # all known clients since no more of them will be added
    await noCancel allFutures(
      self.servers.mapIt(it.closeWait()) &
        self.clients[Direction.In].mapIt(it.closeWait()) &
        self.clients[Direction.Out].mapIt(it.closeWait())
    )
    self.servers = @[]

    for acceptFut in self.acceptFuts:
      if acceptFut.completed():
        await acceptFut.value().closeWait()
    self.acceptFuts = @[]

    if self.clients[Direction.In].len != 0 or self.clients[Direction.Out].len != 0:
      # Future updates could consider turning this warn into an assert since
      # it should never happen if the shutdown code is correct
      warn "Couldn't clean up clients",
        len = self.clients[Direction.In].len + self.clients[Direction.Out].len

    trace "Transport stopped"
    untrackCounter(TcpTransportTrackerName)
  else:
    # For legacy reasons, `stop` on a transport that wasn't started is
    # expected to close outgoing connections created by the transport
    warn "TCP transport already stopped"

    doAssert self.clients[Direction.In].len == 0,
      "No incoming connections possible without start"
    await noCancel allFutures(self.clients[Direction.Out].mapIt(it.closeWait()))
method accept*(
self: TcpTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
## accept a new TCP connection, returning nil on non-fatal errors
##
## Raises an exception when the transport is broken and cannot be used for
@@ -223,130 +208,121 @@ method accept*(self: TcpTransport): Future[Connection] =
# information is lost and must be logged here instead of being
# available to the caller - further refactoring should propagate errors
# to the caller instead
  proc cancelAcceptFuts() =
    for fut in self.acceptFuts:
      if not fut.completed():
        fut.cancel()

  if not self.running:
    raise newTransportClosedError()

  if self.servers.len == 0:
    raise (ref TcpTransportError)(msg: "No listeners configured")
  elif self.acceptFuts.len == 0:
    # Holds futures representing ongoing accept calls on multiple servers.
    self.acceptFuts = self.servers.mapIt(it.accept())

  let
    finished =
      try:
        # Waits for any one of these futures to complete, indicating that a new
        # connection has been accepted on one of the servers.
        await one(self.acceptFuts)
      except ValueError:
        raiseAssert "Accept futures should not be empty"
      except CancelledError as exc:
        cancelAcceptFuts()
        raise exc
    index = self.acceptFuts.find(finished)

  # A new connection has been accepted. The corresponding server should
  # immediately start accepting another connection. Thus we replace the
  # completed future with a new one by calling accept on the same server again.
  self.acceptFuts[index] = self.servers[index].accept()

  let transp =
    try:
      await finished
    except TransportTooManyError as exc:
      debug "Too many files opened", description = exc.msg
      return nil
    except TransportAbortedError as exc:
      debug "Connection aborted", description = exc.msg
      return nil
    except TransportUseClosedError as exc:
      raise newTransportClosedError(exc)
    except TransportOsError as exc:
      raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
    except common.TransportError as exc: # Needed for chronos 4.0.0 support
      raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
    except CancelledError as exc:
      cancelAcceptFuts()
      raise exc

  if not self.running: # Stopped while waiting
    safeCloseWait(transp)
    raise newTransportClosedError()

  let remote =
    try:
      transp.remoteAddress
    except TransportOsError as exc:
      # The connection had errors / was closed before `await` returned control
      safeCloseWait(transp)
      debug "Cannot read remote address", description = exc.msg
      return nil

  let observedAddr =
    MultiAddress.init(remote).expect("Can initialize from remote address")
  self.connHandler(transp, Opt.some(observedAddr), Direction.In)
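The nil-on-non-fatal-error contract above moves retry policy to the caller. A minimal caller-side sketch, assuming a started `TcpTransport`; not part of this diff:

proc acceptLoop(self: TcpTransport) {.async.} =
  while self.running:
    var conn: Connection
    try:
      conn = await self.accept()
    except transport.TransportError:
      break # transport closed or broken - leave the loop
    if conn.isNil:
      continue # transient failure (fd exhaustion, aborted handshake) - retry
    # hand `conn` to the upgrader / connection manager here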
method dial*(
self: TcpTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  ## dial a peer
  if self.stopping:
    raise newTransportClosedError()

  let ta = initTAddress(address).valueOr:
    raise (ref TcpTransportError)(msg: "Unsupported address: " & $address)

  trace "Dialing remote peer", address = $address
  let transp =
    try:
      await(
        if self.networkReachability == NetworkReachability.NotReachable and
            self.addrs.len > 0:
          let local = initTAddress(self.addrs[0]).expect("self address is valid")
          self.clientFlags.incl(SocketFlags.ReusePort)
          connect(ta, flags = self.clientFlags, localAddress = local)
        else:
          connect(ta, flags = self.clientFlags)
      )
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      raise (ref TcpTransportError)(msg: exc.msg, parent: exc)

  # If `stop` is called after `connect` but before `await` returns, we might
  # end up with a race condition where `stop` returns but not all connections
  # have been closed - we drop connections in this case in order not to leak
  # them
  if self.stopping:
    # Stopped while waiting for new connection
    safeCloseWait(transp)
    raise newTransportClosedError()

  let observedAddr =
    try:
      MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
    except TransportOsError as exc:
      safeCloseWait(transp)
      raise (ref TcpTransportError)(msg: exc.msg)

  self.connHandler(transp, Opt.some(observedAddr), Direction.Out)
method handles*(t: TcpTransport, address: MultiAddress): bool {.raises: [].} =
  if procCall Transport(t).handles(address):
    if address.protocols.isOk:
      return TCP.match(address)
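For reference, a hedged sketch of what the `TCP.match` check above accepts; the addresses are illustrative, `t` is a hypothetical `TcpTransport`, and the snippet is not part of this diff:

let tcpMa = MultiAddress.init("/ip4/127.0.0.1/tcp/4001").tryGet()
let wsMa = MultiAddress.init("/ip4/127.0.0.1/tcp/4001/ws").tryGet()
assert t.handles(tcpMa) # plain ip/tcp matches
assert not t.handles(wsMa) # additional protocols (here /ws) do not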

View File

@@ -0,0 +1,962 @@
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/objects.h>
#include <openssl/pem.h>
#include <openssl/rand.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include <stdio.h>
#include <stdlib.h> /* calloc, malloc, free */
#include <string.h>
#include <time.h>
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
#include <openssl/core_names.h>
#include <openssl/param_build.h>
#include <openssl/types.h>
#else
#include <openssl/rand_drbg.h>
#endif
#include "certificate.h"
#define LIBP2P_OID "1.3.6.1.4.1.53594.1.1"
struct cert_context_s {
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
OSSL_LIB_CTX *lib_ctx; /* OpenSSL library context */
EVP_RAND_CTX *drbg; /* DRBG context */
#else
RAND_DRBG *drbg;
#endif
};
struct cert_key_s {
EVP_PKEY *pkey; /* OpenSSL EVP_PKEY */
};
// Function to initialize CTR_DRBG
cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
cert_context_t *ctx) {
if (!seed) {
return CERT_ERROR_NULL_PARAM;
}
// Allocate context
struct cert_context_s *c = calloc(1, sizeof(struct cert_context_s));
if (c == NULL) {
return CERT_ERROR_MEMORY;
}
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
EVP_RAND_CTX *drbg = NULL;
EVP_RAND *rand_algo = NULL;
OSSL_LIB_CTX *libctx = OSSL_LIB_CTX_new(); // Create a new library context
if (!libctx) {
free(c); // avoid leaking the context allocated above
return CERT_ERROR_MEMORY;
}
rand_algo = EVP_RAND_fetch(libctx, "CTR-DRBG", NULL);
if (!rand_algo) {
OSSL_LIB_CTX_free(libctx);
free(c);
return CERT_ERROR_DRBG_INIT;
}
drbg = EVP_RAND_CTX_new(rand_algo, NULL);
EVP_RAND_free(rand_algo); // Free the algorithm object, no longer needed
if (!drbg) {
OSSL_LIB_CTX_free(libctx);
free(c);
return CERT_ERROR_MEMORY;
}
OSSL_PARAM params[2];
params[0] = OSSL_PARAM_construct_utf8_string(OSSL_DRBG_PARAM_CIPHER,
"AES-256-CTR", 0);
params[1] = OSSL_PARAM_construct_end();
if (!EVP_RAND_CTX_set_params(drbg, params)) {
RANDerr(0, RAND_R_ERROR_INITIALISING_DRBG);
EVP_RAND_CTX_free(drbg);
OSSL_LIB_CTX_free(libctx);
free(c);
return CERT_ERROR_DRBG_CONFIG;
}
int res = EVP_RAND_instantiate(drbg, 0, 0, (const unsigned char *)seed,
seed_len, NULL);
if (res != 1) {
EVP_RAND_CTX_free(drbg);
OSSL_LIB_CTX_free(libctx);
free(c);
return CERT_ERROR_DRBG_SEED;
}
c->lib_ctx = libctx;
c->drbg = drbg;
#else
RAND_DRBG *drbg = RAND_DRBG_new(NID_aes_256_ctr, 0, NULL);
if (!drbg) {
free(c); // avoid leaking the context allocated above
return CERT_ERROR_DRBG_INIT;
}
if (RAND_DRBG_instantiate(drbg, (const unsigned char *)seed, seed_len) != 1) {
RAND_DRBG_free(drbg);
free(c);
return CERT_ERROR_DRBG_SEED;
}
c->drbg = drbg;
#endif
*ctx = c;
return CERT_SUCCESS;
}
// Function to free CTR_DRBG context resources
void cert_free_ctr_drbg(cert_context_t ctx) {
if (ctx == NULL)
return;
struct cert_context_s *c = (struct cert_context_s *)ctx;
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
EVP_RAND_CTX_free(c->drbg);
OSSL_LIB_CTX_free(c->lib_ctx);
#else
RAND_DRBG_free(c->drbg);
#endif
free(c);
}
// Function to ensure the libp2p OID is registered
int ensure_libp2p_oid() {
int nid = OBJ_txt2nid(LIBP2P_OID);
if (nid == NID_undef) {
// OID not yet registered, create it
nid = OBJ_create(LIBP2P_OID, "libp2p_tls", "libp2p TLS extension");
if (!nid) {
return CERT_ERROR_NID;
}
}
return nid;
}
// Function to generate a key
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out) {
unsigned char priv_key_bytes[32]; // 256 bits for secp256r1
BIGNUM *priv_bn = NULL;
EVP_PKEY *pkey = NULL;
cert_error_t ret_code = CERT_SUCCESS;
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
EVP_PKEY_CTX *pctx;
#else
EC_KEY *ec_key = NULL;
#endif
if (ctx == NULL || out == NULL) {
return CERT_ERROR_NULL_PARAM;
}
// Allocate key structure
struct cert_key_s *key = calloc(1, sizeof(struct cert_key_s));
if (key == NULL) {
return CERT_ERROR_MEMORY;
}
// Generate random bytes for private key using our RNG
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
if (EVP_RAND_generate(ctx->drbg, priv_key_bytes, sizeof(priv_key_bytes), 0, 0,
NULL, 0) <= 0) {
ret_code = CERT_ERROR_RAND;
goto cleanup;
}
#else
if (RAND_DRBG_bytes(ctx->drbg, priv_key_bytes, sizeof(priv_key_bytes)) != 1) {
ret_code = CERT_ERROR_RAND;
goto cleanup;
}
#endif
// Convert bytes to BIGNUM for private key
priv_bn = BN_bin2bn(priv_key_bytes, sizeof(priv_key_bytes), NULL);
if (!priv_bn) {
ret_code = CERT_ERROR_BIGNUM_CONV;
goto cleanup;
}
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);
if (!pctx) {
ret_code = CERT_ERROR_KEY_GEN;
goto cleanup;
}
if (EVP_PKEY_keygen_init(pctx) <= 0) {
ret_code = CERT_ERROR_INIT_KEYGEN;
goto cleanup;
}
if (EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, NID_X9_62_prime256v1) <= 0) {
fprintf(stderr, "Error setting curve\n");
ret_code = CERT_ERROR_SET_CURVE;
goto cleanup;
}
// Generate the public key from the private key
if (EVP_PKEY_keygen(pctx, &pkey) <= 0) {
ret_code = CERT_ERROR_KEY_GEN;
goto cleanup;
}
#else
// Create EC key from random bytes
ec_key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1);
if (!ec_key) {
ret_code = CERT_ERROR_ECKEY_GEN;
goto cleanup;
}
// Set private key and compute public key
if (!EC_KEY_set_private_key(ec_key, priv_bn)) {
ret_code = CERT_ERROR_SET_KEY;
goto cleanup;
}
// Generate the public key from the private key
if (!EC_KEY_generate_key(ec_key)) {
ret_code = CERT_ERROR_KEY_GEN;
goto cleanup;
}
// Convert EC_KEY to EVP_PKEY
pkey = EVP_PKEY_new();
if (!pkey || !EVP_PKEY_set1_EC_KEY(pkey, ec_key)) {
ret_code = CERT_ERROR_EVP_PKEY_EC_KEY;
goto cleanup;
}
#endif
key->pkey = pkey;
*out = key;
cleanup:
OPENSSL_cleanse(priv_key_bytes, sizeof(priv_key_bytes));
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
if (pctx)
EVP_PKEY_CTX_free(pctx);
#else
if (ec_key)
EC_KEY_free(ec_key);
#endif
if (priv_bn)
BN_free(priv_bn);
if (ret_code != CERT_SUCCESS) {
if (pkey)
EVP_PKEY_free(pkey);
free(key);
*out = NULL;
}
return ret_code;
}
int init_cert_buffer(cert_buffer **buffer, const unsigned char *src_data,
size_t data_len) {
if (!buffer) {
return CERT_ERROR_NULL_PARAM;
}
*buffer = (cert_buffer *)malloc(sizeof(cert_buffer));
if (!*buffer) {
return CERT_ERROR_MEMORY;
}
memset(*buffer, 0, sizeof(cert_buffer));
(*buffer)->data = (unsigned char *)malloc(data_len);
if (!(*buffer)->data) {
free(*buffer);
*buffer = NULL;
return CERT_ERROR_MEMORY;
}
memcpy((*buffer)->data, src_data, data_len);
(*buffer)->len = data_len;
return CERT_SUCCESS;
}
// Function to generate a self-signed X.509 certificate with custom extension
cert_error_t cert_generate(cert_context_t ctx, cert_key_t key,
cert_buffer **out, cert_buffer *signature,
cert_buffer *ident_pubk, const char *cn,
const char *validFrom, const char *validTo,
cert_format_t format) {
X509 *x509 = NULL;
BIO *bio = NULL;
BUF_MEM *bptr = NULL;
X509_EXTENSION *ex = NULL;
BIGNUM *serial_bn = NULL;
X509_NAME *name = NULL;
X509_EXTENSION *ku_ex = NULL;
ASN1_BIT_STRING *usage = NULL;
ASN1_OCTET_STRING *oct_sign = NULL;
ASN1_OCTET_STRING *oct_pubk = NULL;
ASN1_OCTET_STRING *ext_oct = NULL;
ASN1_TIME *start_time = NULL;
ASN1_TIME *end_time = NULL;
unsigned char *seq_der = NULL;
int ret = 0;
cert_error_t ret_code = CERT_SUCCESS;
if (ctx == NULL || key == NULL) {
ret_code = CERT_ERROR_NULL_PARAM;
goto cleanup;
}
// Get the EVP_PKEY from our opaque key structure
EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
if (pkey == NULL) {
ret_code = CERT_ERROR_NULL_PARAM;
goto cleanup;
}
// Allocate result structure
*out = (cert_buffer *)malloc(sizeof(cert_buffer));
if (!*out) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
memset(*out, 0, sizeof(cert_buffer));
// Create X509 certificate
x509 = X509_new();
if (!x509) {
ret_code = CERT_ERROR_CERT_GEN;
goto cleanup;
}
// Set version to X509v3
if (!X509_set_version(x509, 2)) {
ret_code = CERT_ERROR_X509_VER;
goto cleanup;
}
// Set random serial number
serial_bn = BN_new();
if (!serial_bn) {
ret_code = CERT_ERROR_BIGNUM_GEN;
goto cleanup;
}
unsigned char serial_bytes[20]; // Adjust size as needed
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
if (EVP_RAND_generate(ctx->drbg, serial_bytes, sizeof(serial_bytes), 0, 0,
NULL, 0) <= 0) {
ret_code = CERT_ERROR_RAND;
goto cleanup;
}
#else
if (RAND_DRBG_bytes(ctx->drbg, serial_bytes, sizeof(serial_bytes)) != 1) {
ret_code = CERT_ERROR_RAND;
goto cleanup;
}
#endif
if (!BN_bin2bn(serial_bytes, sizeof(serial_bytes), serial_bn)) {
ret_code = CERT_ERROR_BIGNUM_CONV;
goto cleanup;
}
if (!BN_to_ASN1_INTEGER(serial_bn, X509_get_serialNumber(x509))) {
ret_code = CERT_ERROR_SERIAL_WRITE;
goto cleanup;
}
// Set subject and issuer using the provided cn
name = X509_NAME_new();
if (!name) {
ret_code = CERT_ERROR_X509_NAME;
goto cleanup;
}
if (!X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
(const unsigned char *)cn, -1, -1, 0)) {
ret_code = CERT_ERROR_X509_CN;
goto cleanup;
}
if (!X509_set_subject_name(x509, name)) {
ret_code = CERT_ERROR_X509_SUBJECT;
goto cleanup;
}
if (!X509_set_issuer_name(x509, name)) {
ret_code = CERT_ERROR_X509_ISSUER;
goto cleanup;
}
// Set validity period
start_time = ASN1_TIME_new();
end_time = ASN1_TIME_new();
if (!start_time || !end_time) {
ret_code = CERT_ERROR_AS1_TIME_GEN;
goto cleanup;
}
if (!ASN1_TIME_set_string(start_time, validFrom) ||
!ASN1_TIME_set_string(end_time, validTo)) {
ret_code = CERT_ERROR_VALIDITY_PERIOD;
goto cleanup;
}
if (!X509_set1_notBefore(x509, start_time) ||
!X509_set1_notAfter(x509, end_time)) {
ret_code = CERT_ERROR_VALIDITY_PERIOD;
goto cleanup;
}
// Set public key
if (!X509_set_pubkey(x509, pkey)) {
ret_code = CERT_ERROR_PUBKEY_SET;
goto cleanup;
}
// Add custom extension
int nid = ensure_libp2p_oid();
if (nid <= 0) {
ret_code = nid;
goto cleanup;
}
unsigned char *p;
int seq_len, total_len;
// Allocate and initialize ASN1_OCTET_STRING objects
oct_pubk = ASN1_OCTET_STRING_new();
if (!oct_pubk) {
ret_code = CERT_ERROR_AS1_OCTET;
goto cleanup;
}
oct_sign = ASN1_OCTET_STRING_new();
if (!oct_sign) {
ret_code = CERT_ERROR_AS1_OCTET;
goto cleanup;
}
if (!ASN1_OCTET_STRING_set(oct_pubk, ident_pubk->data, ident_pubk->len)) {
ret_code = CERT_ERROR_EXTENSION_DATA;
goto cleanup;
}
if (!ASN1_OCTET_STRING_set(oct_sign, signature->data, signature->len)) {
ret_code = CERT_ERROR_EXTENSION_DATA;
goto cleanup;
}
// Calculate DER-encoded lengths of the OCTET STRINGs
int oct_pubk_len = i2d_ASN1_OCTET_STRING(oct_pubk, NULL);
int oct_sign_len = i2d_ASN1_OCTET_STRING(oct_sign, NULL);
seq_len = oct_pubk_len + oct_sign_len;
// Compute the exact required length for the SEQUENCE
total_len = ASN1_object_size(1, seq_len, V_ASN1_SEQUENCE);
// Allocate the exact required space
seq_der = OPENSSL_malloc(total_len);
if (!seq_der) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
// Encode the sequence. p is moved fwd as it is written
p = seq_der;
ASN1_put_object(&p, 1, seq_len, V_ASN1_SEQUENCE, V_ASN1_UNIVERSAL);
i2d_ASN1_OCTET_STRING(oct_pubk, &p);
i2d_ASN1_OCTET_STRING(oct_sign, &p);
// Wrap the encoded sequence in an ASN1_OCTET_STRING
ext_oct = ASN1_OCTET_STRING_new();
if (!ext_oct) {
ret_code = CERT_ERROR_AS1_OCTET;
goto cleanup;
}
if (!ASN1_OCTET_STRING_set(ext_oct, seq_der, total_len)) {
ret_code = CERT_ERROR_EXTENSION_DATA;
goto cleanup;
}
// Create extension with the octet string
ex = X509_EXTENSION_create_by_NID(NULL, nid, 0, ext_oct);
if (!ex) {
ret_code = CERT_ERROR_EXTENSION_GEN;
goto cleanup;
}
// Add extension to certificate
if (!X509_add_ext(x509, ex, -1)) {
ret_code = CERT_ERROR_EXTENSION_ADD;
goto cleanup;
}
/*
// Add Key Usage extension
usage = ASN1_BIT_STRING_new();
if (!usage) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
// Set bits for DIGITAL_SIGNATURE (bit 0) and KEY_ENCIPHERMENT (bit 2)
if (!ASN1_BIT_STRING_set_bit(usage, 0, 1) ||
!ASN1_BIT_STRING_set_bit(usage, 2, 1)) {
ret_code = CERT_ERROR_EXTENSION_DATA;
goto cleanup;
}
*/
// Create Key Usage extension
/*ku_ex = X509_EXTENSION_create_by_NID(NULL, NID_key_usage, 1,
usage); // 1 for critical
if (!ku_ex) {
ret_code = CERT_ERROR_EXTENSION_GEN;
goto cleanup;
}
// Add extension to certificate
if (!X509_add_ext(x509, ku_ex, -1)) {
ret_code = CERT_ERROR_EXTENSION_ADD;
goto cleanup;
}*/
// Sign the certificate with SHA256
if (!X509_sign(x509, pkey, EVP_sha256())) {
ret_code = CERT_ERROR_SIGN;
goto cleanup;
}
// Convert to requested format (DER or PEM)
bio = BIO_new(BIO_s_mem());
if (!bio) {
ret_code = CERT_ERROR_BIO_GEN;
goto cleanup;
}
if (format == CERT_FORMAT_DER) {
ret = i2d_X509_bio(bio, x509);
} else { // PEM format
ret = PEM_write_bio_X509(bio, x509);
}
if (!ret) {
ret_code = CERT_ERROR_BIO_WRITE;
goto cleanup;
}
BIO_get_mem_ptr(bio, &bptr);
(*out)->data = (unsigned char *)malloc(bptr->length);
if (!(*out)->data) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
memcpy((*out)->data, bptr->data, bptr->length);
(*out)->len = bptr->length;
cleanup:
if (bio)
BIO_free(bio);
if (ex)
X509_EXTENSION_free(ex);
// if (usage)
// ASN1_BIT_STRING_free(usage);
if (ku_ex)
X509_EXTENSION_free(ku_ex);
if (oct_sign)
ASN1_OCTET_STRING_free(oct_sign);
if (oct_pubk)
ASN1_OCTET_STRING_free(oct_pubk);
if (ext_oct)
ASN1_OCTET_STRING_free(ext_oct);
if (seq_der)
OPENSSL_free(seq_der);
if (serial_bn)
BN_free(serial_bn);
if (name)
X509_NAME_free(name);
if (x509)
X509_free(x509);
if (start_time)
ASN1_TIME_free(start_time);
if (end_time)
ASN1_TIME_free(end_time);
if (ret_code != CERT_SUCCESS && (*out)) {
if ((*out)->data)
free((*out)->data);
free((*out));
*out = NULL;
}
return ret_code;
}
// Function to parse a certificate and extract custom extension and public key
cert_error_t cert_parse(cert_buffer *cert, cert_format_t format,
cert_parsed **out) {
X509 *x509 = NULL;
BIO *bio = NULL;
int extension_index;
X509_EXTENSION *ex = NULL;
ASN1_OCTET_STRING *ext_data = NULL;
ASN1_SEQUENCE_ANY *seq = NULL;
ASN1_OCTET_STRING *oct1 = NULL;
ASN1_OCTET_STRING *oct2 = NULL;
EVP_PKEY *pkey = NULL;
unsigned char *pubkey_buf = NULL;
cert_error_t ret_code;
// Allocate result structure
*out = (cert_parsed *)malloc(sizeof(cert_parsed));
if (!*out) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
memset(*out, 0, sizeof(cert_parsed));
// Create BIO from memory
bio = BIO_new_mem_buf(cert->data, cert->len);
if (!bio) {
ret_code = CERT_ERROR_BIO_GEN;
goto cleanup;
}
// Parse certificate based on format
if (format == 0) { // DER format
x509 = d2i_X509_bio(bio, NULL);
} else { // PEM format
x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL);
}
if (!x509) {
ret_code = CERT_ERROR_X509_READ;
goto cleanup;
}
// Find custom extension by OID - use the existing OID or create it if needed
int nid = ensure_libp2p_oid();
if (nid <= 0) {
ret_code = CERT_ERROR_NID;
goto cleanup;
}
extension_index = X509_get_ext_by_NID(x509, nid, -1);
if (extension_index < 0) {
ret_code = CERT_ERROR_EXTENSION_NOT_FOUND;
goto cleanup;
}
// Get extension
ex = X509_get_ext(x509, extension_index);
if (!ex) {
ret_code = CERT_ERROR_EXTENSION_GET;
goto cleanup;
}
// Get extension data
ext_data = X509_EXTENSION_get_data(ex);
if (!ext_data) {
ret_code = CERT_ERROR_EXTENSION_DATA;
goto cleanup;
}
// Point to the data
const unsigned char *p;
p = ASN1_STRING_get0_data(ext_data);
// Decode the SEQUENCE
seq = d2i_ASN1_SEQUENCE_ANY(NULL, &p, ASN1_STRING_length(ext_data));
if (!seq) {
ret_code = CERT_ERROR_DECODE_SEQUENCE;
goto cleanup;
}
// Check if we have exactly two items in the sequence
if (sk_ASN1_TYPE_num(seq) != 2) {
ret_code = CERT_ERROR_NOT_ENOUGH_SEQ_ELEMS;
goto cleanup;
}
// Get the first octet string
ASN1_TYPE *type1 = sk_ASN1_TYPE_value(seq, 0);
if (type1->type != V_ASN1_OCTET_STRING) {
ret_code = CERT_ERROR_NOT_OCTET_STR;
goto cleanup;
}
oct1 = type1->value.octet_string;
// Get the second octet string
ASN1_TYPE *type2 = sk_ASN1_TYPE_value(seq, 1);
if (type2->type != V_ASN1_OCTET_STRING) {
ret_code = CERT_ERROR_NOT_OCTET_STR;
goto cleanup;
}
oct2 = type2->value.octet_string;
ret_code =
init_cert_buffer(&((*out)->ident_pubk), ASN1_STRING_get0_data(oct1),
ASN1_STRING_length(oct1));
if (ret_code != 0) {
goto cleanup;
}
ret_code = init_cert_buffer(&((*out)->signature), ASN1_STRING_get0_data(oct2),
ASN1_STRING_length(oct2));
if (ret_code != 0) {
goto cleanup;
}
// Get public key
pkey = X509_get_pubkey(x509);
if (!pkey) {
ret_code = CERT_ERROR_PUBKEY_GET;
goto cleanup;
}
// Get public key length
int pubkey_len = i2d_PUBKEY(pkey, NULL);
if (pubkey_len <= 0) {
ret_code = CERT_ERROR_PUBKEY_DER_LEN;
goto cleanup;
}
pubkey_buf = (unsigned char *)malloc(pubkey_len);
if (!pubkey_buf) {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
unsigned char *temp = pubkey_buf;
if (i2d_PUBKEY(pkey, &temp) <= 0) {
ret_code = CERT_ERROR_PUBKEY_DER_CONV;
goto cleanup;
}
ret_code = init_cert_buffer(&(*out)->cert_pubkey, pubkey_buf, pubkey_len);
if (ret_code != CERT_SUCCESS) {
goto cleanup;
}
const ASN1_TIME *not_before = X509_get0_notBefore(x509);
if (not_before) {
// Convert ASN1_TIME to a more usable format
char *not_before_str = NULL;
BIO *bio_nb = BIO_new(BIO_s_mem());
if (bio_nb) {
if (ASN1_TIME_print(bio_nb, not_before)) {
size_t len = BIO_ctrl_pending(bio_nb);
not_before_str = malloc(len + 1);
if (not_before_str) {
BIO_read(bio_nb, not_before_str, len);
not_before_str[len] = '\0';
// Store in the output structure
(*out)->valid_from = not_before_str;
}
}
BIO_free(bio_nb);
} else {
ret_code = CERT_ERROR_MEMORY;
goto cleanup;
}
}
const ASN1_TIME *not_after = X509_get0_notAfter(x509);
if (not_after) {
// Convert ASN1_TIME to a more usable format
char *not_after_str = NULL;
BIO *bio_na = BIO_new(BIO_s_mem());
if (bio_na) {
if (ASN1_TIME_print(bio_na, not_after)) {
size_t len = BIO_ctrl_pending(bio_na);
not_after_str = malloc(len + 1);
if (not_after_str) {
BIO_read(bio_na, not_after_str, len);
not_after_str[len] = '\0';
// Store in the output structure
(*out)->valid_to = not_after_str;
}
}
BIO_free(bio_na);
}
}
ret_code = CERT_SUCCESS;
cleanup:
if (pkey)
EVP_PKEY_free(pkey);
if (x509)
X509_free(x509);
if (bio)
BIO_free(bio);
if (pubkey_buf)
free(pubkey_buf);
if (ret_code != CERT_SUCCESS && (*out)) {
cert_free_parsed(*out);
*out = NULL;
}
return ret_code;
}
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
cert_format_t format) {
BIO *bio = NULL;
BUF_MEM *bptr = NULL;
int ret;
cert_error_t ret_code = CERT_SUCCESS;
if (key == NULL || out == NULL) {
return CERT_ERROR_NULL_PARAM;
}
// Get the EVP_PKEY from our opaque key structure
EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
if (!pkey) {
return CERT_ERROR_NULL_PARAM;
}
// Create memory BIO
bio = BIO_new(BIO_s_mem());
if (!bio) {
ret_code = CERT_ERROR_BIO_GEN;
goto cleanup;
}
if (format == CERT_FORMAT_DER) {
// Write key in DER format to BIO
ret = i2d_PrivateKey_bio(bio, pkey);
if (!ret) {
ret_code = CERT_ERROR_BIO_WRITE;
goto cleanup;
}
} else {
// No encryption is used (NULL cipher, NULL password)
ret = PEM_write_bio_PrivateKey(bio, pkey, NULL, NULL, 0, NULL, NULL);
if (!ret) {
ret_code = CERT_ERROR_BIO_WRITE;
goto cleanup;
}
}
// Get the data from BIO
BIO_get_mem_ptr(bio, &bptr);
ret = init_cert_buffer(out, (const unsigned char *)bptr->data, bptr->length);
if (ret != CERT_SUCCESS) {
goto cleanup;
}
cleanup:
if (bio)
BIO_free(bio);
if (ret_code != CERT_SUCCESS && *out) {
if ((*out)->data)
free((*out)->data);
free(*out);
*out = NULL;
}
return ret_code;
}
cert_error_t cert_serialize_pubk(cert_key_t key, cert_buffer **out,
cert_format_t format) {
BIO *bio = NULL;
BUF_MEM *bptr = NULL;
int ret;
cert_error_t ret_code = CERT_SUCCESS;
if (key == NULL || out == NULL) {
return CERT_ERROR_NULL_PARAM;
}
// Get the EVP_PKEY from our opaque key structure
EVP_PKEY *pkey = ((struct cert_key_s *)key)->pkey;
if (!pkey) {
return CERT_ERROR_NULL_PARAM;
}
// Create memory BIO
bio = BIO_new(BIO_s_mem());
if (!bio) {
ret_code = CERT_ERROR_BIO_GEN;
goto cleanup;
}
if (format == CERT_FORMAT_DER) {
// Write key in DER format to BIO
ret = i2d_PUBKEY_bio(bio, pkey);
if (!ret) {
unsigned long err = ERR_get_error();
printf("openssl err: %s\n", ERR_error_string(err, NULL));
ret_code = CERT_ERROR_BIO_WRITE;
goto cleanup;
}
} else {
ret = PEM_write_bio_PUBKEY(bio, pkey);
if (!ret) {
ret_code = CERT_ERROR_BIO_WRITE;
goto cleanup;
}
}
// Get the data from BIO
BIO_get_mem_ptr(bio, &bptr);
ret = init_cert_buffer(out, (const unsigned char *)bptr->data, bptr->length);
if (ret != CERT_SUCCESS) {
goto cleanup;
}
cleanup:
if (bio)
BIO_free(bio);
if (ret_code != CERT_SUCCESS && *out) {
if ((*out)->data)
free((*out)->data);
free(*out);
*out = NULL;
}
return ret_code;
}
void cert_free_buffer(cert_buffer *buffer) {
if (buffer) {
if (buffer->data)
free(buffer->data);
free(buffer);
}
}
// Function to free the parsed certificate struct
void cert_free_parsed(cert_parsed *cert) {
if (cert) {
if (cert->cert_pubkey)
cert_free_buffer(cert->cert_pubkey);
if (cert->ident_pubk)
cert_free_buffer(cert->ident_pubk);
if (cert->signature)
cert_free_buffer(cert->signature);
if (cert->valid_from)
free(cert->valid_from);
if (cert->valid_to)
free(cert->valid_to);
free(cert);
}
}
// Function to free key resources
void cert_free_key(cert_key_t key) {
if (key == NULL)
return;
struct cert_key_s *k = (struct cert_key_s *)key;
EVP_PKEY_free(k->pkey);
free(k);
}

View File

@@ -0,0 +1,187 @@
#ifndef LIBP2P_CERT_H
#define LIBP2P_CERT_H
#include <stddef.h>
#include <stdint.h>
typedef struct cert_context_s *cert_context_t;
typedef struct cert_key_s *cert_key_t;
typedef int32_t cert_error_t;
#define CERT_SUCCESS 0
#define CERT_ERROR_NULL_PARAM -1
#define CERT_ERROR_MEMORY -2
#define CERT_ERROR_DRBG_INIT -3
#define CERT_ERROR_DRBG_CONFIG -4
#define CERT_ERROR_DRBG_SEED -5
#define CERT_ERROR_KEY_GEN -6
#define CERT_ERROR_CERT_GEN -7
#define CERT_ERROR_EXTENSION_GEN -8
#define CERT_ERROR_EXTENSION_ADD -9
#define CERT_ERROR_EXTENSION_DATA -10
#define CERT_ERROR_BIO_GEN -11
#define CERT_ERROR_SIGN -12
#define CERT_ERROR_ENCODING -13
#define CERT_ERROR_PARSE -14
#define CERT_ERROR_RAND -15
#define CERT_ERROR_ECKEY_GEN -16
#define CERT_ERROR_BIGNUM_CONV -17
#define CERT_ERROR_SET_KEY -18
#define CERT_ERROR_VALIDITY_PERIOD -19
#define CERT_ERROR_BIO_WRITE -20
#define CERT_ERROR_SERIAL_WRITE -21
#define CERT_ERROR_EVP_PKEY_EC_KEY -22
#define CERT_ERROR_X509_VER -23
#define CERT_ERROR_BIGNUM_GEN -24
#define CERT_ERROR_X509_NAME -25
#define CERT_ERROR_X509_CN -26
#define CERT_ERROR_X509_SUBJECT -27
#define CERT_ERROR_X509_ISSUER -28
#define CERT_ERROR_AS1_TIME_GEN -29
#define CERT_ERROR_PUBKEY_SET -30
#define CERT_ERROR_AS1_OCTET -31
#define CERT_ERROR_X509_READ -32
#define CERT_ERROR_PUBKEY_GET -33
#define CERT_ERROR_EXTENSION_NOT_FOUND -34
#define CERT_ERROR_EXTENSION_GET -35
#define CERT_ERROR_DECODE_SEQUENCE -36
#define CERT_ERROR_NOT_ENOUGH_SEQ_ELEMS -37
#define CERT_ERROR_NOT_OCTET_STR -38
#define CERT_ERROR_NID -39
#define CERT_ERROR_PUBKEY_DER_LEN -40
#define CERT_ERROR_PUBKEY_DER_CONV -41
#define CERT_ERROR_INIT_KEYGEN -42
#define CERT_ERROR_SET_CURVE -43
typedef enum { CERT_FORMAT_DER = 0, CERT_FORMAT_PEM = 1 } cert_format_t;
/* Buffer structure for raw key data */
typedef struct {
unsigned char *data; /* data buffer */
size_t len; /* Length of data */
} cert_buffer;
/* Struct to hold the parsed certificate data */
typedef struct {
cert_buffer *signature;
cert_buffer *ident_pubk;
cert_buffer *cert_pubkey;
char *valid_from;
char *valid_to;
} cert_parsed;
/**
* Initialize the CTR-DRBG for cryptographic operations
* This function creates and initializes a CTR-DRBG context using
* the provided seed for entropy. The DRBG is configured to use
* AES-256-CTR as the underlying cipher.
*
* @param seed A buffer of `seed_len` bytes used to seed the DRBG. Must not be NULL.
* @param seed_len Length of the seed buffer in bytes.
* @param ctx Pointer to a context pointer that will be allocated and
* initialized. The caller is responsible for eventually freeing this context
* with the appropriate cleanup function.
*
* @return CERT_SUCCESS on successful initialization, an error code otherwise
*/
cert_error_t cert_init_drbg(const char *seed, size_t seed_len,
cert_context_t *ctx);
/**
* Generate an EC key pair for use with certificates
*
* @param ctx Context pointer obtained from `cert_init_drbg`
* @param out Pointer to store the generated key
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_generate_key(cert_context_t ctx, cert_key_t *out);
/**
* Serialize a key's private key to a format
*
* @param key The key to export
* @param out Pointer to a buffer structure that will be populated with the key
* @param format output format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_serialize_privk(cert_key_t key, cert_buffer **out,
cert_format_t format);
/**
* Serialize a key's public key to a format
*
* @param key The key to export
* @param out Pointer to a buffer structure that will be populated with the key
* @param format output format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_serialize_pubk(cert_key_t key, cert_buffer **out,
cert_format_t format);
/**
* Generate a self-signed X.509 certificate with libp2p extension
*
* @param ctx Context pointer obtained from `cert_init_drbg`
* @param key Key to use
* @param out Pointer to a buffer that will be populated with a certificate
* @param signature buffer that contains a signature
* @param ident_pubk buffer that contains the bytes of an identity pubk
* @param common_name Common name to use for the certificate subject/issuer
* @param validFrom Date from which certificate is issued
* @param validTo Date to which certificate is issued
* @param format Certificate format
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_generate(cert_context_t ctx, cert_key_t key,
cert_buffer **out, cert_buffer *signature,
cert_buffer *ident_pubk, const char *cn,
const char *validFrom, const char *validTo,
cert_format_t format);
/**
* Parse a certificate to extract the custom extension and public key
*
* @param cert Buffer containing the certificate data
* @param format Certificate format
* @param out Pointer that will receive the allocated structure containing
* the parsed certificate data.
*
* @return CERT_SUCCESS on successful execution, an error code otherwise
*/
cert_error_t cert_parse(cert_buffer *cert, cert_format_t format,
cert_parsed **out);
/**
* Free all resources associated with a CTR-DRBG context
*
* @param ctx The context to free
*/
void cert_free_ctr_drbg(cert_context_t ctx);
/**
* Free memory allocated for a parsed certificate
*
* @param cert Pointer to the parsed certificate structure
*/
void cert_free_parsed(cert_parsed *cert);
/**
* Free all resources associated with a key
*
* @param key The key to free
*/
void cert_free_key(cert_key_t key);
/**
* Free memory allocated for a buffer
*
* @param buffer Pointer to the buffer structure
*/
void cert_free_buffer(cert_buffer *buffer);
#endif /* LIBP2P_CERT_H */

View File

@@ -0,0 +1,312 @@
# Nim-LibP2P
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/[sequtils, exitprocs]
import strutils
import times
import stew/byteutils
import chronicles
import ../../crypto/crypto
import ../../errors
import ./certificate_ffi
import ../../../libp2p/peerid
logScope:
topics = "libp2p tls certificate"
# Exception types for TLS certificate errors
type
TLSCertificateError* = object of LPError
KeyGenerationError* = object of TLSCertificateError
CertificateCreationError* = object of TLSCertificateError
CertificatePubKeySerializationError* = object of TLSCertificateError
CertificateParsingError* = object of TLSCertificateError
IdentityPubKeySerializationError* = object of TLSCertificateError
IdentitySigningError* = object of TLSCertificateError
# Define the P2pExtension and P2pCertificate types
type
P2pExtension* = object
publicKey*: seq[byte]
signature*: seq[byte]
P2pCertificate* = object
extension*: P2pExtension
pubKeyDer: seq[byte]
validFrom: Time
validTo: Time
CertificateX509* = object
certificate*: seq[byte]
# Complete ASN.1 DER content (certificate, signature algorithm and signature).
privateKey*: seq[byte] # Private key used to sign certificate
type EncodingFormat* = enum
DER
PEM
proc cert_format_t(self: EncodingFormat): cert_format_t =
if self == EncodingFormat.DER: CERT_FORMAT_DER else: CERT_FORMAT_PEM
proc toCertBuffer(self: seq[uint8]): cert_buffer =
cert_buffer(data: self[0].unsafeAddr, length: self.len.csize_t)
proc toSeq(self: ptr cert_buffer): seq[byte] =
toOpenArray(cast[ptr UncheckedArray[byte]](self.data), 0, self.length.int - 1).toSeq()
# Initialize entropy and DRBG contexts at the module level
var
cert_ctx: cert_context_t = nil
drbgInitialized = false
func publicKey*(cert: P2pCertificate): PublicKey =
return PublicKey.init(cert.extension.publicKey).get()
func peerId*(cert: P2pCertificate): PeerId =
return PeerId.init(cert.publicKey()).tryGet()
proc initializeDRBG() {.raises: [KeyGenerationError].} =
## Function to initialize entropy and DRBG context if not already initialized.
if not drbgInitialized:
# Seed the random number generator
let personalization = "libp2p_tls"
let ret = cert_init_drbg(
personalization.cstring, personalization.len.csize_t, addr cert_ctx
)
if ret != CERT_SUCCESS:
raise newException(KeyGenerationError, "Failed to seed CTR_DRBG")
drbgInitialized = true
proc cleanupDRBG() =
## Function to free entropy and DRBG context.
if drbgInitialized:
cert_free_ctr_drbg(cert_ctx)
drbgInitialized = false
# Register cleanup function to free entropy and DRBG context
addExitProc(cleanupDRBG)
func makeSignatureMessage(pubKey: seq[byte]): seq[byte] {.inline.} =
## Creates message used for certificate signature.
##
let P2P_SIGNING_PREFIX = "libp2p-tls-handshake:".toBytes()
let prefixLen = P2P_SIGNING_PREFIX.len.int
let msg = newSeq[byte](prefixLen + pubKey.len)
copyMem(msg[0].unsafeAddr, P2P_SIGNING_PREFIX[0].unsafeAddr, prefixLen)
copyMem(msg[prefixLen].unsafeAddr, pubKey[0].unsafeAddr, pubKey.len.int)
return msg
func makeIssuerDN(identityKeyPair: KeyPair): string {.inline.} =
let issuerDN =
try:
"CN=" & $(PeerId.init(identityKeyPair.pubkey).tryGet())
except LPError:
raiseAssert "pubkey must be set"
return issuerDN
proc makeASN1Time(time: Time): string {.inline.} =
let str =
try:
let f = initTimeFormat("yyyyMMddhhmmss")
format(time.utc(), f)
except TimeFormatParseError:
raiseAssert "time format is const and checked with test"
return str & "Z"
proc makeExtValues(
identityKeypair: KeyPair, certKey: cert_key_t
): tuple[signature: cert_buffer, pubkey: cert_buffer] {.
raises: [
CertificatePubKeySerializationError, IdentitySigningError,
IdentityPubKeySerializationError,
]
.} =
## Creates the buffers to be used for writing the libp2p extension
##
## Parameters:
## - `identityKeypair`: The peer's identity key pair.
## - `certKey`: The key used for the certificate.
##
## Returns:
## The signature and identity public key buffers used to build the libp2p extension.
##
## Raises:
## - `IdentitySigningError` if signing the message fails.
## - `CertificatePubKeySerializationError` if serialization of certificate public key fails
## - `IdentityPubKeySerializationError` if serialization of identity public key fails.
var derCert: ptr cert_buffer = nil
let ret = cert_serialize_pubk(certKey, derCert.addr, DER.cert_format_t())
if ret != CERT_SUCCESS:
raise newException(
CertificatePubKeySerializationError, "Failed to serialize the certificate pubkey"
)
let certificatePubKeyDer = derCert.toSeq()
let msg = makeSignatureMessage(certificatePubKeyDer)
# Sign the message with the Identity Key
let signatureResult = identityKeypair.seckey.sign(msg)
if signatureResult.isErr:
raise newException(
IdentitySigningError, "Failed to sign the message with the identity key"
)
let signature = signatureResult.get().data
# Get the public key bytes
let pubKeyBytesResult = identityKeypair.pubkey.getBytes()
if pubKeyBytesResult.isErr:
raise newException(
IdentityPubKeySerializationError, "Failed to get identity public key bytes"
)
let pubKeyBytes = pubKeyBytesResult.get()
return (signature.toCertBuffer(), pubKeyBytes.toCertBuffer())
proc generateX509*(
identityKeyPair: KeyPair,
validFrom: Time = fromUnix(157813200),
validTo: Time = fromUnix(67090165200),
encodingFormat: EncodingFormat = EncodingFormat.DER,
): CertificateX509 {.
raises: [
KeyGenerationError, IdentitySigningError, IdentityPubKeySerializationError,
CertificateCreationError, CertificatePubKeySerializationError,
]
.} =
## Generates a self-signed X.509 certificate with the libp2p extension.
##
## Parameters:
## - `identityKeyPair`: The peer's identity key pair.
## - `encodingFormat`: The encoding format of generated certificate.
##
## Returns:
## A `CertificateX509` containing:
## - `certificate` - The certificate content (encoded using encodingFormat).
## - `privateKey` - The private key the certificate was signed with.
##
## Raises:
## - `KeyGenerationError` if key generation fails.
## - `CertificateCreationError` if certificate creation fails.
# Ensure DRBG contexts are initialized
initializeDRBG()
var certKey: cert_key_t
var ret = cert_generate_key(cert_ctx, certKey.addr)
if ret != CERT_SUCCESS:
raise
newException(KeyGenerationError, "Failed to generate certificate key - " & $ret)
let issuerDN = makeIssuerDN(identityKeyPair)
let libp2pExtension = makeExtValues(identityKeyPair, certKey)
let validFromAsn1 = makeASN1Time(validFrom)
let validToAsn1 = makeASN1Time(validTo)
var certificate: ptr cert_buffer = nil
ret = cert_generate(
cert_ctx, certKey, certificate.addr, libp2pExtension.signature.unsafeAddr,
libp2pExtension.pubkey.unsafeAddr, issuerDN.cstring, validFromAsn1.cstring,
validToAsn1.cstring, encodingFormat.cert_format_t,
)
if ret != CERT_SUCCESS:
raise
newException(CertificateCreationError, "Failed to generate certificate - " & $ret)
var privKDer: ptr cert_buffer = nil
ret = cert_serialize_privk(certKey, privKDer.addr, encodingFormat.cert_format_t)
if ret != CERT_SUCCESS:
raise newException(KeyGenerationError, "Failed to serialize privK - " & $ret)
let outputCertificate = certificate.toSeq()
let outputPrivateKey = privKDer.toSeq()
cert_free_buffer(certificate)
cert_free_buffer(privKDer)
return CertificateX509(certificate: outputCertificate, privateKey: outputPrivateKey)
proc parseCertTime*(certTime: string): Time {.raises: [TimeParseError].} =
var timeNoZone = certTime[0 ..^ 5] # removes GMT part
# days with 1 digit have additional space -> strip it
timeNoZone = timeNoZone.replace(" ", " ")
const certTimeFormat = "MMM d hh:mm:ss yyyy"
const f = initTimeFormat(certTimeFormat)
return parse(timeNoZone, f, utc()).toTime()
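The format string above assumes the textual shape emitted by OpenSSL's ASN1_TIME_print (see the C parsing code earlier in this diff). A hedged example of that shape; the timestamp itself is invented:

let notBefore = parseCertTime("Apr  1 12:30:45 2025 GMT")
# single-digit days arrive as "Apr  1" with a double space, which the
# replace("  ", " ") call above normalizes before parsing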
proc parse*(
certificateDer: seq[byte]
): P2pCertificate {.raises: [CertificateParsingError].} =
## Parses a DER-encoded certificate and extracts the P2pCertificate.
##
## Parameters:
## - `certificateDer`: The DER-encoded certificate bytes.
##
## Returns:
## A `P2pCertificate` object containing the certificate and its libp2p extension.
##
## Raises:
## - `CertificateParsingError` if certificate parsing fails.
let certDerBuffer = certificateDer.toCertBuffer()
var certParsed: ptr cert_parsed = nil
defer:
cert_free_parsed(certParsed)
let ret = cert_parse(certDerBuffer.unsafeAddr, DER.cert_format_t(), certParsed.addr)
if ret != CERT_SUCCESS:
raise newException(
CertificateParsingError, "Failed to parse certificate, error code: " & $ret
)
var validFrom, validTo: Time
try:
validFrom = parseCertTime($certParsed.valid_from)
validTo = parseCertTime($certParsed.valid_to)
except TimeParseError as e:
raise newException(
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
)
P2pCertificate(
extension: P2pExtension(
signature: certParsed.signature.toSeq(), publicKey: certParsed.ident_pubk.toSeq()
),
pubKeyDer: certParsed.cert_pbuk.toSeq(),
validFrom: validFrom,
validTo: validTo,
)
proc verify*(self: P2pCertificate): bool =
## Verifies that P2pCertificate has signature that was signed by owner of the certificate.
##
## Parameters:
## - `self`: The P2pCertificate.
##
## Returns:
## `true` if certificate is valid.
let currentTime = now().utc().toTime()
if not (currentTime >= self.validFrom and currentTime < self.validTo):
return false
var sig: Signature
var key: PublicKey
if sig.init(self.extension.signature) and key.init(self.extension.publicKey):
let msg = makeSignatureMessage(self.pubKeyDer)
return sig.verify(msg, key)
return false
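A hedged round-trip sketch tying `generateX509`, `parse` and `verify` together; `rng` is assumed to be a `HmacDrbgContext` from libp2p's crypto module, and the snippet is not part of this diff:

let identity = KeyPair.random(PKScheme.Ed25519, rng[]).tryGet()
let x509 = generateX509(identity, encodingFormat = EncodingFormat.DER)
let cert = parse(x509.certificate)
doAssert cert.verify() # the embedded signature covers the certificate key
doAssert cert.peerId == PeerId.init(identity.pubkey).tryGet()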

View File

@@ -0,0 +1,81 @@
when defined(macosx):
{.passl: "-L/opt/homebrew/opt/openssl@3/lib -lcrypto".}
{.passc: "-I/opt/homebrew/opt/openssl@3/include".}
else:
{.passl: "-lcrypto".}
{.compile: "./certificate.c".}
type
cert_error_t* = int32
cert_format_t* {.size: sizeof(cuint).} = enum
CERT_FORMAT_DER = 0
CERT_FORMAT_PEM = 1
cert_buffer* {.pure, inheritable, bycopy.} = object
data*: ptr uint8
length*: csize_t
cert_parsed* {.pure, inheritable, bycopy.} = object
signature*: ptr cert_buffer
ident_pubk*: ptr cert_buffer
cert_pbuk*: ptr cert_buffer
valid_from*: cstring
valid_to*: cstring
cert_context_s* = object
cert_key_s* = object
cert_context_t* = ptr cert_context_s
cert_key_t* = ptr cert_key_s
const CERT_SUCCESS* = 0
proc cert_init_drbg*(
seed: cstring, seed_len: csize_t, ctx: ptr cert_context_t
): cert_error_t {.cdecl, importc: "cert_init_drbg".}
proc cert_generate_key*(
ctx: cert_context_t, out_arg: ptr cert_key_t
): cert_error_t {.cdecl, importc: "cert_generate_key".}
proc cert_serialize_privk*(
key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_privk".}
proc cert_serialize_pubk*(
key: cert_key_t, out_arg: ptr ptr cert_buffer, format: cert_format_t
): cert_error_t {.cdecl, importc: "cert_serialize_pubk".}
proc cert_generate*(
ctx: cert_context_t,
key: cert_key_t,
out_arg: ptr ptr cert_buffer,
signature: ptr cert_buffer,
ident_pubk: ptr cert_buffer,
cn: cstring,
validFrom: cstring,
validTo: cstring,
format: cert_format_t,
): cert_error_t {.cdecl, importc: "cert_generate".}
proc cert_parse*(
cert: ptr cert_buffer, format: cert_format_t, out_arg: ptr ptr cert_parsed
): cert_error_t {.cdecl, importc: "cert_parse".}
proc cert_free_ctr_drbg*(
ctx: cert_context_t
): void {.cdecl, importc: "cert_free_ctr_drbg".}
proc cert_free_key*(key: cert_key_t): void {.cdecl, importc: "cert_free_key".}
proc cert_free_buffer*(
buffer: ptr cert_buffer
): void {.cdecl, importc: "cert_free_buffer".}
proc cert_free_parsed*(
cert: ptr cert_parsed
): void {.cdecl, importc: "cert_free_parsed".}
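A hedged call-order sketch for these bindings (not part of this diff): the DRBG context is created first, keys derive from it, and every buffer allocated on the C side is returned through the matching free routine.

var ctx: cert_context_t
doAssert cert_init_drbg("seed", 4.csize_t, ctx.addr) == CERT_SUCCESS
var key: cert_key_t
doAssert cert_generate_key(ctx, key.addr) == CERT_SUCCESS
var pubkDer: ptr cert_buffer = nil
doAssert cert_serialize_pubk(key, pubkDer.addr, CERT_FORMAT_DER) == CERT_SUCCESS
# ... pubkDer.data / pubkDer.length now hold the DER-encoded public key ...
cert_free_buffer(pubkDer)
cert_free_key(key)
cert_free_ctr_drbg(ctx)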

View File

@@ -94,26 +94,37 @@ proc handlesStart(address: MultiAddress): bool {.gcsafe.} =
proc connectToTorServer(
transportAddress: TransportAddress
): Future[StreamTransport] {.
    async: (
      raises: [
        Socks5VersionError, Socks5AuthFailedError, Socks5ServerReplyError, LPError,
        common.TransportError, CancelledError,
      ]
    )
.} =
  let transp = await connect(transportAddress)
  discard
    await transp.write(@[Socks5ProtocolVersion, NMethods, Socks5AuthMethod.NoAuth.byte])

  let
    serverReply = await transp.read(2)
    socks5ProtocolVersion = serverReply[0]
    serverSelectedMethod = serverReply[1]
  if socks5ProtocolVersion != Socks5ProtocolVersion:
    raise newException(Socks5VersionError, "Unsupported socks version")
  if serverSelectedMethod != Socks5AuthMethod.NoAuth.byte:
    raise newException(Socks5AuthFailedError, "Unsupported auth method")
  return transp
proc readServerReply(
    transp: StreamTransport
) {.
    async: (
      raises: [
        Socks5VersionError, Socks5ServerReplyError, LPError, common.TransportError,
        CancelledError,
      ]
    )
.} =
## The specification for this code is defined on
## [link text](https://www.rfc-editor.org/rfc/rfc1928#section-5)
## and [link text](https://www.rfc-editor.org/rfc/rfc1928#section-6).
@@ -129,10 +140,9 @@ proc readServerReply(transp: StreamTransport) {.async.} =
if serverReply != Socks5ReplyType.Succeeded.byte:
var socks5ReplyType: Socks5ReplyType
if socks5ReplyType.checkedEnumAssign(serverReply):
raise newException(Socks5ServerReplyError, "Server reply error")
else:
raise newException(LPError, fmt"Unexpected server reply: {serverReply}")
raise newException(LPError, "Unexpected server reply")
let atyp = firstFourOctets[3]
case atyp
of Socks5AddressType.IPv4.byte:
@@ -147,13 +157,13 @@ proc readServerReply(transp: StreamTransport) {.async.} =
proc parseOnion3(
address: MultiAddress
): (byte, seq[byte], seq[byte]) {.raises: [LPError].} =
var addressArray = ($address).split('/')
if addressArray.len < 2:
raise newException(LPError, fmt"Onion address not supported {address}")
raise newException(LPError, "Onion address not supported")
addressArray = addressArray[2].split(':')
if addressArray.len == 0:
raise newException(LPError, fmt"Onion address not supported {address}")
raise newException(LPError, "Onion address not supported")
let
addressStr = addressArray[0] & ".onion"
dstAddr = @(uint8(addressStr.len).toBytes()) & addressStr.toBytes()
@@ -162,14 +172,14 @@ proc parseOnion3(
proc parseIpTcp(
address: MultiAddress
): (byte, seq[byte], seq[byte]) {.raises: [LPError].} =
let (codec, atyp) =
if IPv4Tcp.match(address):
(multiCodec("ip4"), Socks5AddressType.IPv4.byte)
elif IPv6Tcp.match(address):
(multiCodec("ip6"), Socks5AddressType.IPv6.byte)
else:
raise newException(LPError, fmt"IP address not supported {address}")
raise newException(LPError, "IP address not supported")
let
dstAddr = address[codec].tryGet().protoArgument().tryGet()
dstPort = address[multiCodec("tcp")].tryGet().protoArgument().tryGet()
@@ -177,14 +187,23 @@ proc parseIpTcp(
proc parseDnsTcp(
address: MultiAddress
): (byte, seq[byte], seq[byte]) {.raises: [LPError].} =
let
dnsAddress = address[multiCodec("dns")].tryGet().protoArgument().tryGet()
dstAddr = @(uint8(dnsAddress.len).toBytes()) & dnsAddress
dstPort = address[multiCodec("tcp")].tryGet().protoArgument().tryGet()
(Socks5AddressType.FQDN.byte, dstAddr, dstPort)
proc dialPeer(
    transp: StreamTransport, address: MultiAddress
) {.
    async: (
      raises: [
        LPError, common.TransportError, CancelledError, Socks5ServerReplyError,
        Socks5VersionError,
      ]
    )
.} =
let (atyp, dstAddr, dstPort) =
if Onion3.match(address):
parseOnion3(address)
@@ -193,7 +212,7 @@ proc dialPeer(transp: StreamTransport, address: MultiAddress) {.async.} =
elif DnsTcp.match(address):
parseDnsTcp(address)
else:
raise newException(LPError, fmt"Address not supported: {address}")
raise newException(LPError, "Address not supported")
let reserved = byte(0)
let request =
@@ -207,22 +226,29 @@ method dial*(
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  ## dial a peer
  ##
  if not handlesDial(address):
    raise newException(TransportDialError, "Address not supported")
  trace "Dialing remote peer", address = $address
  var transp: StreamTransport
  try:
    transp = await connectToTorServer(self.transportAddress)
    await dialPeer(transp, address)
    return self.tcpTransport.connHandler(transp, Opt.none(MultiAddress), Direction.Out)
  except CancelledError as e:
    safeCloseWait(transp)
    raise e
  except CatchableError as e:
    safeCloseWait(transp)
    raise newException(transport.TransportDialError, e.msg, e)
method start*(
    self: TorTransport, addrs: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
## listen on the transport
##
@@ -247,20 +273,22 @@ method start*(self: TorTransport, addrs: seq[MultiAddress]) {.async.} =
"Tor Transport couldn't start, no supported addr was provided.",
)
method accept*(
    self: TorTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
## accept a new Tor connection
##
let conn = await self.tcpTransport.accept()
conn.observedAddr = Opt.none(MultiAddress)
return conn
method stop*(self: TorTransport) {.async: (raises: []).} =
## stop the transport
##
await procCall Transport(self).stop() # call base
await self.tcpTransport.stop()
method handles*(t: TorTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(t).handles(address):
return handlesDial(address) or handlesStart(address)
@@ -274,7 +302,7 @@ proc new*(
flags: set[ServerFlags] = {},
): TorSwitch {.raises: [LPError], public.} =
var builder = SwitchBuilder.new().withRng(rng).withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TorTransport.new(torServer, flags, upgr)
)
if addresses.len != 0:
@@ -297,7 +325,7 @@ proc new*(
return torSwitch
method addTransport*(s: TorSwitch, t: Transport) =
doAssert(false, "not implemented!")
doAssert(false, "[TorSwitch.addTransport ] abstract method not implemented!")
method getTorTransport*(s: TorSwitch): Transport {.base.} =
return s.transports[0]

View File

@@ -29,6 +29,7 @@ type
TransportError* = object of LPError
TransportInvalidAddrError* = object of TransportError
TransportClosedError* = object of TransportError
TransportDialError* = object of TransportError
Transport* = ref object of RootObj
addrs*: seq[MultiAddress]
@@ -39,7 +40,9 @@ type
proc newTransportClosedError*(parent: ref Exception = nil): ref TransportError =
newException(TransportClosedError, "Transport closed, no more connections!", parent)
method start*(
    self: Transport, addrs: seq[MultiAddress]
) {.base, async: (raises: [LPError, TransportError]).} =
## start the transport
##
@@ -47,7 +50,7 @@ method start*(self: Transport, addrs: seq[MultiAddress]) {.base, async.} =
self.addrs = addrs
self.running = true
method stop*(self: Transport) {.base, async: (raises: []).} =
## stop and cleanup the transport
## including all outstanding connections
##
@@ -55,22 +58,28 @@ method stop*(self: Transport) {.base, async.} =
trace "stopping transport", address = $self.addrs
self.running = false
method accept*(
    self: Transport
): Future[Connection] {.
    gcsafe, base, async: (raises: [TransportError, CancelledError])
.} =
## accept incoming connections
##
doAssert(false, "Not implemented!")
doAssert(false, "[Transport.accept] abstract method not implemented!")
method dial*(
self: Transport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.
    base, gcsafe, async: (raises: [TransportError, CancelledError])
.} =
## dial a peer
##
doAssert(false, "Not implemented!")
doAssert(false, "[Transport.dial] abstract method not implemented!")
proc dial*(
self: Transport, address: MultiAddress, peerId: Opt[PeerId] = Opt.none(PeerId)
@@ -85,7 +94,9 @@ method upgrade*(
##
self.upgrader.upgrade(conn, peerId)
method handles*(
    self: Transport, address: MultiAddress
): bool {.base, gcsafe, raises: [].} =
## check if transport supports the multiaddress
##
# by default we skip circuit addresses to avoid
@@ -94,3 +105,17 @@ method handles*(self: Transport, address: MultiAddress): bool {.base, gcsafe.} =
return false
protocols.filterIt(it == multiCodec("p2p-circuit")).len == 0
+template safeCloseWait*(stream: untyped) =
+  if not isNil(stream):
+    try:
+      await noCancel stream.closeWait()
+    except CatchableError as e:
+      trace "Error closing", description = e.msg
+
+template safeClose*(stream: untyped) =
+  if not isNil(stream):
+    try:
+      await noCancel stream.close()
+    except CatchableError as e:
+      trace "Error closing", description = e.msg

libp2p/transports/wstransport.nim

@@ -34,8 +34,11 @@ export transport, websock, results
const DefaultHeadersTimeout = 3.seconds
-type WsStream = ref object of Connection
-  session: WSSession
+type
+  WsStream = ref object of Connection
+    session: WSSession
+
+  WsTransportError* = object of transport.TransportError
method initStream*(s: WsStream) =
if s.objName.len == 0:
@@ -116,7 +119,9 @@ type WsTransport* = ref object of Transport
proc secure*(self: WsTransport): bool =
not (isNil(self.tlsPrivateKey) or isNil(self.tlsCertificate))
-method start*(self: WsTransport, addrs: seq[MultiAddress]) {.async.} =
+method start*(
+    self: WsTransport, addrs: seq[MultiAddress]
+) {.async: (raises: [LPError, transport.TransportError]).} =
## listen on the transport
##
@@ -140,19 +145,22 @@ method start*(self: WsTransport, addrs: seq[MultiAddress]) {.async.} =
else:
false
+let address = ma.initTAddress().tryGet()
let httpserver =
-  if isWss:
-    TlsHttpServer.create(
-      address = ma.initTAddress().tryGet(),
-      tlsPrivateKey = self.tlsPrivateKey,
-      tlsCertificate = self.tlsCertificate,
-      flags = self.flags,
-      handshakeTimeout = self.handshakeTimeout,
-    )
-  else:
-    HttpServer.create(
-      ma.initTAddress().tryGet(), handshakeTimeout = self.handshakeTimeout
-    )
+  try:
+    if isWss:
+      TlsHttpServer.create(
+        address = address,
+        tlsPrivateKey = self.tlsPrivateKey,
+        tlsCertificate = self.tlsCertificate,
+        flags = self.flags,
+        handshakeTimeout = self.handshakeTimeout,
+      )
+    else:
+      HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
+  except CatchableError as exc:
+    raise (ref WsTransportError)(msg: exc.msg, parent: exc)
self.httpservers &= httpserver
@@ -173,7 +181,7 @@ method start*(self: WsTransport, addrs: seq[MultiAddress]) {.async.} =
self.running = true
-method stop*(self: WsTransport) {.async.} =
+method stop*(self: WsTransport) {.async: (raises: []).} =
## stop the transport
##
@@ -210,7 +218,10 @@ method stop*(self: WsTransport) {.async.} =
proc connHandler(
self: WsTransport, stream: WSSession, secure: bool, dir: Direction
-): Future[Connection] {.async.} =
+): Future[Connection] {.async: (raises: [CatchableError]).} =
+  ## Returning CatchableError is fine because we later handle different exceptions.
+  ##
let observedAddr =
try:
let
@@ -225,21 +236,23 @@ proc connHandler(
except CatchableError as exc:
trace "Failed to create observedAddr", description = exc.msg
if not (isNil(stream) and stream.stream.reader.closed):
-        await stream.close()
+        safeClose(stream)
raise exc
let conn = WsStream.new(stream, dir, Opt.some(observedAddr))
self.connections[dir].add(conn)
-proc onClose() {.async.} =
-  await conn.session.stream.reader.join()
+proc onClose() {.async: (raises: []).} =
+  await noCancel conn.session.stream.reader.join()
self.connections[dir].keepItIf(it != conn)
trace "Cleaned up client"
asyncSpawn onClose()
return conn
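connHandler's onClose shows the detached-cleanup idiom: a raises: [] future is handed to asyncSpawn, and noCancel guarantees the join completes, so the spawned task can never crash the process with an unhandled exception. A minimal sketch of the same idiom, assuming chronos; spawnCleanup and the echo are hypothetical, not libp2p code:

import chronos

proc spawnCleanup(reader: Future[void]) =
  proc onClose() {.async: (raises: []).} =
    # join waits for the tracked future to finish; noCancel keeps the
    # wait alive even if onClose itself is cancelled, so raises: [] holds.
    await noCancel reader.join()
    echo "connection deregistered"
  asyncSpawn onClose() # asyncSpawn is safe here: onClose cannot fail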
-method accept*(self: WsTransport): Future[Connection] {.async.} =
+method accept*(
+    self: WsTransport
+): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
## accept a new WS connection
##
@@ -252,10 +265,15 @@ method accept*(self: WsTransport): Future[Connection] {.async.} =
if self.acceptFuts.len <= 0:
return
-let
-  finished = await one(self.acceptFuts)
-  index = self.acceptFuts.find(finished)
+let finished =
+  try:
+    await one(self.acceptFuts)
+  except ValueError:
+    raiseAssert("already checked with if")
+  except CancelledError as e:
+    raise e
+let index = self.acceptFuts.find(finished)
self.acceptFuts[index] = self.httpservers[index].accept()
try:
@@ -268,7 +286,7 @@ method accept*(self: WsTransport): Future[Connection] {.async.} =
return await self.connHandler(wstransp, isSecure, Direction.In)
except CatchableError as exc:
-      await req.stream.closeWait()
+      await noCancel req.stream.closeWait()
raise exc
except WebSocketError as exc:
debug "Websocket Error", description = exc.msg
@@ -291,21 +309,22 @@ method accept*(self: WsTransport): Future[Connection] {.async.} =
debug "OS Error", description = exc.msg
except CatchableError as exc:
info "Unexpected error accepting connection", description = exc.msg
-    raise exc
+    raise newException(transport.TransportError, exc.msg, exc)
method dial*(
self: WsTransport,
hostname: string,
address: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
-): Future[Connection] {.async.} =
+): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
## dial a peer
##
trace "Dialing remote peer", address = $address
-let
-  secure = WSS.match(address)
+var transp: websock.WSSession
+try:
+  let secure = WSS.match(address)
transp = await WebSocket.connect(
address.initTAddress().tryGet(),
"",
@@ -313,14 +332,15 @@ method dial*(
hostName = hostname,
flags = self.tlsFlags,
)
-try:
  return await self.connHandler(transp, secure, Direction.Out)
-except CatchableError as exc:
-  await transp.close()
-  raise exc
+except CancelledError as e:
+  safeClose(transp)
+  raise e
+except CatchableError as e:
+  safeClose(transp)
+  raise newException(transport.TransportDialError, e.msg, e)
-method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe.} =
+method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(t).handles(address):
if address.protocols.isOk:
return WebSockets.match(address)
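The new dial body demonstrates the wrap-and-reraise pattern this diff settles on: cancellation is re-raised untouched, while every other CatchableError is converted into a typed TransportDialError carrying the original exception as its parent. A sketch with local stand-in types (libp2p's real hierarchy lives in transport.nim above); riskyConnect and dialDemo are hypothetical:

import chronos

type
  TransportError = object of CatchableError
  TransportDialError = object of TransportError

proc riskyConnect() {.async.} =
  # stand-in for the real WebSocket.connect call
  raise newException(IOError, "connection refused")

proc dialDemo() {.async: (raises: [TransportError, CancelledError]).} =
  try:
    await riskyConnect()
  except CancelledError as e:
    raise e # cancellation propagates untouched
  except CatchableError as e:
    # anything else becomes a typed dial failure, keeping the cause
    raise newException(TransportDialError, e.msg, e)

Callers matching on TransportError still catch the dial failure, and the parent field preserves the underlying cause for logging.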

Some files were not shown because too many files have changed in this diff.