Compare commits

...

85 Commits

Author SHA1 Message Date
Gabriel Cruz
2979fcba0e Merge branch 'master' into idontwant-mesh 2025-05-06 17:07:09 -03:00
Gabriel Cruz
ccb24b5f1f feat(cert): add certificate signing request (CSR) generation (#1355) 2025-05-06 18:56:51 +00:00
Marko Burčul
5cb493439d fix(ci): secrets token typo (#1357) 2025-05-05 09:49:42 -03:00
Ivan FB
24b284240a chore: add gcsafe pragma to removeValidator (#1356) 2025-05-02 18:39:00 +02:00
richΛrd
b0f77d24f9 chore(version): update libp2p.nimble to 1.10.0 (#1351) 2025-05-01 05:39:58 -04:00
richΛrd
e32ac492d3 chore: set @vacp2p/p2p team as codeowners of repo (#1352) 2025-05-01 05:03:54 -03:00
Gabriel Cruz
470a7f8cc5 chore: add libp2p CID codec (#1348) 2025-04-27 09:45:40 +00:00
Radosław Kamiński
b269fce289 test(gossipsub): reorganize tests by feature category (#1350) 2025-04-25 16:48:50 +01:00
vladopajic
bc4febe92c fix: git ignore for tests (#1349) 2025-04-24 15:36:46 +02:00
Radosław Kamiński
b5f9bfe0f4 test(gossipsub): optimise heartbeat interval and sleepAsync (#1342) 2025-04-23 18:10:16 +01:00
Gabriel Cruz
4ce1e8119b chore(readme): add gabe as a maintainer (#1346) 2025-04-23 15:57:32 +02:00
Miran
65136b38e2 chore: fix warnings (#1341)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-04-22 19:45:53 +00:00
Gabriel Cruz
ffc114e8d9 chore: fix broken old status-im links (#1332) 2025-04-22 09:14:26 +00:00
Radosław Kamiński
f2be2d6ed5 test: include missing tests in testall (#1338) 2025-04-22 09:45:38 +01:00
Radosław Kamiński
ab690a06a6 test: combine tests (#1335) 2025-04-21 17:39:42 +01:00
Radosław Kamiński
10cdaf14c5 chore(ci): decouple examples from unit tests (#1334) 2025-04-21 16:31:50 +01:00
Radosław Kamiński
ebbfb63c17 chore(test): remove unused flags and simplify testpubsub (#1328) 2025-04-17 13:38:27 +02:00
Álex
ac25da6cea test(gossipsub): message propagation (#1184)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-14 15:49:13 +01:00
Gabriel Cruz
fb41972ba3 chore: rendezvous improvements (#1319) 2025-04-11 13:31:24 +00:00
Richard Ramos
504d1618af fix: bump nim-quic 2025-04-10 17:54:20 -04:00
richΛrd
0f91b23f12 fix: do not use while loop for quic transport errors (#1317) 2025-04-10 21:47:42 +00:00
vladopajic
5ddd62a8b9 chore(git): ignore auto generated test binaries (#1320) 2025-04-10 13:39:04 +00:00
vladopajic
e7f13a7e73 refactor: utilize single bridgedConnections (#1309) 2025-04-10 08:37:26 -04:00
vladopajic
89e825fb0d fix(quic): continue accept when client certificate is incorrect (#1312) 2025-04-08 21:03:47 +02:00
richΛrd
1b706e84fa chore: bump nim-quic (#1314) 2025-04-08 10:04:31 -04:00
richΛrd
5cafcb70dc chore: remove range checks from rendezvous (#1306) 2025-04-07 12:16:18 +00:00
vladopajic
8c71266058 chore(readme): add quic and memory transports (#1311) 2025-04-04 15:07:31 +00:00
vladopajic
9c986c5c13 feat(transport): add memory transport (#1304) 2025-04-04 15:43:34 +02:00
vladopajic
3d0451d7f2 chore(protocols): remove deprecated utilities (#1305) 2025-04-04 08:44:36 +00:00
richΛrd
b1f65c97ae fix: unsafe string usage (#1308) 2025-04-03 15:33:08 -04:00
vladopajic
5584809fca chore(certificate): update test vectors (#1294) 2025-04-01 17:15:26 +02:00
richΛrd
7586f17b15 fix: set peerId on incoming Quic connection (#1302) 2025-03-31 09:38:30 -04:00
richΛrd
0e16d873c8 feat: withQuicTransport (#1301) 2025-03-30 04:44:49 +00:00
richΛrd
b11acd2118 chore: update quic and expect exception in test (#1300)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-27 12:19:49 -04:00
vladopajic
1376f5b077 chore(quic): add tests with invalid certs (#1297) 2025-03-27 15:19:14 +01:00
richΛrd
340ea05ae5 feat: quic (#1265)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-03-26 10:17:15 -04:00
vladopajic
024ec51f66 feat(certificate): add date verification (#1299) 2025-03-25 11:50:25 +01:00
richΛrd
efe453df87 refactor: use openssl instead of mbedtls (#1298) 2025-03-24 10:22:52 -04:00
vladopajic
c0f4d903ba feat(certificate): set distinguishable issuer name with peer id (#1296) 2025-03-21 12:38:02 +00:00
vladopajic
28f2b268ae chore(certificate): cosmetics (#1293) 2025-03-19 17:02:14 +00:00
vladopajic
5abb6916b6 feat: X.509 certificate validation (#1292) 2025-03-19 15:40:14 +00:00
richΛrd
e6aec94c0c chore: use token per repo in autobump task (#1288) 2025-03-18 17:12:52 +00:00
vladopajic
9eddc7c662 chore: specify exceptions (#1284) 2025-03-17 13:09:18 +00:00
richΛrd
028c730a4f chore: remove python dependency (#1287) 2025-03-17 08:04:30 -04:00
richΛrd
3c93bdaf80 chore(version): update libp2p.nimble to 1.9.0 (#1282)
Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
2025-03-14 15:56:51 +00:00
vladopajic
037b99997e chore(ci): remove AppVeyor config (#1281) 2025-03-10 21:09:40 +00:00
vladopajic
e67744bf2a chore(connmanager): propagate CancelledError (#1276) 2025-03-05 17:31:46 +00:00
vladopajic
5843e6fb4f chore(protocol): handler to propagate CancelledError (#1275) 2025-03-05 15:04:29 +00:00
vladopajic
f0ff7e4c69 fix(ci): transport interoperability action (#1277) 2025-03-04 19:00:08 +01:00
diegomrsantos
24808ad534 feat(tls-certificate): generate and parse libp2p tls certificate (#1209)
Co-authored-by: Richard Ramos <info@richardramos.me>
2025-03-04 08:05:40 -04:00
vladopajic
c4bccef138 chore(relay): specify raised exceptions (#1274) 2025-03-03 15:18:27 +01:00
vladopajic
adf2345adb chore: specify raised exceptions in miscellaneous places (#1269) 2025-02-28 09:19:53 -04:00
vladopajic
f7daad91e6 chore(protocols): specify raised exceptions (part 2) (#1268) 2025-02-28 08:06:51 -04:00
vladopajic
65052d7b59 chore(transports): specify raised exceptions (#1266) 2025-02-27 10:42:05 +01:00
vladopajic
b07ec5c0c6 chore(protocol): list raised exceptions (#1260) 2025-02-25 17:33:24 -04:00
vladopajic
f4c94ddba1 chore(dialer): list raised exceptions (#1264) 2025-02-24 20:10:20 +01:00
vladopajic
a7ec485ca9 chore(transports): list raised exceptions (#1255) 2025-02-24 16:42:27 +01:00
vladopajic
86b6469e35 chore: list raised exceptions in switch services (#1251)
Co-authored-by: richΛrd <info@richardramos.me>
2025-02-24 15:33:06 +01:00
Ivan FB
3e16ca724d chore: add trace log to analyse remote stream reset (#1253) 2025-02-24 08:24:27 -04:00
vladopajic
93dd5a6768 chore(connmanager): specify raised exceptions (#1263) 2025-02-19 15:55:36 +00:00
richΛrd
ec43d0cb9f chore: refactors to remove .closure., .gcsafe for .async. procs, and added callback compatibility to daemonapi (#1240) 2025-02-19 15:00:44 +00:00
vladopajic
8469a750e7 chore: add description of public pragma (#1262) 2025-02-19 14:14:50 +01:00
Richard Ramos
c98f3eefa0 fix: only send IDONT to mesh peers 2025-02-18 16:12:01 -04:00
vladopajic
fc6ac07ce8 ci: utilize github action for nph check (#1257) 2025-02-17 15:42:58 +00:00
vladopajic
79cdc31b37 ci: remove commit message check (#1256) 2025-02-17 12:35:42 +00:00
vladopajic
be33ad6ac7 chore: specify raising exceptions in daemon module (#1249) 2025-02-14 15:36:29 +01:00
vladopajic
a6e45d6157 chore: list raised exceptions in utils module (#1252)
Part of the https://github.com/vacp2p/nim-libp2p/issues/962 effort.
2025-02-13 11:27:29 -04:00
richΛrd
37e0f61679 chore: update maintainers (#1248) 2025-02-11 14:32:12 -04:00
vladopajic
5d382b6423 style: fix code style with nph (#1246) 2025-02-11 15:39:08 +00:00
richΛrd
78a4344054 refactor(pubsub): do not raise exceptions on publish (#1244) 2025-02-07 01:30:21 +00:00
Ivan FB
a4f0a638e7 chore: add isClosedRemotely public bool attribute in lpstream (#1242)
Also adds the gcsafe pragma to the getStreams method so that it can be invoked from async procs
2025-02-06 09:54:00 -04:00
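A minimal usage sketch of the attribute named in the commit above, assuming only what the message states; the helper proc and import path below are illustrative and not taken from the change itself:

```nim
import chronos
import libp2p/stream/connection

# Hypothetical helper: once a stream finishes, report which side closed it.
proc logCloseSide(conn: Connection) {.async.} =
  await conn.join()           # wait for the stream to be done
  if conn.isClosedRemotely:   # public attribute added by this commit
    echo "stream was closed/reset by the remote peer"
  else:
    echo "stream was closed locally"
```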
richΛrd
c5aa3736f9 chore(version): update libp2p.nimble to 1.8.0 (#1236)
This PR's commit is planned to be tagged as v1.8.0
2025-01-22 08:45:56 -04:00
richΛrd
b0f83fd48c fix: use target repository branch as suffix for nim-libp2p-auto-bump- (#1238)
The branch `nim-libp2p-auto-bump-unstable` was not getting auto-bumped because at one point
the nim-libp2p `unstable` branch was renamed to `master`. Since the workflow file depended on
`GITHUB_REF`, it was updating `nim-libp2p-auto-bump-master` instead of
`nim-libp2p-auto-bump-unstable`.
2025-01-15 14:25:21 -04:00
richΛrd
d6e5094095 fix(pubsub): revert async: raises: [] annotation for TopicHandler and ValidatorHandler (#1237) 2025-01-15 03:35:24 +00:00
richΛrd
483e1d91ba feat: idontwant on publish (#1230)
Closes #1224 

The following changes were made:
1. When a message to be published exceeds the threshold for a large message, an IDONTWANT
message is sent to mesh peers before the message itself is broadcast

4f9c21f699/libp2p/protocols/pubsub/gossipsub.nim (L800-L801)
2. Extracts the logic to broadcast an IDONTWANT message and to verify the size of a message
into separate functions
3. Adds a template to filter values of a HashSet without modifying the original
2025-01-14 16:59:38 -04:00
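The publish-path behaviour described in points 1 and 2 can be sketched in a few lines; this is a self-contained illustration only, where the names, threshold value, and signatures are assumptions rather than the gossipsub.nim API:

```nim
import std/sets

type
  PeerId = string
  MessageId = string

const largeMsgThreshold = 16_384 # illustrative byte threshold

proc sendIDontWant(peer: PeerId, msgId: MessageId) =
  echo "IDONTWANT ", msgId, " -> ", peer

proc broadcast(peers: HashSet[PeerId], data: seq[byte]) =
  echo "broadcasting ", data.len, " bytes to ", peers.len, " peers"

proc publish(meshPeers: HashSet[PeerId], msgId: MessageId, data: seq[byte]) =
  # For large messages, tell mesh peers we already have the message before
  # broadcasting it, so they do not forward the same message back to us.
  if data.len > largeMsgThreshold:
    for peer in meshPeers:
      sendIDontWant(peer, msgId)
  broadcast(meshPeers, data)

when isMainModule:
  publish(["peerA", "peerB"].toHashSet(), "msg-1", newSeq[byte](20_000))
```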
richΛrd
d215bb21e0 fix(multiaddress): don't re-export minprotobuf (#1235) 2025-01-14 15:47:06 -04:00
richΛrd
61ac0c5b95 feat(pubsub): add {.async: (raises).} annotations (#1233)
This PR adds `{.async: (raises).}` annotations to the pubsub package.
The cases where a `raises: [CatchableError]` was added involve code that is not
part of the package and should probably be tightened in a separate PR.
2025-01-14 17:01:02 +01:00
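For readers unfamiliar with the annotation, a small standalone example of the style being applied; the proc below is illustrative and not taken from the pubsub package:

```nim
import std/strutils
import chronos

# Only the listed exception types may escape this async proc; anything else
# is rejected at compile time by chronos' raises tracking.
proc parsePort(s: string): Future[int] {.async: (raises: [ValueError, CancelledError]).} =
  await sleepAsync(1.milliseconds) # await points may raise CancelledError
  return parseInt(s)               # may raise ValueError

when isMainModule:
  echo waitFor parsePort("4001")
```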
diegomrsantos
1fa30f07e8 chore(ci): add arm64 for macOS (#1212)
This PR adds the macOS 14 GitHub runner, which uses the arm64 CPU.
2024-12-20 21:18:56 -04:00
richΛrd
39d0451a10 chore: validate PR titles and commits, and autoassign PRs (#1227) 2024-12-20 15:42:54 +01:00
richΛrd
4dc7a89f45 chore: use latest nimpng and nico (#1229) 2024-12-17 14:37:06 -04:00
richΛrd
fd26f93b80 fix(ci): use nim 2.0 branch (#1228) 2024-12-09 19:41:08 +00:00
Etan Kissling
dd2c74d413 feat(nameresolving): Add {.async: (raises).} annotations (#1214)
Modernize `nameresolving` modules to track `{.async: (raises).}`.

---------

Co-authored-by: kaiserd <1684595+kaiserd@users.noreply.github.com>
Co-authored-by: richΛrd <info@richardramos.me>
2024-12-09 12:43:21 +01:00
Eugene Kabanov
b7e0df127f Fix PeerStore missing remote endpoints of established connection. (#1226) 2024-11-27 14:49:41 +02:00
kaiserd
f591e692fc chore(version): update libp2p.nimble to 1.7.1 (#1223)
Minor version bump for the rendezvous fix.
2024-11-09 10:51:33 +07:00
NagyZoltanPeter
8855bce085 fix: missing raises pragma for one of RendezVous.new (#1222) 2024-11-08 11:11:51 +01:00
172 changed files with 8467 additions and 4383 deletions


@@ -1,52 +0,0 @@
version: '{build}'
image: Visual Studio 2015
cache:
- NimBinaries
- p2pdCache
matrix:
# We always want 32 and 64-bit compilation
fast_finish: false
platform:
- x86
- x64
# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
clone_depth: 10
install:
- git submodule update --init --recursive
# use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
- IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
- IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
# build nim from our own branch - this to avoid the day-to-day churn and
# regressions of the fast-paced Nim development while maintaining the
# flexibility to apply patches
- curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
- env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
- SET PATH=%CD%\Nim\bin;%PATH%
# set path for produced Go binaries
- MKDIR goblin
- CD goblin
- SET GOPATH=%CD%
- SET PATH=%GOPATH%\bin;%PATH%
- CD ..
# install and build go-libp2p-daemon
- bash scripts/build_p2pd.sh p2pdCache v0.3.0
build_script:
- nimble install -y --depsOnly
test_script:
- nimble test
- nimble examples_build
deploy: off

.github/CODEOWNERS vendored Normal file

@@ -0,0 +1 @@
* @vacp2p/p2p


@@ -88,6 +88,8 @@ runs:
run: |
if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
PLATFORM=x64
elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
PLATFORM=arm64
else
PLATFORM=x86
fi

.github/workflows/auto_assign_pr.yml vendored Normal file

@@ -0,0 +1,12 @@
name: Auto Assign PR to Creator
on:
pull_request:
types:
- opened
jobs:
assign_creator:
runs-on: ubuntu-latest
steps:
- uses: toshimaru/auto-author-assign@v1.6.2


@@ -27,15 +27,14 @@ jobs:
cpu: amd64
- os: macos
cpu: amd64
- os: macos-14
cpu: arm64
- os: windows
cpu: amd64
nim:
- ref: version-1-6
memory_management: refc
# The ref below corresponds to the branch "version-2-0".
# Right before an update from Nimble 0.16.1 to 0.16.2.
# That update breaks our dependency resolution.
- ref: 8754469f4947844c5938f56e1fba846c349354b5
- ref: version-2-0
memory_management: refc
include:
- platform:
@@ -50,6 +49,10 @@ jobs:
os: macos
builder: macos-13
shell: bash
- platform:
os: macos-14
builder: macos-14
shell: bash
- platform:
os: windows
builder: windows-2022
@@ -78,7 +81,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0' # That's the minimum Go version that works with arm.
- name: Install p2pd
run: |
@@ -90,8 +93,8 @@ jobs:
with:
path: nimbledeps
# Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
# The change happened on Nimble v0.14.0.
key: nimbledeps-${{ matrix.nim.ref }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
# The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}


@@ -10,5 +10,5 @@ jobs:
name: Daily amd64
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"


@@ -75,7 +75,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: '~1.15.5'
go-version: '~1.16.0'
cache: false
- name: Install p2pd


@@ -10,6 +10,6 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"


@@ -10,6 +10,6 @@ jobs:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true


@@ -17,10 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2 }}
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NWAKU }}
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIM_CODEX }}
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
token: ${{ matrix.target.token }}
- name: Checkout this ref in target repository
run: |
@@ -44,7 +47,7 @@ jobs:
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}

.github/workflows/examples.yml vendored Normal file

@@ -0,0 +1,60 @@
name: Examples
on:
push:
branches:
- master
pull_request:
merge_group:
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
examples:
timeout-minutes: 30
strategy:
fail-fast: false
defaults:
run:
shell: bash
name: "Build Examples"
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
shell: bash
os: linux
cpu: amd64
nim_ref: version-1-6
- name: Restore deps from cache
id: deps-cache
uses: actions/cache@v3
with:
path: nimbledeps
key: nimbledeps-${{ hashFiles('.pinned') }}
- name: Install deps
if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
run: |
nimble install_pinned
- name: Build and run examples
run: |
nim --version
nimble --version
gcc --version
NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble examples


@@ -32,6 +32,9 @@ jobs:
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}


@@ -18,17 +18,10 @@ jobs:
with:
fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
- name: Setup NPH
# Pin nph to a specific version to avoid sudden style differences.
# Updating nph version should be accompanied with running the new version on the fluffy directory.
run: |
VERSION="v0.5.1"
ARCHIVE="nph-linux_x64.tar.gz"
curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
tar -xzf ${ARCHIVE}
- name: Check style
run: |
shopt -s extglob # Enable extended globbing
./nph examples libp2p tests tools *.@(nim|nims|nimble)
git diff --exit-code
- name: Check `nph` formatting
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests tools *.nim*"
fail: true
suggest: true

.github/workflows/pr_lint.yml vendored Normal file

@@ -0,0 +1,35 @@
name: "Conventional Commits"
on:
pull_request:
types:
- opened
- edited
- reopened
- synchronize
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: amannn/action-semantic-pull-request@v5
id: lint_pr_title
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: marocchino/sticky-pull-request-comment@v2
# When the previous steps fails, the workflow would stop. By adding this
# condition you can continue the execution with the populated error message.
if: always() && (steps.lint_pr_title.outputs.error_message != null)
with:
header: pr-title-lint-error
message: |
Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)
# Delete a previous comment when the issue has been resolved
- if: ${{ steps.lint_pr_title.outputs.error_message == null }}
uses: marocchino/sticky-pull-request-comment@v2
with:
header: pr-title-lint-error
delete: true

.gitignore vendored

@@ -17,3 +17,11 @@ examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
# Ignore all test build files in tests folder (auto generated when running tests).
# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
# Second and third rules are here to un-ignores all files with extension and Docker file,
# because it appears that vs code is skipping text search is some tests files without these rules.
tests/**/test*[^.]*
!tests/**/*.*
!tests/**/Dockerfile


@@ -6,9 +6,9 @@ faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af61
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
quic;https://github.com/status-im/nim-quic.git@#d54e8f0f2e454604b767fadeae243d95c30c383f
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35


@@ -70,6 +70,8 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
| **Secure Channels** | |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -145,9 +147,9 @@ The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-st
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
<td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
<td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>


@@ -26,15 +26,22 @@ proc main() {.async.} =
let customProtoCodec = "/test"
var proto = new LPProtocol
proto.codec = customProtoCodec
proto.handler = proc(conn: Connection, proto: string) {.async.} =
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
proto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
var msg = string.fromBytes(await conn.readLp(1024))
echo "1 - Dst Received: ", msg
assert "test1" == msg
await conn.writeLp("test2")
msg = string.fromBytes(await conn.readLp(1024))
echo "2 - Dst Received: ", msg
assert "test3" == msg
await conn.writeLp("test4")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
let
relay = Relay.new()


@@ -43,12 +43,17 @@ proc new(T: typedesc[ChatProto], c: Chat): T =
let chatproto = T()
# create handler for incoming connection
proc handle(stream: Connection, proto: string) {.async.} =
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
await stream.close()
else:
await c.handlePeer(stream)
proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
if c.connected and not c.conn.closed:
c.writeStdout "a chat session is already in progress - refusing incoming peer!"
else:
await c.handlePeer(stream)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await stream.close()
# assign the new handler


@@ -0,0 +1,3 @@
{.used.}
import directchat, tutorial_6_game


@@ -0,0 +1,5 @@
{.used.}
import
helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery


@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
pending.add(item.write(msg))
if len(pending) > 0:
var results = await all(pending)
except:
echo getCurrentException().msg
except CatchableError as err:
echo err.msg
proc main() {.async.} =
var data = new CustomData


@@ -13,7 +13,7 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.15.15`.
Go with version `1.16.0`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
@@ -21,7 +21,7 @@ Follow one of the methods below:
## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
We recommend using `1.16.0`, as previously stated.
```sh
./scripts/build_p2pd.sh
```


@@ -11,12 +11,17 @@ type TestProto = ref object of LPProtocol # declare a custom protocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will be in handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
# We must close the connections ourselves when we're done with it
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
# We must close the connections ourselves when we're done with it
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)


@@ -25,12 +25,17 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connections will in be handled in this closure
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
# We must close the connections ourselves when we're done with it
await conn.close()
try:
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[TestCodec], handler = handle)


@@ -108,12 +108,18 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
await conn.writeLp(asProtobuf.buffer)
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
res.metricGetter = cb


@@ -79,8 +79,7 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
let decoded = MetricList.decode(message.data)
if decoded.isErr:
return ValidationResult.Reject
return ValidationResult.Accept
,
return ValidationResult.Accept,
)
# This "validator" will attach to the `metrics` topic and make sure
# that every message in this topic is valid. This allows us to stop
@@ -93,7 +92,8 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
node.gossip.subscribe(
"metrics",
proc(topic: string, data: seq[byte]) {.async.} =
echo MetricList.decode(data).tryGet()
let m = MetricList.decode(data).expect("metric can be decoded")
echo m
,
)
else:
@@ -158,8 +158,8 @@ waitFor(main())
## This is John receiving & logging everyone's metrics.
##
## ## Going further
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
## you can achieve very different properties.
##
## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)


@@ -34,9 +34,15 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
finally:
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)


@@ -152,21 +152,26 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
defer:
await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
try:
if g.peerFound.finished or g.hasCandidate:
await conn.close()
return
g.hasCandidate = true
await conn.writeLp("ok")
if "ok" != string.fromBytes(await conn.readLp(1024)):
g.hasCandidate = false
return
g.peerFound.complete(conn)
# The handler of a protocol must wait for the stream to
# be finished before returning
await conn.join()
except CancelledError as e:
raise e
except CatchableError as e:
echo "exception in handler", e.msg
return T.new(codecs = @["/tron/1.0.0"], handler = handle)
@@ -214,8 +219,7 @@ proc networking(g: Game) {.async.} =
# We are "player 2"
swap(g.localPlayer, g.remotePlayer)
except CatchableError as exc:
discard
,
discard,
)
await switch.start()
@@ -268,14 +272,11 @@ nico.init("Status", "Tron")
nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
nico.run(
proc() =
discard
,
discard,
proc(dt: float32) =
game.update(dt)
,
game.update(dt),
proc() =
game.draw()
,
game.draw(),
)
waitFor(netFut.cancelAndWait())


@@ -17,7 +17,7 @@ when defined(nimdoc):
## stay backward compatible during the Major version, whereas private ones can
## change at each new Minor version.
##
## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
## that can help you get started.
# Import stuff for doc


@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.7.0"
version = "1.10.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -10,8 +10,8 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
"websock", "unittest2",
"https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc"
"websock", "unittest2", "results",
"https://github.com/status-im/nim-quic.git#d54e8f0f2e454604b767fadeae243d95c30c383f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,12 +25,8 @@ let cfg =
import hashes, strutils
proc runTest(
filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
proc runTest(filename: string, moreoptions: string = "") =
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
@@ -60,51 +56,15 @@ task testinterop, "Runs interop tests":
runTest("testinterop")
task testpubsub, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
runTest("pubsub/testpubsub", sign = false, verify = false)
runTest(
"pubsub/testpubsub",
sign = false,
verify = false,
moreoptions = "-d:libp2p_pubsub_anonymize=true",
)
task testpubsub_slim, "Runs pubsub tests":
runTest(
"pubsub/testgossipinternal",
sign = false,
verify = false,
moreoptions = "-d:pubsub_internal_testing",
)
runTest("pubsub/testpubsub")
task testfilter, "Run PKI filter test":
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
runTest(
"testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
)
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task test, "Runs the test suite":
exec "nimble testnative"
exec "nimble testpubsub"
exec "nimble testdaemon"
exec "nimble testinterop"
runTest("testall")
exec "nimble testfilter"
exec "nimble examples_build"
task test_slim, "Runs the (slimmed down) test suite":
exec "nimble testnative"
exec "nimble testpubsub_slim"
exec "nimble testfilter"
exec "nimble examples_build"
task website, "Build the website":
tutorialToMd("examples/tutorial_1_connect.nim")
@@ -116,19 +76,12 @@ task website, "Build the website":
tutorialToMd("examples/circuitrelay.nim")
exec "mkdocs build"
task examples_build, "Build the samples":
buildSample("directchat")
buildSample("helloworld", true)
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD"
# this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico@#af99dd60bf2b395038ece815ea1012330a80d6e6"
buildSample("tutorial_6_game", false, "--styleCheck:off")
task examples, "Build and run examples":
exec "nimble install -y nimpng"
exec "nimble install -y nico --passNim=--skipParentCfg"
buildSample("examples_build", false, "--styleCheck:off") # build only
buildSample("examples_run", true)
# pin system
# while nimble lockfile


@@ -23,7 +23,7 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport],
transports/[transport, tcptransport, quictransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -37,8 +37,11 @@ import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
TransportProvider* {.public.} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise
@@ -151,7 +154,7 @@ proc withTransport*(
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
.build()
@@ -162,10 +165,22 @@ proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
TcpTransport.new(flags, upgr)
)
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
MemoryTransport.new(upgr)
)
proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
b.rng = rng
b
@@ -270,7 +285,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade))
transports.add(tProvider(muxedUpgrade, seckey))
transports
if b.secureManagers.len == 0:


@@ -12,8 +12,8 @@
{.push raises: [].}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58
export results
@@ -41,6 +41,7 @@ const ContentIdsList = [
multiCodec("dag-pb"),
multiCodec("dag-cbor"),
multiCodec("dag-json"),
multiCodec("libp2p-key"),
multiCodec("git-raw"),
multiCodec("eth-block"),
multiCodec("eth-block-list"),


@@ -42,8 +42,9 @@ type
else:
discard
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe, raises: [].}
ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
PeerEventKind* {.pure.} = enum
Left
@@ -57,8 +58,9 @@ type
else:
discard
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
gcsafe, async: (raises: [CancelledError])
.}
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
@@ -123,7 +125,9 @@ proc removeConnEventHandler*(
) =
c.connEvents[kind].excl(handler)
proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async.} =
proc triggerConnEvent*(
c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -154,7 +158,9 @@ proc removePeerEventHandler*(
) =
c.peerEvents[kind].excl(handler)
proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.async.} =
proc triggerPeerEvents*(
c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
return
@@ -174,7 +180,7 @@ proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.asyn
proc expectConnection*(
c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer`
let key = (p, dir)
if key in c.expectedConnectionsOverLimit:
@@ -183,7 +189,7 @@ proc expectConnection*(
"Already expecting an incoming connection from that peer",
)
let future = newFuture[Muxer]()
let future = Future[Muxer].Raising([CancelledError]).init()
c.expectedConnectionsOverLimit[key] = future
try:
@@ -205,18 +211,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
let conn = muxer.connection
return muxer in c.muxed.getOrDefault(conn.peerId)
proc closeMuxer(muxer: Muxer) {.async.} =
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
trace "Cleaning up muxer", m = muxer
await muxer.close()
if not (isNil(muxer.handler)):
try:
await muxer.handler # TODO noraises?
await muxer.handler
except CatchableError as exc:
trace "Exception in close muxer handler", description = exc.msg
trace "Cleaned up muxer", m = muxer
proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
try:
trace "Triggering disconnect events", mux
let peerId = mux.connection.peerId
@@ -238,7 +244,7 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
# do not need to propagate CancelledError and should handle other errors
warn "Unexpected exception peer cleanup handler", mux, description = exc.msg
proc onClose(c: ConnManager, mux: Muxer) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
## connection close even handler
##
## triggers the connections resource cleanup
@@ -272,7 +278,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
trace "connection not found", peerId
return mux
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
## store the connection and muxer
##
@@ -324,7 +330,9 @@ proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len
proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
proc getIncomingSlot*(
c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
@@ -339,25 +347,21 @@ proc getOutgoingSlot*(
raise newTooManyConnectionsError()
return ConnectionSlot(connManager: c, direction: Out)
func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
return if dir == In: c.inSema else: c.outSema
proc slotsAvailable*(c: ConnManager, dir: Direction): int =
case dir
of Direction.In:
return c.inSema.count
of Direction.Out:
return c.outSema.count
return semaphore(c, dir).count
proc release*(cs: ConnectionSlot) =
if cs.direction == In:
cs.connManager.inSema.release()
else:
cs.connManager.outSema.release()
semaphore(cs.connManager, cs.direction).release()
proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
if isNil(conn):
cs.release()
return
proc semaphoreMonitor() {.async.} =
proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
try:
await conn.join()
except CatchableError as exc:
@@ -373,14 +377,18 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
return
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager, muxer: Muxer): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed muxer
##
if not (isNil(muxer)):
return await muxer.newStream()
proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from any connection
##
@@ -388,13 +396,13 @@ proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
## get a muxed stream for the passed peer from a connection with `dir`
##
return await c.getStream(c.selectMuxer(peerId, dir))
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
## drop connections and cleanup resources for peer
##
trace "Dropping peer", peerId
@@ -405,7 +413,7 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
trace "Peer dropped", peerId
proc close*(c: ConnManager) {.async.} =
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
## cleanup resources for the connection
## manager
##


@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility
# This is workaround for Nim's `import` bug


@@ -18,7 +18,7 @@
{.push raises: [].}
import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results


@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import stew/ctops
import results
import ../utility


@@ -18,7 +18,8 @@ import constants
import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import results
import stew/ctops
import ../../utility


@@ -28,8 +28,7 @@ proc hkdf*[T: sha256, len: static int](
if salt.len > 0:
unsafeAddr salt[0]
else:
nil
,
nil,
csize_t(salt.len),
)
hkdfInject(
@@ -37,8 +36,7 @@ proc hkdf*[T: sha256, len: static int](
if ikm.len > 0:
unsafeAddr ikm[0]
else:
nil
,
nil,
csize_t(ikm.len),
)
hkdfFlip(ctx)
@@ -48,8 +46,7 @@ proc hkdf*[T: sha256, len: static int](
if info.len > 0:
unsafeAddr info[0]
else:
nil
,
nil,
csize_t(info.len),
addr outputs[i][0],
csize_t(outputs[i].len),


@@ -11,7 +11,8 @@
{.push raises: [].}
import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
dest[2 + lenlen + bytelen - 1] = lastbyte and mask
res
proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
var v = value
if value <= cast[T](0x7F):
if len(dest) >= 1:
dest[0] = cast[byte](value)
1
else:
var s = 0
var res = 0
while v != 0:
v = v shr 7
s += 7
inc(res)
if len(dest) >= res:
var k = 0
while s != 0:
s -= 7
dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
inc(k)
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return ok(field)
else:
return err(Asn1Error.NoSupport)
inclass = false
ttag = 0
else:
return err(Asn1Error.NoSupport)


@@ -17,7 +17,8 @@
import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils


@@ -10,7 +10,7 @@
{.push raises: [].}
import bearssl/rand
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]
export sha2, results, rand


@@ -146,7 +146,7 @@ type
PubsubTicket* = ref object
topic*: string
handler*: P2PPubSubCallback
handler*: P2PPubSubCallback2
transp*: StreamTransport
PubSubMessage* = object
@@ -158,12 +158,14 @@ type
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI, stream: P2PStream): Future[void] {.
gcsafe, raises: [CatchableError]
gcsafe, async: (raises: [CatchableError])
.}
P2PPubSubCallback* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback2* = proc(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
DaemonLocalError* = object of DaemonError
@@ -485,7 +487,11 @@ proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalErro
if initProtoBuffer(error).getRequiredField(1, result).isErr():
raise newException(DaemonLocalError, "Error message is missing!")
proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
proc recvMessage(
conn: StreamTransport
): Future[seq[byte]] {.
async: (raises: [TransportIncompleteError, TransportError, CancelledError])
.} =
var
size: uint
length: int
@@ -508,13 +514,19 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport] {.raises: [LPError].} =
result = connect(api.address)
proc newConnection*(
api: DaemonAPI
): Future[StreamTransport] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
await connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
result = transp.closeWait()
proc closeConnection*(
api: DaemonAPI, transp: StreamTransport
): Future[void] {.async: (raises: [CancelledError]).} =
await transp.closeWait()
proc socketExists(address: MultiAddress): Future[bool] {.async.} =
proc socketExists(address: MultiAddress): Future[bool] {.async: (raises: []).} =
try:
var transp = await connect(address)
await transp.closeWait()
@@ -534,7 +546,9 @@ else:
proc getProcessId(): int =
result = int(posix.getpid())
proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.} =
proc getSocket(
pattern: string, count: ptr int
): Future[MultiAddress] {.async: (raises: [ValueError, LPError]).} =
var sockname = ""
var pid = $getProcessId()
sockname = pattern % [pid, $(count[])]
@@ -562,7 +576,35 @@ proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.}
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.}
template exceptionToAssert(body: untyped): untyped =
block:
var res: type(body)
when defined(nimHasWarnBareExcept):
{.push warning[BareExcept]: off.}
try:
res = body
except OSError as exc:
raise exc
except IOError as exc:
raise exc
except Defect as exc:
raise exc
except Exception as exc:
raiseAssert exc.msg
when defined(nimHasWarnBareExcept):
{.pop.}
res
proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -586,7 +628,14 @@ proc newDaemonApi*(
peersRequired = 2,
logFile = "",
logLevel = IpfsLogLevel.Debug,
): Future[DaemonAPI] {.async.} =
): Future[DaemonAPI] {.
async: (
raises: [
ValueError, DaemonLocalError, CancelledError, LPError, OSError, IOError,
AsyncError,
]
)
.} =
## Initialize connection to `go-libp2p-daemon` control socket.
##
## ``flags`` - set of P2PDaemonFlags.
@@ -780,7 +829,7 @@ proc newDaemonApi*(
result = api
proc close*(stream: P2PStream) {.async.} =
proc close*(stream: P2PStream) {.async: (raises: [DaemonLocalError]).} =
## Close ``stream``.
if P2PStreamFlags.Closed notin stream.flags:
await stream.transp.closeWait()
@@ -789,7 +838,9 @@ proc close*(stream: P2PStream) {.async.} =
else:
raise newException(DaemonLocalError, "Stream is already closed!")
proc close*(api: DaemonAPI) {.async.} =
proc close*(
api: DaemonAPI
) {.async: (raises: [TransportOsError, LPError, ValueError, OSError, CancelledError]).} =
## Shutdown connections to `go-libp2p-daemon` control socket.
# await api.pool.close()
# Closing all pending servers.
@@ -827,7 +878,9 @@ template withMessage(m, body: untyped): untyped =
proc transactMessage(
transp: StreamTransport, pb: ProtoBuffer
): Future[ProtoBuffer] {.async.} =
): Future[ProtoBuffer] {.
async: (raises: [DaemonLocalError, TransportError, CancelledError])
.} =
let length = pb.getLen()
let res = await transp.write(pb.getPtr(), length)
if res != length:
@@ -845,7 +898,11 @@ proc getPeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =
discard pb.getRepeatedField(2, result.addresses)
proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc identity*(
api: DaemonAPI
): Future[PeerInfo] {.
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
## Get Node identity information
var transp = await api.newConnection()
try:
@@ -860,7 +917,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc connect*(
api: DaemonAPI, peer: PeerId, addresses: seq[MultiAddress], timeout = 0
) {.async.} =
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Connect to remote peer with id ``peer`` and addresses ``addresses``.
var transp = await api.newConnection()
try:
@@ -870,7 +927,9 @@ proc connect*(
except CatchableError:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc disconnect*(
api: DaemonAPI, peer: PeerId
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
## Disconnect from remote peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -882,7 +941,12 @@ proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc openStream*(
api: DaemonAPI, peer: PeerId, protocols: seq[string], timeout = 0
): Future[P2PStream] {.async.} =
): Future[P2PStream] {.
async: (
raises:
[MaInvalidAddress, TransportError, CancelledError, LPError, DaemonLocalError]
)
.} =
## Open new stream to peer ``peer`` using one of the protocols in
## ``protocols``. Returns ``StreamTransport`` for the stream.
var transp = await api.newConnection()
@@ -903,11 +967,12 @@ proc openStream*(
stream.flags.incl(Outbound)
stream.transp = transp
result = stream
except CatchableError as exc:
except ResultError[ProtoError]:
await api.closeConnection(transp)
raise exc
raise newException(DaemonLocalError, "Wrong message type!")
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
# must not specify raised exceptions as this is StreamCallback from chronos
var api = getUserData[DaemonAPI](server)
var message = await transp.recvMessage()
var pb = initProtoBuffer(message)
@@ -927,11 +992,28 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
proc addHandler*(
api: DaemonAPI, protocols: seq[string], handler: P2PStreamCallback
) {.async, raises: [LPError].} =
) {.
async: (
raises: [
MaInvalidAddress, DaemonLocalError, TransportError, CancelledError, LPError,
ValueError,
]
)
.} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
var server = createStreamServer(maddress, streamHandler, udata = api)
var removeHandler = proc(): Future[void] {.
async: (raises: [CancelledError, TransportError])
.} =
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
try:
for item in protocols:
api.handlers[item] = handler
@@ -939,17 +1021,28 @@ proc addHandler*(
var pb = await transp.transactMessage(requestStreamHandler(maddress, protocols))
pb.withMessage:
api.servers.add(P2PServer(server: server, address: maddress))
except CatchableError as exc:
for item in protocols:
api.handlers.del(item)
server.stop()
server.close()
await server.join()
raise exc
except DaemonLocalError as e:
await removeHandler()
raise e
except TransportError as e:
await removeHandler()
raise e
except CancelledError as e:
await removeHandler()
raise e
finally:
await api.closeConnection(transp)
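# Editor's illustrative sketch (not part of the diff): registering a handler
# through the newly annotated addHandler. The protocol string and handler body
# are assumptions for the example; the callback signature is assumed to match
# P2PStreamCallback, i.e. (api, stream) -> Future[void].
proc exampleHandler(
    api: DaemonAPI, stream: P2PStream
) {.async: (raises: [CatchableError]).} =
  # a real handler would read from / write to stream.transp here
  await stream.close()

# await api.addHandler(@["/example/1.0.0"], exampleHandler)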
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
proc listPeers*(
api: DaemonAPI
): Future[seq[PeerInfo]] {.
async: (
raises: [
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
CancelledError, LPError,
]
)
.} =
## Get list of remote peers to which we are currently connected.
var transp = await api.newConnection()
try:
@@ -964,7 +1057,14 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
finally:
await api.closeConnection(transp)
proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.} =
proc cmTagPeer*(
api: DaemonAPI, peer: PeerId, tag: string, weight: int
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Tag peer with id ``peer`` using ``tag`` and ``weight``.
var transp = await api.newConnection()
try:
@@ -974,7 +1074,14 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.
finally:
await api.closeConnection(transp)
proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
proc cmUntagPeer*(
api: DaemonAPI, peer: PeerId, tag: string
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Remove tag ``tag`` from peer with id ``peer``.
var transp = await api.newConnection()
try:
@@ -984,7 +1091,14 @@ proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
finally:
await api.closeConnection(transp)
proc cmTrimPeers*(api: DaemonAPI) {.async.} =
proc cmTrimPeers*(
api: DaemonAPI
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Trim all connections.
var transp = await api.newConnection()
try:
@@ -1058,7 +1172,12 @@ proc getDhtMessageType(
proc dhtFindPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PeerInfo] {.async.} =
): Future[PeerInfo] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peer with id ``peer`` and return peer information ``PeerInfo``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1073,7 +1192,12 @@ proc dhtFindPeer*(
proc dhtGetPublicKey*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PublicKey] {.async.} =
): Future[PublicKey] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get peer's public key from peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1088,7 +1212,12 @@ proc dhtGetPublicKey*(
proc dhtGetValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[byte]] {.async.} =
): Future[seq[byte]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get value associated with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1103,7 +1232,12 @@ proc dhtGetValue*(
proc dhtPutValue*(
api: DaemonAPI, key: string, value: seq[byte], timeout = 0
) {.async.} =
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Associate ``value`` with ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1116,7 +1250,14 @@ proc dhtPutValue*(
finally:
await api.closeConnection(transp)
proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtProvide*(
api: DaemonAPI, cid: Cid, timeout = 0
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Provide content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1131,7 +1272,12 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtFindPeersConnectedToPeer*(
api: DaemonAPI, peer: PeerId, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Find peers which are connected to peer with id ``peer``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1157,7 +1303,12 @@ proc dhtFindPeersConnectedToPeer*(
proc dhtGetClosestPeers*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[PeerId]] {.async.} =
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get closest peers for ``key``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1183,7 +1334,12 @@ proc dhtGetClosestPeers*(
proc dhtFindProviders*(
api: DaemonAPI, cid: Cid, count: uint32, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get ``count`` providers for content with id ``cid``.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1209,7 +1365,12 @@ proc dhtFindProviders*(
proc dhtSearchValue*(
api: DaemonAPI, key: string, timeout = 0
): Future[seq[seq[byte]]] {.async.} =
): Future[seq[seq[byte]]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Search for value with ``key``, return list of values found.
##
## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1232,7 +1393,14 @@ proc dhtSearchValue*(
finally:
await api.closeConnection(transp)
proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
proc pubsubGetTopics*(
api: DaemonAPI
): Future[seq[string]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of topics this node is subscribed to.
var transp = await api.newConnection()
try:
@@ -1245,7 +1413,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
finally:
await api.closeConnection(transp)
proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.async.} =
proc pubsubListPeers*(
api: DaemonAPI, topic: string
): Future[seq[PeerId]] {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Get list of peers we are connected to and which are also subscribed to topic
## ``topic``.
var transp = await api.newConnection()
@@ -1260,7 +1435,14 @@ proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.asyn
finally:
await api.closeConnection(transp)
proc pubsubPublish*(api: DaemonAPI, topic: string, value: seq[byte]) {.async.} =
proc pubsubPublish*(
api: DaemonAPI, topic: string, value: seq[byte]
) {.
async: (
raises:
[DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
)
.} =
## Publish message ``value`` to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1280,7 +1462,13 @@ proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
discard pb.getField(5, result.signature)
discard pb.getField(6, result.key)
proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
proc pubsubLoop(
api: DaemonAPI, ticket: PubsubTicket
) {.
async: (
raises: [TransportIncompleteError, TransportError, CancelledError, CatchableError]
)
.} =
while true:
var pbmessage = await ticket.transp.recvMessage()
if len(pbmessage) == 0:
@@ -1295,8 +1483,13 @@ proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
break
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.async.} =
api: DaemonAPI, topic: string, handler: P2PPubSubCallback2
): Future[PubsubTicket] {.
async: (
raises:
[MaInvalidAddress, TransportError, LPError, CancelledError, DaemonLocalError]
)
.} =
## Subscribe to topic ``topic``.
var transp = await api.newConnection()
try:
@@ -1308,9 +1501,27 @@ proc pubsubSubscribe*(
ticket.transp = transp
asyncSpawn pubsubLoop(api, ticket)
result = ticket
except CatchableError as exc:
except DaemonLocalError as exc:
await api.closeConnection(transp)
raise exc
except TransportError as exc:
await api.closeConnection(transp)
raise exc
except CancelledError as exc:
await api.closeConnection(transp)
raise exc
proc pubsubSubscribe*(
api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.
async: (raises: [CatchableError]), deprecated: "Use P2PPubSubCallback2 instead"
.} =
proc wrap(
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
await handler(api, ticket, message)
await pubsubSubscribe(api, topic, wrap)
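# Editor's illustrative sketch (not part of the diff): subscribing with the new
# callback shape, mirroring the `wrap` proc above. The topic name is an
# assumption; judging by pubsubLoop above, returning false ends the loop.
proc onMessage(
    api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
): Future[bool] {.async: (raises: [CatchableError]).} =
  # keep the subscription alive
  return true

# let ticket = await api.pubsubSubscribe("example-topic", onMessage)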
proc shortLog*(pinfo: PeerInfo): string =
## Get string representation of ``PeerInfo`` object.

View File

@@ -55,7 +55,7 @@ proc newPool*(
address: TransportAddress,
poolsize: int = DefaultPoolSize,
bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
## Establish pool of connections to address ``address`` with size
## ``poolsize``.
var pool = new TransportPool
@@ -80,7 +80,9 @@ proc newPool*(
pool.state = Connected
result = pool
proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
proc acquire*(
pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
## Acquire non-busy connection from pool ``pool``.
var transp: StreamTransport
if pool.state in {Connected}:
@@ -102,7 +104,9 @@ proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
raise newException(TransportPoolError, "Pool is not ready!")
result = transp
proc release*(pool: TransportPool, transp: StreamTransport) =
proc release*(
pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
## Release connection ``transp`` back to pool ``pool``.
if pool.state in {Connected, Closing}:
var found = false
@@ -118,7 +122,9 @@ proc release*(pool: TransportPool, transp: StreamTransport) =
else:
raise newException(TransportPoolError, "Pool is not ready!")
proc join*(pool: TransportPool) {.async.} =
proc join*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Waiting for all connections to become available.
if pool.state in {Connected, Closing}:
while true:
@@ -130,7 +136,9 @@ proc join*(pool: TransportPool) {.async.} =
elif pool.state == Connecting:
raise newException(TransportPoolError, "Pool is not ready!")
proc close*(pool: TransportPool) {.async.} =
proc close*(
pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
## Closes transport pool ``pool`` and releases all resources.
if pool.state == Connected:
pool.state = Closing
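# Editor's illustrative sketch (not part of the diff): typical pool usage under
# the new raises annotations (note that release is now async). The address and
# default pool size are assumptions for the example.
proc usePool() {.async.} =
  let pool = await newPool(initTAddress("127.0.0.1:4050"))
  let transp = await pool.acquire()
  try:
    discard transp # the acquired StreamTransport would be used here
  finally:
    await pool.release(transp)
  await pool.close()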

View File

@@ -10,12 +10,14 @@
{.push raises: [].}
import chronos
import stew/results
import results
import peerid, stream/connection, transports/transport
export results
type Dial* = ref object of RootObj
type
Dial* = ref object of RootObj
DialFailedError* = object of LPError
method connect*(
self: Dial,
@@ -24,28 +26,28 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async, base.} =
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method connect*(
self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async, base.} =
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## Connects to a peer and retrieve its PeerId
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.connect] abstract method not implemented!")
method dial*(
self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method dial*(
self: Dial,
@@ -53,17 +55,19 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.dial] abstract method not implemented!")
method addTransport*(self: Dial, transport: Transport) {.base.} =
doAssert(false, "Not implemented!")
doAssert(false, "[Dial.addTransport] abstract method not implemented!")
method tryDial*(
self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async, base.} =
doAssert(false, "Not implemented!")
): Future[Opt[MultiAddress]] {.
base, async: (raises: [DialFailedError, CancelledError])
.} =
doAssert(false, "[Dial.tryDial] abstract method not implemented!")

View File

@@ -9,8 +9,7 @@
import std/tables
import stew/results
import pkg/[chronos, chronicles, metrics]
import pkg/[chronos, chronicles, metrics, results]
import
dial,
@@ -36,16 +35,13 @@ declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
type
DialFailedError* = object of LPError
Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
type Dialer* = ref object of Dial
localPeerId*: PeerId
connManager: ConnManager
dialLock: Table[PeerId, AsyncLock]
transports: seq[Transport]
peerStore: PeerStore
nameResolver: NameResolver
proc dialAndUpgrade(
self: Dialer,
@@ -53,7 +49,7 @@ proc dialAndUpgrade(
hostname: string,
address: MultiAddress,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [CancelledError]).} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
@@ -105,7 +101,9 @@ proc dialAndUpgrade(
proc expandDnsAddr(
self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
if not DNSADDR.matchPartial(address):
return @[(address, peerId)]
if isNil(self.nameResolver):
@@ -115,7 +113,10 @@ proc expandDnsAddr(
let
toResolve =
if peerId.isSome:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
try:
address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
except ResultError[void]:
raiseAssert "checked with if"
else:
address
resolved = await self.nameResolver.resolveDnsAddr(toResolve)
@@ -132,7 +133,9 @@ proc expandDnsAddr(
proc dialAndUpgrade(
self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.async.} =
): Future[Muxer] {.
async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs
for rawAddress in addrs:
@@ -169,47 +172,59 @@ proc internalConnect(
forceDial: bool,
reuseConnection = true,
dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
raise newException(DialFailedError, "can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
try:
await lock.acquire()
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
await lock.acquire()
defer:
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
except CatchableError as exc:
trace "Failed to finish outgoung upgrade", description = exc.msg
await muxed.close()
raise exc
return muxed
finally:
if lock.locked():
lock.release()
except AsyncLockError:
raiseAssert "lock must have been acquired in line above"
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot =
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(DialFailedError, exc.msg)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs, dir)
except CancelledError as exc:
slot.release()
raise exc
except CatchableError as exc:
slot.release()
raise newException(DialFailedError, exc.msg)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
try:
self.connManager.storeMuxer(muxed)
await self.peerStore.identify(muxed)
await self.connManager.triggerPeerEvents(
muxed.connection.peerId,
PeerEvent(kind: PeerEventKind.Identified, initiator: true),
)
return muxed
except CancelledError as exc:
await muxed.close()
raise exc
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
method connect*(
self: Dialer,
@@ -218,7 +233,7 @@ method connect*(
forceDial = false,
reuseConnection = true,
dir = Direction.Out,
) {.async.} =
) {.async: (raises: [DialFailedError, CancelledError]).} =
## connect remote peer without negotiating
## a protocol
##
@@ -231,7 +246,7 @@ method connect*(
method connect*(
self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async.} =
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
## Connects to a peer and retrieve its PeerId
parseFullAddress(address).toOpt().withValue(fullAddress):
@@ -249,7 +264,7 @@ method connect*(
proc negotiateStream(
self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CatchableError]).} =
trace "Negotiating stream", conn, protos
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
@@ -260,7 +275,7 @@ proc negotiateStream(
method tryDial*(
self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async.} =
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
## Create a protocol stream in order to check
## if a connection is possible.
## Doesn't use the Connection Manager to save it.
@@ -280,17 +295,24 @@ method tryDial*(
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream over an
## existing connection
##
trace "Dialing (existing)", peerId, protos
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled"
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, exc.msg)
method dial*(
self: Dialer,
@@ -298,7 +320,7 @@ method dial*(
addrs: seq[MultiAddress],
protos: seq[string],
forceDial = false,
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
## create a protocol stream and establish
## a connection if one doesn't exist already
##
@@ -307,7 +329,7 @@ method dial*(
conn: Muxer
stream: Connection
proc cleanup() {.async.} =
proc cleanup() {.async: (raises: []).} =
if not (isNil(stream)):
await stream.closeWithEOF()
@@ -331,7 +353,7 @@ method dial*(
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
await cleanup()
raise exc
raise newException(DialFailedError, exc.msg)
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t
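# Editor's illustrative sketch (not part of the diff): with the annotations
# above, callers can catch DialFailedError specifically instead of a generic
# CatchableError. The peer, addresses and protocol string are assumptions.
proc dialExample(dialer: Dial, peer: PeerId, addrs: seq[MultiAddress]) {.async.} =
  try:
    let conn = await dialer.dial(peer, addrs, @["/example/1.0.0"])
    defer:
      await conn.close()
    # the negotiated Connection would be used here
  except DialFailedError as exc:
    trace "dial failed", description = exc.msg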

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
import chronos, chronicles, results
import ../errors
type
@@ -38,8 +38,7 @@ proc add*[T](pa: var PeerAttributes, value: T) =
Attribute[T](
value: value,
comparator: proc(f: BaseAttr, c: BaseAttr): bool =
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
,
f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
)
)
@@ -60,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr:
raise newException(KeyError, "Attritute not found")
raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
for f in pa.attributes:
@@ -80,16 +79,21 @@ type
advertisementUpdated*: AsyncEvent
advertiseLoop*: Future[void]
method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
doAssert(false, "Not implemented!")
method advertise*(self: DiscoveryInterface) {.async, base.} =
doAssert(false, "Not implemented!")
type
DiscoveryError* = object of LPError
DiscoveryFinished* = object of LPError
AdvertiseError* = object of DiscoveryError
method request*(
self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")
method advertise*(
self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")
type
DiscoveryQuery* = ref object
attr: PeerAttributes
peers: AsyncQueue[PeerAttributes]
@@ -138,7 +142,9 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
## peer attributes are available through the variable
## `peer`
proc forEachInternal(q: DiscoveryQuery) {.async.} =
proc forEachInternal(
q: DiscoveryQuery
) {.async: (raises: [CancelledError, DiscoveryError]).} =
while true:
let peer {.inject.} =
try:
@@ -163,7 +169,11 @@ proc stop*(dm: DiscoveryManager) =
continue
i.advertiseLoop.cancel()
proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
proc getPeer*(
query: DiscoveryQuery
): Future[PeerAttributes] {.
async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
.} =
let getter = query.peers.popFirst()
try:

View File

@@ -23,15 +23,17 @@ type
proc `==`*(a, b: RdvNamespace): bool {.borrow.}
method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
var namespace = ""
method request*(
self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
var namespace = Opt.none(string)
for attr in pa:
if attr.ofType(RdvNamespace):
namespace = string attr.to(RdvNamespace)
namespace = Opt.some(string attr.to(RdvNamespace))
elif attr.ofType(DiscoveryService):
namespace = string attr.to(DiscoveryService)
namespace = Opt.some(string attr.to(DiscoveryService))
elif attr.ofType(PeerId):
namespace = $attr.to(PeerId)
namespace = Opt.some($attr.to(PeerId))
else:
# unhandled type
return
@@ -42,13 +44,15 @@ method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
for address in pr.addresses:
peer.add(address.address)
peer.add(DiscoveryService(namespace))
peer.add(RdvNamespace(namespace))
peer.add(DiscoveryService(namespace.get()))
peer.add(RdvNamespace(namespace.get()))
self.onPeerFound(peer)
await sleepAsync(self.timeToRequest)
method advertise*(self: RendezVousInterface) {.async.} =
method advertise*(
self: RendezVousInterface
) {.async: (raises: [CancelledError, AdvertiseError]).} =
while true:
var toAdvertise: seq[string]
for attr in self.toAdvertise:

View File

@@ -26,7 +26,7 @@ import
errors,
utility
import stew/[base58, base32, endians2]
export results, minprotobuf, vbuffer, errors, utility
export results, vbuffer, errors, utility
logScope:
topics = "libp2p multiaddress"
@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
## IPv6 validateBuffer() implementation.
pathValidateBufferNoSlash(vb)
proc memoryStB(s: string, vb: var VBuffer): bool =
## Memory stringToBuffer() implementation.
pathStringToBuffer(s, vb)
proc memoryBtS(vb: var VBuffer, s: var string): bool =
## Memory bufferToString() implementation.
pathBufferToString(vb, s)
proc memoryVB(vb: var VBuffer): bool =
## Memory validateBuffer() implementation.
pathValidateBuffer(vb)
proc portStB(s: string, vb: var VBuffer): bool =
## Port number stringToBuffer() implementation.
var port: array[2, byte]
@@ -355,6 +367,10 @@ const
)
TranscoderDNS* =
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
TranscoderMemory* = Transcoder(
stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
)
ProtocolsList = [
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
@@ -393,6 +409,9 @@ const
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
MAProtocol(
mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
),
]
DNSANY* = mapEq("dns")
@@ -453,6 +472,8 @@ const
CircuitRelay* = mapEq("p2p-circuit")
Memory* = mapEq("memory")
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
for item in ProtocolsList:
result[item.mcodec] = item
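# Editor's illustrative sketch (not part of the diff): the new memory protocol
# is a Path-style component, so an in-memory address is written and matched
# like a path. The concrete value "addr-1" is an assumption for the example.
let memAddr = MultiAddress.init("/memory/addr-1").tryGet()
doAssert Memory.match(memAddr)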

View File

@@ -16,7 +16,8 @@
{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
import results
import stew/[base32, base58, base64]
type
MultiBaseStatus* {.pure.} = enum

View File

@@ -13,7 +13,7 @@
import tables, hashes
import vbuffer
import stew/results
import results
export results
## List of officially supported codecs can BE found here
@@ -396,6 +396,7 @@ const MultiCodecList = [
("onion3", 0x01BD),
("p2p-circuit", 0x0122),
("libp2p-peer-record", 0x0301),
("memory", 0x0309),
("dns", 0x35),
("dns4", 0x36),
("dns6", 0x37),
@@ -403,6 +404,7 @@ const MultiCodecList = [
# IPLD formats
("dag-pb", 0x70),
("dag-cbor", 0x71),
("libp2p-key", 0x72),
("dag-json", 0x129),
("git-raw", 0x78),
("eth-block", 0x90),

View File

@@ -27,7 +27,7 @@ import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
import varint, vbuffer, multicodec, multibase
import stew/base58
import stew/results
import results
export results
# This is workaround for Nim `import` bug.
export sha, sha2, keccak, blake2, hash, utils

View File

@@ -169,6 +169,7 @@ method readOnce*(
## channel must not be done from within a callback / read handler of another
## or the reads will lock each other.
if s.remoteReset:
trace "reset stream in readOnce", s
raise newLPStreamResetError()
if s.localReset:
raise newLPStreamClosedError()
@@ -201,6 +202,7 @@ proc prepareWrite(
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
trace "stream is reset when prepareWrite", s
raise newLPStreamResetError()
if s.closedLocal:
raise newLPStreamClosedError()

View File

@@ -247,7 +247,7 @@ method close*(m: Mplex) {.async: (raises: []).} =
trace "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
for c in m.channels[false].values:
result.add(c)
for c in m.channels[true].values:

View File

@@ -52,7 +52,7 @@ method newStream*(
): Future[Connection] {.
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
.} =
raiseAssert("Not implemented!")
raiseAssert("[Muxer.newStream] abstract method not implemented!")
method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
@@ -67,5 +67,5 @@ proc new*(
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
raiseAssert("[Muxer.getStreams] abstract method not implemented!")

View File

@@ -82,8 +82,7 @@ proc `$`(header: YamuxHeader): string =
if a != "":
a & ", " & $b
else:
$b
,
$b,
"",
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
"}"
@@ -176,8 +175,7 @@ proc `$`(channel: YamuxChannel): string =
if a != "":
a & ", " & b
else:
b
,
b,
"",
) & "}"
@@ -205,6 +203,7 @@ proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedRemotely.isSet():
channel.closedRemotely.fire()
await channel.actuallyClose()
channel.isClosedRemotely = true
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
if not channel.closedLocally:
@@ -270,10 +269,13 @@ method readOnce*(
if channel.isReset:
raise
if channel.remoteReset:
trace "stream is remote reset when readOnce", channel = $channel
newLPStreamResetError()
elif channel.closedLocally:
trace "stream is closed locally when readOnce", channel = $channel
newLPStreamClosedError()
else:
trace "stream is down when readOnce", channel = $channel
newLPStreamConnDownError()
if channel.isEof:
raise newLPStreamRemoteClosedError()
@@ -397,6 +399,7 @@ method write*(
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
trace "stream is reset when write", channel = $channel
result.fail(newLPStreamResetError())
return result
if channel.closedLocally or channel.isReset:
@@ -632,7 +635,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
for c in m.channels.values:
result.add(c)

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -10,7 +10,7 @@
{.push raises: [].}
import
std/[streams, strutils, sets, sequtils],
std/[streams, sets, sequtils],
chronos,
chronicles,
stew/byteutils,
@@ -39,24 +39,32 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
var buf = newSeq[byte](dataLen)
discard requestStream.readData(addr buf[0], dataLen)
return buf
except CatchableError as exc:
buf
except IOError as exc:
info "Failed to created DNS buffer", description = exc.msg
return newSeq[byte](0)
newSeq[byte](0)
except OSError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
except ValueError as exc:
info "Failed to created DNS buffer", description = exc.msg
newSeq[byte](0)
proc getDnsResponse(
dnsServer: TransportAddress, address: string, kind: QKind
): Future[Response] {.async.} =
): Future[Response] {.
async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
.} =
var sendBuf = questionToBuf(address, kind)
if sendBuf.len == 0:
raise newException(ValueError, "Incorrect DNS query")
let receivedDataFuture = newFuture[void]()
let receivedDataFuture = Future[void].Raising([CancelledError]).init()
proc datagramDataReceived(
transp: DatagramTransport, raddr: TransportAddress
): Future[void] {.async, closure.} =
): Future[void] {.async: (raises: []).} =
receivedDataFuture.complete()
let sock =
@@ -68,27 +76,41 @@ proc getDnsResponse(
try:
await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)
await receivedDataFuture or sleepAsync(5.seconds) #unix default
if not receivedDataFuture.finished:
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError:
raise newException(IOError, "DNS server timeout")
let rawResponse = sock.getMessage()
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return exceptionToAssert:
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise exc
except OSError as exc:
raise exc
except ValueError as exc:
raise exc
except Exception as exc:
# Nim 1.6: parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert exc.msg
finally:
await sock.closeWait()
method resolveIp*(
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
var responseFutures: seq[Future[Response]]
var responseFutures: seq[
Future[Response].Raising(
[CancelledError, IOError, OSError, TransportError, ValueError]
)
]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
responseFutures.add(getDnsResponse(server, address, A))
@@ -103,23 +125,32 @@ method resolveIp*(
var
resolvedAddresses: OrderedSet[string]
resolveFailed = false
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
for fut in responseFutures:
try:
let resp = await fut
for answer in resp.answers:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(exceptionToAssert(answer.toString()))
resolvedAddresses.incl(answer.toString())
except CancelledError as e:
raise e
except ValueError as e:
info "Invalid DNS query", address, error = e.msg
return @[]
except CatchableError as e:
info "Failed to query DNS", address, error = e.msg
resolveFailed = true
break
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: answer.toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
if resolveFailed:
self.nameServers.add(self.nameServers[0])
@@ -132,27 +163,39 @@ method resolveIp*(
debug "Failed to resolve address, returning empty set"
return @[]
method resolveTxt*(self: DnsResolver, address: string): Future[seq[string]] {.async.} =
method resolveTxt*(
self: DnsResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
try:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
let response = await getDnsResponse(server, address, TXT)
return exceptionToAssert:
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except CatchableError as e:
template handleFail(e): untyped =
info "Failed to query DNS", address, error = e.msg
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
try:
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response",
server = $server, answer = response.answers.mapIt(it.toString())
return response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except IOError as e:
handleFail(e)
except OSError as e:
handleFail(e)
except TransportError as e:
handleFail(e)
except ValueError as e:
handleFail(e)
except Exception as e:
# Nim 1.6: toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
debug "Failed to resolve TXT, returning empty set"
return @[]
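# Editor's illustrative sketch (not part of the diff): querying TXT records via
# the reworked resolver. The constructor shape and the nameserver/host values
# are assumptions for the example.
proc lookupTxt() {.async.} =
  let resolver = DnsResolver.new(@[initTAddress("1.1.1.1:53")])
  let records = await resolver.resolveTxt("_dnsaddr.example.org")
  trace "txt records", records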

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -25,17 +25,25 @@ type MockResolver* = ref object of NameResolver
method resolveIp*(
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError])
.} =
var res: seq[TransportAddress]
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, false)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
for resp in self.ipResponses.getOrDefault((address, true)):
result.add(initTAddress(resp, port))
res.add(initTAddress(resp, port))
method resolveTxt*(self: MockResolver, address: string): Future[seq[string]] {.async.} =
return self.txtResponses.getOrDefault(address)
res
method resolveTxt*(
self: MockResolver, address: string
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
self.txtResponses.getOrDefault(address)
proc new*(T: typedesc[MockResolver]): T =
T()
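# Editor's illustrative sketch (not part of the diff): seeding a MockResolver
# for tests. Field names follow the getOrDefault calls above; the sample
# entries are assumptions for the example.
let mock = MockResolver.new()
mock.ipResponses[("example.org", false)] = @["127.0.0.1"] # IPv4 answers
mock.ipResponses[("example.org", true)] = @["::1"] # IPv6 answers
mock.txtResponses["_dnsaddr.example.org"] = @["dnsaddr=/ip4/127.0.0.1/tcp/4040"]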

View File

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,7 +9,7 @@
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import std/[sets, sequtils, strutils]
import chronos, chronicles, stew/endians2
import ".."/[multiaddress, multicodec]
@@ -20,19 +20,17 @@ type NameResolver* = ref object of RootObj
method resolveTxt*(
self: NameResolver, address: string
): Future[seq[string]] {.async, base.} =
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
## Get TXT record
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
method resolveIp*(
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
): Future[seq[TransportAddress]] {.async, base.} =
): Future[seq[TransportAddress]] {.
async: (raises: [CancelledError, TransportAddressError]), base
.} =
## Resolve the specified address
##
doAssert(false, "Not implemented!")
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
proc getHostname*(ma: MultiAddress): string =
let
@@ -46,30 +44,40 @@ proc getHostname*(ma: MultiAddress): string =
proc resolveOneAddress(
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
): Future[seq[MultiAddress]] {.async.} =
#Resolve a single address
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
# Resolve a single address
let portPart = ma[1].valueOr:
raise maErr error
var pbuf: array[2, byte]
var dnsval = getHostname(ma)
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
raise newException(MaError, "Incorrect port number")
let plen = portPart.protoArgument(pbuf).valueOr:
raise maErr error
if plen == 0:
raise maErr "Incorrect port number"
let
port = Port(fromBytesBE(uint16, pbuf))
dnsval = getHostname(ma)
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
return collect(newSeqOfCap(4)):
for address in resolvedAddresses:
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
for part in ma:
if DNS.match(part.tryGet()):
continue
createdAddress &= part.tryGet()
createdAddress
resolvedAddresses.mapIt:
let address = MultiAddress.init(it).valueOr:
raise maErr error
var createdAddress = address[0].valueOr:
raise maErr error
for part in ma:
let part = part.valueOr:
raise maErr error
if DNS.match(part):
continue
createdAddress &= part
createdAddress
proc resolveDnsAddr*(
self: NameResolver, ma: MultiAddress, depth: int = 0
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
if not DNSADDR.matchPartial(ma):
return @[ma]
@@ -78,54 +86,67 @@ proc resolveDnsAddr*(
info "Stopping DNSADDR recursion, probably malicious", ma
return @[]
var dnsval = getHostname(ma)
let txt = await self.resolveTxt("_dnsaddr." & dnsval)
let
dnsval = getHostname(ma)
txt = await self.resolveTxt("_dnsaddr." & dnsval)
trace "txt entries", txt
var result: seq[MultiAddress]
const codec = multiCodec("p2p")
let maCodec = block:
let hasCodec = ma.contains(codec).valueOr:
raise maErr error
if hasCodec:
ma[codec]
else:
(static(default(MaResult[MultiAddress])))
var res: seq[MultiAddress]
for entry in txt:
if not entry.startsWith("dnsaddr="):
continue
let entryValue = MultiAddress.init(entry[8 ..^ 1]).tryGet()
if entryValue.contains(multiCodec("p2p")).tryGet() and
ma.contains(multiCodec("p2p")).tryGet():
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
continue
let
entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
raise maErr error
entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
raise maErr error
if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
continue
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
for r in resolved:
result.add(r)
res.add(r)
if result.len == 0:
if res.len == 0:
debug "Failed to resolve a DNSADDR", ma
return @[]
return result
res
proc resolveMAddress*(
self: NameResolver, address: MultiAddress
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.
async: (raises: [CancelledError, MaError, TransportAddressError])
.} =
var res = initOrderedSet[MultiAddress]()
if not DNS.matchPartial(address):
res.incl(address)
else:
let code = address[0].tryGet().protoCode().tryGet()
let seq =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
assert false
@[address]
for ad in seq:
let
firstPart = address[0].valueOr:
raise maErr error
code = firstPart.protoCode().valueOr:
raise maErr error
ads =
case code
of multiCodec("dns"):
await self.resolveOneAddress(address)
of multiCodec("dns4"):
await self.resolveOneAddress(address, Domain.AF_INET)
of multiCodec("dns6"):
await self.resolveOneAddress(address, Domain.AF_INET6)
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
raise maErr("Unsupported codec " & $code)
for ad in ads:
res.incl(ad)
return res.toSeq
res.toSeq
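# Editor's illustrative sketch (not part of the diff): resolving a dns4 address
# through the reworked resolveMAddress; any NameResolver implementation works,
# and the host and port here are assumptions for the example.
proc resolveExample(resolver: NameResolver) {.async.} =
  let ma = MultiAddress.init("/dns4/localhost/tcp/4040").tryGet()
  let resolved = await resolver.resolveMAddress(ma)
  trace "resolved addresses", resolved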

View File

@@ -14,7 +14,8 @@
import
std/[hashes, strutils],
stew/[base58, results],
stew/base58,
results,
chronicles,
nimcrypto/utils,
utility,

View File

@@ -11,7 +11,7 @@
{.push public.}
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import pkg/[chronos, chronicles, results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
export peerid, multiaddress, crypto, routing_record, errors, results
@@ -22,7 +22,7 @@ type
PeerInfoError* = object of LPError
AddressMapper* = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.
gcsafe, raises: []
gcsafe, async: (raises: [CancelledError])
.} ## A proc that expected to resolve the listen addresses into dialable addresses
PeerInfo* {.public.} = ref object
@@ -52,7 +52,7 @@ func shortLog*(p: PeerInfo): auto =
chronicles.formatIt(PeerInfo):
shortLog(it)
proc update*(p: PeerInfo) {.async.} =
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
# p.addrs.len == 0 overrides addrs only if it is the first time update is being executed or if the field is empty.
# p.addressMappers.len == 0 is for when all addressMappers have been removed,
# and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.
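# Editor's illustrative sketch (not part of the diff): with AddressMapper now
# declared as an async proc raising only CancelledError, a mapper looks like
# this; the pass-through body is an assumption for the example.
proc identityMapper(
    listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  # a real mapper could substitute externally observed addresses here
  return listenAddrs

# peerInfo.addressMappers.add(identityMapper)
# await peerInfo.update()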

View File

@@ -63,6 +63,7 @@ type
KeyBook* {.public.} = ref object of PeerBook[PublicKey]
AgentBook* {.public.} = ref object of PeerBook[string]
LastSeenBook* {.public.} = ref object of PeerBook[Opt[MultiAddress]]
ProtoVersionBook* {.public.} = ref object of PeerBook[string]
SPRBook* {.public.} = ref object of PeerBook[Envelope]
@@ -145,18 +146,24 @@ proc del*(peerStore: PeerStore, peerId: PeerId) {.public.} =
for _, book in peerStore.books:
book.deletor(peerId)
proc updatePeerInfo*(peerStore: PeerStore, info: IdentifyInfo) =
if info.addrs.len > 0:
proc updatePeerInfo*(
peerStore: PeerStore,
info: IdentifyInfo,
observedAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
) =
if len(info.addrs) > 0:
peerStore[AddressBook][info.peerId] = info.addrs
peerStore[LastSeenBook][info.peerId] = observedAddr
info.pubkey.withValue(pubkey):
peerStore[KeyBook][info.peerId] = pubkey
info.agentVersion.withValue(agentVersion):
peerStore[AgentBook][info.peerId] = agentVersion.string
peerStore[AgentBook][info.peerId] = agentVersion
info.protoVersion.withValue(protoVersion):
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
peerStore[ProtoVersionBook][info.peerId] = protoVersion
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos
@@ -181,7 +188,16 @@ proc cleanup*(peerStore: PeerStore, peerId: PeerId) =
peerStore.del(peerStore.toClean[0])
peerStore.toClean.delete(0)
proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
proc identify*(
peerStore: PeerStore, muxer: Muxer
) {.
async: (
raises: [
CancelledError, IdentityNoMatchError, IdentityInvalidMsgError, MultiStreamError,
LPStreamError, MuxerError,
]
)
.} =
# new stream for identify
var stream = await muxer.newStream()
if stream == nil:
@@ -200,7 +216,7 @@ proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
peerStore.updatePeerInfo(info, stream.observedAddr)
finally:
await stream.closeWithEOF()

View File

@@ -11,13 +11,11 @@
{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
import ../varint, ../utility, stew/endians2, results
export results, utility
{.push public.}
const MaxMessageSize = 1'u shl 22
type
ProtoFieldKind* = enum
## Protobuf's field types enum
@@ -39,7 +37,6 @@ type
buffer*: seq[byte]
offset*: int
length*: int
maxSize*: uint
ProtoHeader* = object
wire*: ProtoFieldKind
@@ -63,7 +60,6 @@ type
VarintDecode
MessageIncomplete
BufferOverflow
MessageTooBig
BadWireType
IncorrectBlob
RequiredFieldMissing
@@ -99,11 +95,14 @@ template getProtoHeader*(field: ProtoField): uint64 =
template toOpenArray*(pb: ProtoBuffer): untyped =
toOpenArray(pb.buffer, pb.offset, len(pb.buffer) - 1)
template lenu64*(x: untyped): untyped =
uint64(len(x))
template isEmpty*(pb: ProtoBuffer): bool =
len(pb.buffer) - pb.offset <= 0
template isEnough*(pb: ProtoBuffer, length: int): bool =
len(pb.buffer) - pb.offset - length >= 0
template isEnough*(pb: ProtoBuffer, length: uint64): bool =
pb.offset <= len(pb.buffer) and length <= uint64(len(pb.buffer) - pb.offset)
template getPtr*(pb: ProtoBuffer): pointer =
cast[pointer](unsafeAddr pb.buffer[pb.offset])
@@ -127,33 +126,25 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
0
proc initProtoBuffer*(
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
data: seq[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with shallow copy of ``data``.
result.buffer = data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
data: openArray[byte],
offset = 0,
options: set[ProtoFlags] = {},
maxSize = MaxMessageSize,
data: openArray[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
## Initialize ProtoBuffer with copy of ``data``.
result.buffer = @data
result.offset = offset
result.options = options
result.maxSize = maxSize
proc initProtoBuffer*(
options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
## Initialize ProtoBuffer with new sequence of capacity ``cap``.
result.buffer = newSeq[byte]()
result.options = options
result.maxSize = maxSize
if WithVarintLength in options:
# Our buffer will start from position 10, so we can store length of buffer
# in [0, 9].
@@ -194,12 +185,12 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](value)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -242,12 +233,12 @@ proc writePacked*[T: ProtoScalar](
doAssert(vres.isOk())
pb.offset += length
elif T is float32:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u32 = cast[uint32](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
pb.offset += sizeof(T)
elif T is float64:
doAssert(pb.isEnough(sizeof(T)))
doAssert(pb.isEnough(uint64(sizeof(T))))
let u64 = cast[uint64](item)
pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
pb.offset += sizeof(T)
@@ -268,7 +259,7 @@ proc write*[T: byte | char](pb: var ProtoBuffer, field: int, value: openArray[T]
doAssert(lres.isOk())
pb.offset += length
if len(value) > 0:
doAssert(pb.isEnough(len(value)))
doAssert(pb.isEnough(value.lenu64))
copyMem(addr pb.buffer[pb.offset], unsafeAddr value[0], len(value))
pb.offset += len(value)
@@ -327,13 +318,13 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed32:
if data.isEnough(sizeof(uint32)):
if data.isEnough(uint64(sizeof(uint32))):
data.offset += sizeof(uint32)
ok()
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.Fixed64:
if data.isEnough(sizeof(uint64)):
if data.isEnough(uint64(sizeof(uint64))):
data.offset += sizeof(uint64)
ok()
else:
@@ -343,14 +334,11 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
var bsize = 0'u64
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
of ProtoFieldKind.StartGroup, ProtoFieldKind.EndGroup:
@@ -382,7 +370,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.VarintDecode)
elif T is float32:
doAssert(header.wire == ProtoFieldKind.Fixed32)
if data.isEnough(sizeof(float32)):
if data.isEnough(uint64(sizeof(float32))):
outval = cast[float32](fromBytesLE(uint32, data.toOpenArray()))
data.offset += sizeof(float32)
ok()
@@ -390,7 +378,7 @@ proc getValue[T: ProtoScalar](
err(ProtoError.MessageIncomplete)
elif T is float64:
doAssert(header.wire == ProtoFieldKind.Fixed64)
if data.isEnough(sizeof(float64)):
if data.isEnough(uint64(sizeof(float64))):
outval = cast[float64](fromBytesLE(uint64, data.toOpenArray()))
data.offset += sizeof(float64)
ok()
@@ -410,22 +398,19 @@ proc getValue[T: byte | char](
outLength = 0
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
if data.isEnough(bsize):
outLength = int(bsize)
if len(outBytes) >= int(bsize):
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
# Buffer overflow should not be critical failure
data.offset += int(bsize)
err(ProtoError.BufferOverflow)
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
@@ -439,17 +424,14 @@ proc getValue[T: seq[byte] | string](
if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
data.offset += length
if bsize <= uint64(data.maxSize):
if data.isEnough(int(bsize)):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageIncomplete)
if data.isEnough(bsize):
outBytes.setLen(bsize)
if bsize > 0'u64:
copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
data.offset += int(bsize)
ok()
else:
err(ProtoError.MessageTooBig)
err(ProtoError.MessageIncomplete)
else:
err(ProtoError.VarintDecode)
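# Editor's illustrative sketch (not part of the diff): a length-delimited field
# round-trip with the simplified initProtoBuffer (no maxSize parameter). The
# field number and payload are assumptions for the example.
var pbOut = initProtoBuffer()
pbOut.write(1, @[1'u8, 2, 3])
pbOut.finish()

var pbIn = initProtoBuffer(pbOut.buffer)
var payload: seq[byte]
doAssert pbIn.getField(1, payload).isOk()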

View File

@@ -19,7 +19,9 @@ logScope:
type AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -30,7 +32,9 @@ method dialMe*(
switch: Switch,
pid: PeerId,
addrs: seq[MultiAddress] = newSeq[MultiAddress](),
): Future[MultiAddress] {.base, async.} =
): Future[MultiAddress] {.
base, async: (raises: [AutonatError, AutonatUnreachableError, CancelledError])
.} =
proc getResponseOrRaise(
autonatMsg: Opt[AutonatMsg]
): AutonatDialResponse {.raises: [AutonatError].} =
@@ -47,7 +51,9 @@ method dialMe*(
await switch.dial(pid, @[AutonatCodec])
else:
await switch.dial(pid, addrs, AutonatCodec)
except CatchableError as err:
except CancelledError as err:
raise err
except DialFailedError as err:
raise
newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)
@@ -61,14 +67,37 @@ method dialMe*(
incomingConnection.cancel()
# Safer to always try to cancel cause we aren't sure if the peer dialled us or not
if incomingConnection.completed():
await (await incomingConnection).connection.close()
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
try:
await (await incomingConnection).connection.close()
except AlreadyExpectingConnectionError as e:
# this error is already handled above and cannot happen at this point
error "Unexpected error", description = e.msg
try:
trace "sending Dial", addrs = switch.peerInfo.addrs
await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "Sending dial failed", e)
var respBytes: seq[byte]
try:
respBytes = await conn.readLp(1024)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "read Dial response failed", e)
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))
return
case response.status
of ResponseStatus.Ok:
response.ma.tryGet()
try:
response.ma.tryGet()
except ResultError[void]:
raiseAssert("checked with if")
of ResponseStatus.DialError:
raise newException(
AutonatUnreachableError, "Peer could not dial us back: " & response.text.get("")

View File

@@ -9,9 +9,10 @@
{.push raises: [].}
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf
logScope:
topics = "libp2p autonat"

View File

@@ -10,7 +10,7 @@
{.push raises: [].}
import std/[sets, sequtils]
import stew/results
import results
import chronos, chronicles
import
../../protocol,
@@ -32,7 +32,9 @@ type Autonat* = ref object of LPProtocol
switch*: Switch
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [LPStreamError, CancelledError]).} =
let pb = AutonatDial(
peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
).encode()
@@ -40,28 +42,37 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: status,
text:
if text == "":
Opt.none(string)
else:
Opt.some(text)
,
Opt.some(text),
ma: Opt.none(MultiAddress),
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response error", description = exc.msg, conn
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
proc sendResponseOk(
conn: Connection, ma: MultiAddress
) {.async: (raises: [CancelledError]).} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok, text: Opt.some("Ok"), ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
try:
await conn.writeLp(pb.buffer)
except LPStreamError as exc:
trace "autonat failed to send response ok", description = exc.msg, conn
proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
proc tryDial(
autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]
) {.async: (raises: [DialFailedError, CancelledError]).} =
await autonat.sem.acquire()
var futs: seq[Future[Opt[MultiAddress]]]
var futs: seq[Future[Opt[MultiAddress]].Raising([DialFailedError, CancelledError])]
try:
# This is to bypass the per peer max connections limit
let outgoingConnection =
@@ -72,7 +83,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
return
# Safer to always try to cancel cause we aren't sure if the connection was established
defer:
outgoingConnection.cancel()
outgoingConnection.cancelSoon()
# tryDial is to bypass the global max connections limit
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
@@ -89,9 +101,6 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
except AsyncTimeoutError as exc:
debug "Dial timeout", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Dial timeout")
except CatchableError as exc:
debug "Unexpected error", addrs, description = exc.msg
await conn.sendResponseError(DialError, "Unexpected error")
finally:
autonat.sem.release()
for f in futs:
@@ -155,7 +164,9 @@ proc new*(
): T =
let autonat =
T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
@@ -163,6 +174,7 @@ proc new*(
raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
trace "cancelled autonat handler"
raise exc
except CatchableError as exc:
debug "exception in autonat handler", description = exc.msg, conn

View File

@@ -50,7 +50,7 @@ type
StatusAndConfidenceHandler* = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
proc new*(
T: typedesc[AutonatService],
@@ -79,7 +79,7 @@ proc new*(
enableAddressMapper: enableAddressMapper,
)
proc callHandler(self: AutonatService) {.async.} =
proc callHandler(self: AutonatService) {.async: (raises: [CancelledError]).} =
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
@@ -92,7 +92,7 @@ proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
proc handleAnswer(
self: AutonatService, ans: NetworkReachability
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
if ans == Unknown:
return
@@ -127,7 +127,7 @@ proc handleAnswer(
proc askPeer(
self: AutonatService, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async.} =
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
logScope:
peerId = $peerId
@@ -160,7 +160,9 @@ proc askPeer(
await switch.peerInfo.update()
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
proc askConnectedPeers(
self: AutonatService, switch: Switch
) {.async: (raises: [CancelledError]).} =
trace "Asking peers for reachability"
var peers = switch.connectedPeers(Direction.Out)
self.rng.shuffle(peers)
@@ -175,13 +177,15 @@ proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
if (await askPeer(self, switch, peer)) != Unknown:
answersFromPeers.inc()
proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
proc schedule(
service: AutonatService, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
heartbeat "Scheduling AutonatService run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatService, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -198,10 +202,12 @@ proc addressMapper(
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: AutonatService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
@@ -210,22 +216,30 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
if self.askNewConnectedPeers:
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
): Future[void] {.async.} =
): Future[void] {.async: (raises: [CancelledError]).} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(
self.newConnectedPeerHandler, PeerEventKind.Joined
)
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
method run*(
self: AutonatService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
self: AutonatService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
info "Stopping AutonatService"
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:

View File

@@ -34,7 +34,7 @@ proc new*(
proc startSync*(
self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]
) {.async.} =
) {.async: (raises: [DcutrError, CancelledError]).} =
logScope:
peerId = switch.peerInfo.peerId

View File

@@ -15,6 +15,7 @@ import chronos
import stew/objects
import ../../../multiaddress, ../../../errors, ../../../stream/connection
import ../../../protobuf/minprotobuf
export multiaddress
@@ -49,7 +50,9 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrEr
raise newException(DcutrError, "Received malformed message")
return dcutrMsg
proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
proc send*(
conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)

View File

@@ -30,7 +30,9 @@ proc new*(
connectTimeout = 15.seconds,
maxDialableAddrs = 8,
): T =
proc handleStream(stream: Connection, proto: string) {.async.} =
proc handleStream(
stream: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -77,6 +79,7 @@ proc new*(
for fut in futs:
fut.cancel()
except CancelledError as err:
trace "cancelled Dcutr receiver"
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, " &

View File

@@ -29,11 +29,12 @@ const RelayClientMsgSize = 4096
type
RelayClientError* = object of LPError
ReservationError* = object of RelayClientError
RelayV1DialError* = object of RelayClientError
RelayV2DialError* = object of RelayClientError
RelayDialError* = object of DialFailedError
RelayV1DialError* = object of RelayDialError
RelayV2DialError* = object of RelayDialError
RelayClientAddConn* = proc(
conn: Connection, duration: uint32, data: uint64
): Future[void] {.gcsafe, raises: [].}
): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -45,14 +46,21 @@ type
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
proc sendStopError(
conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError]).} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
try:
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "failed to send stop status", description = e.msg
proc handleRelayedConnect(
cl: RelayClient, conn: Connection, msg: StopMessage
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
@@ -80,7 +88,7 @@ proc handleRelayedConnect(
proc reserve*(
cl: RelayClient, peerId: PeerId, addrs: seq[MultiAddress] = @[]
): Future[Rsvp] {.async.} =
): Future[Rsvp] {.async: (raises: [ReservationError, DialFailedError, CancelledError]).} =
let conn = await cl.switch.dial(peerId, addrs, RelayV2HopCodec)
defer:
await conn.close()
@@ -121,7 +129,7 @@ proc reserve*(
proc dialPeerV1*(
cl: RelayClient, conn: Connection, dstPeerId: PeerId, dstAddrs: seq[MultiAddress]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CancelledError, RelayV1DialError]).} =
var
msg = RelayMessage(
msgType: Opt.some(RelayType.Hop),
@@ -138,19 +146,19 @@ proc dialPeerV1*(
await conn.writeLp(pb.buffer)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error writing hop request", description = exc.msg
raise exc
raise newException(RelayV1DialError, "error writing hop request", exc)
let msgRcvFromRelayOpt =
try:
RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "error reading stop response", description = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise newException(RelayV1DialError, "error reading stop response", exc)
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -176,7 +184,7 @@ proc dialPeerV2*(
conn: RelayConnection,
dstPeerId: PeerId,
dstAddrs: seq[MultiAddress],
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [RelayV2DialError, CancelledError]).} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
@@ -202,7 +210,9 @@ proc dialPeerV2*(
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
proc handleStopStreamV2(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -214,7 +224,9 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
trace "Unexpected client / relayv2 handshake", msgType = msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
proc handleStop(
cl: RelayClient, conn: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
@@ -241,7 +253,9 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.}
else:
await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
proc handleStreamV1(
cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -290,7 +304,9 @@ proc new*(
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV1Codec:
@@ -300,6 +316,7 @@ proc new*(
of RelayV2HopCodec:
await cl.handleHopStreamV2(conn)
except CancelledError as exc:
trace "cancelled client handler"
raise exc
except CatchableError as exc:
trace "exception in client handler", description = exc.msg, conn

View File

@@ -10,8 +10,10 @@
{.push raises: [].}
import macros
import stew/[objects, results]
import stew/objects
import results
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf
# Circuit Relay V1 Message

View File

@@ -112,7 +112,9 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async.} =
proc handleReserve(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -133,7 +135,9 @@ proc handleReserve(r: Relay, conn: Connection) {.async.} =
r.rsvp[pid] = expire
await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
proc handleConnect(
r: Relay, connSrc: Connection, msg: HopMessage
) {.async: (raises: [CancelledError, LPStreamError]).} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
@@ -166,14 +170,14 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await r.switch.dial(dst, RelayV2StopCodec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendHopStatus(connSrc, ConnectionFailed)
return
defer:
await connDst.close()
proc sendStopMsg() {.async.} =
proc sendStopMsg() {.async: (raises: [SendStopError, CancelledError, LPStreamError]).} =
let stopMsg = StopMessage(
msgType: StopMessageType.Connect,
peer: Opt.some(Peer(peerId: src, addrs: @[])),
@@ -209,7 +213,9 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
proc handleHopStreamV2*(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
@@ -225,7 +231,9 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
proc handleHop*(
r: Relay, connSrc: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
r.streamCount.inc()
defer:
r.streamCount.dec()
@@ -271,7 +279,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
await r.switch.dial(dst.peerId, RelayV1Codec)
except CancelledError as exc:
raise exc
except CatchableError as exc:
except DialFailedError as exc:
trace "error opening relay stream", dst, description = exc.msg
await sendStatus(connSrc, StatusV1.HopCantDialDst)
return
@@ -309,7 +317,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc handleStreamV1(
r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
@@ -333,9 +343,8 @@ proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc setup*(r: Relay, switch: Switch) =
r.switch = switch
r.switch.addPeerEventHandler(
proc(peerId: PeerId, event: PeerEvent) {.async.} =
r.rsvp.del(peerId)
,
proc(peerId: PeerId, event: PeerEvent) {.async: (raises: [CancelledError]).} =
r.rsvp.del(peerId),
Left,
)
@@ -360,7 +369,9 @@ proc new*(
isCircuitRelayV1: circuitRelayV1,
)
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
case proto
of RelayV2HopCodec:
@@ -368,6 +379,7 @@ proc new*(
of RelayV1Codec:
await r.handleStreamV1(conn)
except CancelledError as exc:
trace "cancelled relayv2 handler"
raise exc
except CatchableError as exc:
debug "exception in relayv2 handler", description = exc.msg, conn
@@ -383,12 +395,15 @@ proc new*(
r.handler = handleStream
r
proc deletesReservation(r: Relay) {.async.} =
proc deletesReservation(r: Relay) {.async: (raises: [CancelledError]).} =
heartbeat "Reservation timeout", r.heartbeatSleepTime.seconds():
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
try:
let n = now().utc
for k in toSeq(r.rsvp.keys):
if n > r.rsvp[k]:
r.rsvp.del(k)
except KeyError:
raiseAssert "checked with in"
method start*(r: Relay): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()

View File

@@ -29,71 +29,99 @@ type RelayTransport* = ref object of Transport
queue: AsyncQueue[Connection]
selfRunning: bool
method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
method start*(
self: RelayTransport, ma: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
if self.selfRunning:
trace "Relay transport already running"
return
self.client.onNewConnection = proc(
conn: Connection, duration: uint32 = 0, data: uint64 = 0
) {.async.} =
) {.async: (raises: [CancelledError]).} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
method stop*(self: RelayTransport) {.async.} =
method stop*(self: RelayTransport) {.async: (raises: []).} =
self.running = false
self.selfRunning = false
self.client.onNewConnection = nil
while not self.queue.empty():
await self.queue.popFirstNoWait().close()
try:
await self.queue.popFirstNoWait().close()
except AsyncQueueEmptyError:
continue # checked with self.queue.empty()
method accept*(self: RelayTransport): Future[Connection] {.async.} =
method accept*(
self: RelayTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
result = await self.queue.popFirst()
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
proc dial*(
self: RelayTransport, ma: MultiAddress
): Future[Connection] {.async: (raises: [RelayDialError, CancelledError]).} =
var
relayAddrs: MultiAddress
relayPeerId: PeerId
dstPeerId: PeerId
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Destination doesn't exist")
try:
let sma = toSeq(ma.items())
relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Relay doesn't exist")
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Destination doesn't exist")
except RelayDialError as e:
raise e
except CatchableError:
raise newException(RelayDialError, "dial address not valid")
trace "Dial", relayPeerId, dstPeerId
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
var rc: RelayConnection
try:
let conn = await self.client.switch.dial(
relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
)
conn.dir = Direction.Out
case conn.protocol
of RelayV1Codec:
return await self.client.dialPeerV1(conn, dstPeerId, @[])
of RelayV2HopCodec:
rc = RelayConnection.new(conn, 0, 0)
return await self.client.dialPeerV2(rc, dstPeerId, @[])
except CancelledError as exc:
raise exc
except CatchableError as exc:
if not rc.isNil:
await rc.close()
raise exc
except CancelledError as e:
safeClose(rc)
raise e
except DialFailedError as e:
safeClose(rc)
raise newException(RelayDialError, "dial relay peer failed", e)
except RelayV1DialError as e:
safeClose(rc)
raise e
except RelayV2DialError as e:
safeClose(rc)
raise e
method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
peerId.withValue(pid):
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
try:
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
result = await self.dial(address)
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(transport.TransportDialError, e.msg, e)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
try:
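
Aside (not part of the diff): the reworked dial above recovers the relay peer from the third-from-last multiaddress component and the destination peer from the last one, raising RelayDialError when either is malformed. Below is a standalone sketch of that split using plain strings (no MultiAddress involved), valid only for the usual /.../p2p/<relay>/p2p-circuit/p2p/<dst> shape; all names here are illustrative.

import std/strutils

proc splitCircuit(ma: string): tuple[relayAddrs, relayId, dstId: string] =
  # "/ip4/.../tcp/.../p2p/<relay>/p2p-circuit/p2p/<dst>" split on '/';
  # note that "p2p-circuit" occupies a single slot (it carries no value).
  let parts = ma.split('/')
  doAssert parts.len >= 7 and parts[^2] == "p2p", "not a circuit address"
  result.dstId = parts[^1]
  result.relayId = parts[^4]
  result.relayAddrs = parts[0 ..< parts.len - 5].join("/")

when isMainModule:
  let (addrs, relay, dst) =
    splitCircuit("/ip4/127.0.0.1/tcp/4001/p2p/QmRelay/p2p-circuit/p2p/QmDst")
  doAssert addrs == "/ip4/127.0.0.1/tcp/4001"
  doAssert relay == "QmRelay"
  doAssert dst == "QmDst"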

View File

@@ -22,12 +22,17 @@ const
proc sendStatus*(
conn: Connection, code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
) {.async: (raises: [CancelledError]).} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
conn.writeLp(pb.buffer)
try:
let
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)
except CancelledError as e:
raise e
except LPStreamError as e:
trace "error sending relay status", description = e.msg
proc sendHopStatus*(
conn: Connection, code: StatusV2

View File

@@ -13,8 +13,7 @@
{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import results, chronos, chronicles
import
../protobuf/minprotobuf,
../peerinfo,
@@ -148,12 +147,13 @@ proc new*(
identify
method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
await conn.writeLp(pb.buffer)
except CancelledError as exc:
trace "cancelled identify handler"
raise exc
except CatchableError as exc:
trace "exception in identify handler", description = exc.msg, conn
@@ -166,7 +166,12 @@ method init*(p: Identify) =
proc identify*(
self: Identify, conn: Connection, remotePeerId: PeerId
): Future[IdentifyInfo] {.async.} =
): Future[IdentifyInfo] {.
async: (
raises:
[IdentityInvalidMsgError, IdentityNoMatchError, LPStreamError, CancelledError]
)
.} =
trace "initiating identify", conn
var message = await conn.readLp(64 * 1024)
if len(message) == 0:
@@ -205,7 +210,7 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64 * 1024)
@@ -224,6 +229,7 @@ proc init*(p: IdentifyPush) =
if not isNil(p.identifyHandler):
await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
trace "cancelled identify push handler"
raise exc
except CatchableError as exc:
info "exception in identify push handler", description = exc.msg, conn
@@ -234,7 +240,9 @@ proc init*(p: IdentifyPush) =
p.handler = handle
p.codec = IdentifyPushCodec
proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, public.} =
proc push*(
p: IdentifyPush, peerInfo: PeerInfo, conn: Connection
) {.public, async: (raises: [CancelledError, LPStreamError]).} =
## Send new `peerInfo`s to a connection
var pb = encodeMsg(peerInfo, conn.observedAddr, true)
await conn.writeLp(pb.buffer)

View File

@@ -23,7 +23,7 @@ proc perf*(
conn: Connection,
sizeToWrite: uint64 = 0,
sizeToRead: uint64 = 0,
): Future[Duration] {.async, public.} =
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
var
size = sizeToWrite
buf: array[PerfSize, byte]

View File

@@ -24,7 +24,7 @@ type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
@@ -47,10 +47,12 @@ proc new*(T: typedesc[Perf]): T {.public.} =
await conn.write(buf[0 ..< toWrite])
size -= toWrite
except CancelledError as exc:
trace "cancelled perf handler"
raise exc
except CatchableError as exc:
trace "exception in perf handler", description = exc.msg, conn
await conn.close()
finally:
await conn.close()
p.handler = handle
p.codec = PerfCodec

View File

@@ -51,7 +51,7 @@ proc new*(
ping
method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -61,6 +61,7 @@ method init*(p: Ping) =
if not isNil(p.pingHandler):
await p.pingHandler(conn.peerId)
except CancelledError as exc:
trace "cancelled ping handler"
raise exc
except CatchableError as exc:
trace "exception in ping handler", description = exc.msg, conn
@@ -68,7 +69,11 @@ method init*(p: Ping) =
p.handler = handle
p.codec = PingCodec
proc ping*(p: Ping, conn: Connection): Future[Duration] {.async, public.} =
proc ping*(
p: Ping, conn: Connection
): Future[Duration] {.
public, async: (raises: [CancelledError, LPStreamError, WrongPingAckError])
.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn

View File

@@ -9,7 +9,7 @@
{.push raises: [].}
import chronos, stew/results
import chronos, results
import ../stream/connection
export results
@@ -17,7 +17,9 @@ export results
const DefaultMaxIncomingStreams* = 10
type
LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.async.}
LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.
async: (raises: [CancelledError])
.}
LPProtocol* = ref object of RootObj
codecs*: seq[string]
@@ -28,13 +30,13 @@ type
method init*(p: LPProtocol) {.base, gcsafe.} =
discard
method start*(p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
method start*(p: LPProtocol) {.base, async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
p.started = true
fut
method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
method stop*(p: LPProtocol) {.base, async: (raises: [], raw: true).} =
let fut = newFuture[void]()
fut.complete()
p.started = false
@@ -64,21 +66,6 @@ template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
p.handlerImpl = handler
# Callbacks that are annotated with `{.async: (raises).}` explicitly
# document the types of errors that they may raise, but are not compatible
# with `LPProtoHandler` and need to use a custom `proc` type.
# They are internally wrapped into a `LPProtoHandler`, but still allow the
# compiler to check that their `{.async: (raises).}` annotation is correct.
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
p: LPProtocol,
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
) =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
p.handlerImpl = wrap
proc new*(
T: type LPProtocol,
codecs: seq[string],
@@ -92,17 +79,5 @@ proc new*(
when maxIncomingStreams is int:
Opt.some(maxIncomingStreams)
else:
maxIncomingStreams
,
maxIncomingStreams,
)
proc new*[E](
T: type LPProtocol,
codecs: seq[string],
handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T =
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
await handler(conn, proto)
T.new(codec, wrap, maxIncomingStreams)

View File

@@ -101,10 +101,12 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
procCall PubSub(f).unsubscribePeer(peer)
method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
f: FloodSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
raise newException(CatchableError, "Peer msg couldn't be decoded")
raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")
trace "decoded msg from peer", peer, payload = rpcMsg.shortLog
# trigger hooks
@@ -175,24 +177,23 @@ method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
f.updateMetrics(rpcMsg)
method init*(f: FloodSub) =
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/floodsub/1.0.0``, etc...
##
try:
await f.handleConn(conn, proto)
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
except CancelledError as exc:
trace "Unexpected cancellation in floodsub handler", conn
except CatchableError as exc:
trace "FloodSub handler leaks an error", description = exc.msg, conn
raise exc
f.handler = handler
f.codec = FloodSubCodec
method publish*(f: FloodSub, topic: string, data: seq[byte]): Future[int] {.async.} =
method publish*(
f: FloodSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
# base returns always 0
discard await procCall PubSub(f).publish(topic, data)

View File

@@ -102,6 +102,7 @@ proc init*(
overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit = false,
maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue,
sendIDontWantOnPublish = false,
): GossipSubParams =
GossipSubParams(
explicit: true,
@@ -139,6 +140,7 @@ proc init*(
overheadRateLimit: overheadRateLimit,
disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
sendIDontWantOnPublish: sendIDontWantOnPublish,
)
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -208,19 +210,16 @@ proc validateParameters*(parameters: TopicParams): Result[void, cstring] =
ok()
method init*(g: GossipSub) =
proc handler(conn: Connection, proto: string) {.async.} =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## main protocol handler that gets triggered on every
## connection for a protocol string
## e.g. ``/floodsub/1.0.0``, etc...
##
try:
await g.handleConn(conn, proto)
except CancelledError:
# This is top-level procedure which will work as separate task, so it
# do not need to propogate CancelledError.
except CancelledError as exc:
trace "Unexpected cancellation in gossipsub handler", conn
except CatchableError as exc:
trace "GossipSub handler leaks an error", description = exc.msg, conn
raise exc
g.handler = handler
g.codecs &= GossipSubCodec_12
@@ -383,45 +382,51 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
trace "sending iwant reply messages", peer
g.send(peer, RPCMsg(messages: messages), isHighPriority = false)
proc sendIDontWant(
g: GossipSub,
msg: Message,
msgId: MessageId,
peersToSendIDontWant: HashSet[PubSubPeer],
) =
# If the message is "large enough", let the mesh know that we do not want
# any more copies of it, regardless if it is valid or not.
#
# In the case that it is not valid, this leads to some redundancy
# (since the other peer should not send us an invalid message regardless),
# but the expectation is that this is rare (due to such peers getting
# descored) and that the savings from honest peers are greater than the
# cost a dishonest peer can incur in short time (since the IDONTWANT is
# small).
# IDONTWANT is only supported by >= GossipSubCodec_12
let peers = peersToSendIDontWant.filterIt(
it.codec != GossipSubCodec_10 and it.codec != GossipSubCodec_11
)
g.broadcast(
peers,
RPCMsg(
control: some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
),
isHighPriority = true,
)
const iDontWantMessageSizeThreshold* = 512
proc isLargeMessage(msg: Message, msgId: MessageId): bool =
msg.data.len > max(iDontWantMessageSizeThreshold, msgId.len * 10)
proc validateAndRelay(
g: GossipSub, msg: Message, msgId: MessageId, saltedId: SaltedId, peer: PubSubPeer
) {.async.} =
) {.async: (raises: []).} =
try:
template topic(): string =
msg.topic
proc addToSendPeers(toSendPeers: var HashSet[PubSubPeer]) =
g.floodsub.withValue(topic, peers):
toSendPeers.incl(peers[])
g.mesh.withValue(topic, peers):
toSendPeers.incl(peers[])
g.subscribedDirectPeers.withValue(topic, peers):
toSendPeers.incl(peers[])
toSendPeers.excl(peer)
if msg.data.len > max(512, msgId.len * 10):
# If the message is "large enough", let the mesh know that we do not want
# any more copies of it, regardless if it is valid or not.
#
# In the case that it is not valid, this leads to some redundancy
# (since the other peer should not send us an invalid message regardless),
# but the expectation is that this is rare (due to such peers getting
# descored) and that the savings from honest peers are greater than the
# cost a dishonest peer can incur in short time (since the IDONTWANT is
# small).
var peersToSendIDontWant = HashSet[PubSubPeer]()
addToSendPeers(peersToSendIDontWant)
peersToSendIDontWant.exclIfIt(
it.codec == GossipSubCodec_10 or it.codec == GossipSubCodec_11
)
g.broadcast(
peersToSendIDontWant,
RPCMsg(
control:
some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
),
isHighPriority = true,
)
if isLargeMessage(msg, msgId):
var peersToSendIDontWant = g.mesh.getOrDefault(topic)
peersToSendIDontWant.excl(peer)
g.sendIDontWant(msg, msgId, peersToSendIDontWant)
let validation = await g.validate(msg)
@@ -456,8 +461,15 @@ proc validateAndRelay(
# trigger hooks
peer.validatedObservers(msg, msgId)
# The send list typically matches the idontwant list from above, but
# might differ if validation takes time
proc addToSendPeers(toSendPeers: var HashSet[PubSubPeer]) =
g.floodsub.withValue(topic, peers):
toSendPeers.incl(peers[])
g.mesh.withValue(topic, peers):
toSendPeers.incl(peers[])
g.subscribedDirectPeers.withValue(topic, peers):
toSendPeers.incl(peers[])
toSendPeers.excl(peer)
var toSendPeers = HashSet[PubSubPeer]()
addToSendPeers(toSendPeers)
# Don't send it to peers that sent it during validation
@@ -490,7 +502,9 @@ proc validateAndRelay(
)
await handleData(g, topic, msg.data)
except CatchableError as exc:
except CancelledError as exc:
info "validateAndRelay failed", description = exc.msg
except PeerRateLimitError as exc:
info "validateAndRelay failed", description = exc.msg
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
@@ -511,7 +525,9 @@ proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
msgSize - payloadSize - controlSize
proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
proc rateLimit*(
g: GossipSub, peer: PubSubPeer, overhead: int
) {.async: (raises: [PeerRateLimitError]).} =
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(overhead):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()])
@@ -524,7 +540,9 @@ proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
PeerRateLimitError, "Peer disconnected because it's above rate limit."
)
method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
g: GossipSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
let msgSize = data.len
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
@@ -534,7 +552,7 @@ method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
# TODO evaluate behaviour penalty values
peer.behaviourPenalty += 0.1
raise newException(CatchableError, "Peer msg couldn't be decoded")
raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")
when defined(libp2p_expensive_metrics):
for m in rpcMsg.messages:
@@ -682,7 +700,9 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.async.} =
method publish*(
g: GossipSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
logScope:
topic
@@ -782,6 +802,9 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, g.mesh.getOrDefault(topic))
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
if g.knownTopics.contains(topic):
@@ -792,7 +815,9 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
trace "Published message to peers", peers = peers.len
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc maintainDirectPeer(
g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
if id notin g.peers:
trace "Attempting to dial a direct peer", peer = id
if g.switch.isConnected(id):
@@ -805,14 +830,16 @@ proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.as
except CancelledError as exc:
trace "Direct peer dial canceled"
raise exc
except CatchableError as exc:
except DialFailedError as exc:
debug "Direct peer error dialing", description = exc.msg
proc addDirectPeer*(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc addDirectPeer*(
g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
g.parameters.directPeers[id] = addrs
await g.maintainDirectPeer(id, addrs)
proc maintainDirectPeers(g: GossipSub) {.async.} =
proc maintainDirectPeers(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "GossipSub DirectPeers", 1.minutes:
for id, addrs in g.parameters.directPeers:
await g.addDirectPeer(id, addrs)
@@ -846,9 +873,9 @@ method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
return fut
# stop heartbeat interval
g.directPeersLoop.cancel()
g.scoringHeartbeatFut.cancel()
g.heartbeatFut.cancel()
g.directPeersLoop.cancelSoon()
g.scoringHeartbeatFut.cancelSoon()
g.heartbeatFut.cancelSoon()
g.heartbeatFut = nil
fut
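
Aside (not part of the diff): the refactor above pulls the IDONTWANT decision into isLargeMessage, so the broadcast only happens when the payload clearly outweighs the IDONTWANT entry (the message id) it would generate; the same gate is reused by sendIDontWantOnPublish. A standalone sketch of just that gate — the 512-byte constant and the 10x factor come from the hunk, everything else is illustrative.

const iDontWantMessageSizeThreshold = 512

proc isLargeMessage(dataLen, msgIdLen: int): bool =
  # worth an IDONTWANT only if the body beats both the fixed floor
  # and ten times the message-id size we would gossip instead
  dataLen > max(iDontWantMessageSizeThreshold, msgIdLen * 10)

when isMainModule:
  doAssert not isLargeMessage(400, 32)  # under the 512-byte floor
  doAssert isLargeMessage(600, 32)      # over the floor, small id
  doAssert not isLargeMessage(600, 64)  # 10 * 64 = 640 outweighs the body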

View File

@@ -126,8 +126,7 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
if x.peerId in sprBook:
sprBook[x.peerId].encode().get(default(seq[byte]))
else:
default(seq[byte])
,
default(seq[byte]),
)
proc handleGraft*(
@@ -770,7 +769,7 @@ proc onHeartbeat(g: GossipSub) =
g.mcache.shift() # shift the cache
proc heartbeat*(g: GossipSub) {.async.} =
proc heartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:
trace "running heartbeat", instance = cast[int](g)
g.onHeartbeat()

View File

@@ -131,7 +131,7 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
else:
0.0
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async: (raises: []).} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
@@ -313,12 +313,14 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updated scores", peers = g.peers.len
proc scoringHeartbeat*(g: GossipSub) {.async.} =
proc scoringHeartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
heartbeat "Gossipsub scoring", g.parameters.decayInterval:
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
proc punishInvalidMessage*(
g: GossipSub, peer: PubSubPeer, msg: Message
) {.async: (raises: [PeerRateLimitError]).} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):

View File

@@ -154,6 +154,9 @@ type
# Max number of elements allowed in the non-priority queue. When this limit has been reached, the peer will be disconnected.
maxNumElementsInNonPriorityQueue*: int
# Broadcast an IDONTWANT message automatically when the message exceeds the IDONTWANT message size threshold
sendIDontWantOnPublish*: bool
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]

View File

@@ -125,6 +125,8 @@ declarePublicCounter(
type
InitializationError* = object of LPError
PeerMessageDecodeError* = object of CatchableError
TopicHandler* {.public.} =
proc(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
@@ -327,7 +329,9 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(
p: PubSub, peer: PubSubPeer, data: seq[byte]
): Future[void] {.base, async.} =
): Future[void] {.
base, async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError])
.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
@@ -355,8 +359,15 @@ method getOrCreatePeer*(
peer[].codec = protoNegotiated
return peer[]
proc getConn(): Future[Connection] {.async.} =
return await p.switch.dial(peerId, protosToDial)
proc getConn(): Future[Connection] {.
async: (raises: [CancelledError, GetConnDialError])
.} =
try:
return await p.switch.dial(peerId, protosToDial)
except CancelledError as exc:
raise exc
except DialFailedError as e:
raise (ref GetConnDialError)(parent: e)
proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
p.onPubSubPeerEvent(peer, event)
@@ -376,7 +387,9 @@ method getOrCreatePeer*(
return pubSubPeer
proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
proc handleData*(
p: PubSub, topic: string, data: seq[byte]
): Future[void] {.async: (raises: [], raw: true).} =
# Start work on all data handlers without copying data into closure like
# happens on {.async.} transformation
p.topics.withValue(topic, handlers):
@@ -389,7 +402,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
futs.add(fut)
if futs.len() > 0:
proc waiter(): Future[void] {.async.} =
proc waiter(): Future[void] {.async: (raises: []).} =
# slow path - we have to wait for the handlers to complete
try:
futs = await allFinished(futs)
@@ -397,12 +410,12 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
# propagate cancellation
for fut in futs:
if not (fut.finished):
fut.cancel()
fut.cancelSoon()
# check for errors in futures
for fut in futs:
if fut.failed:
let err = fut.readError()
let err = fut.error()
warn "Error in topic handler", description = err.msg
return waiter()
@@ -412,7 +425,9 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
res.complete()
return res
method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
method handleConn*(
p: PubSub, conn: Connection, proto: string
) {.base, async: (raises: [CancelledError]).} =
## handle incoming connections
##
## this proc will:
@@ -424,7 +439,9 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
## that we're interested in
##
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
proc handler(
peer: PubSubPeer, data: seq[byte]
): Future[void] {.async: (raises: []).} =
# call pubsub rpc handler
p.rpcHandler(peer, data)
@@ -436,7 +453,7 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
trace "pubsub peer handler ended", conn
except CancelledError as exc:
raise exc
except CatchableError as exc:
except PeerMessageDecodeError as exc:
trace "exception ocurred in pubsub handle", description = exc.msg, conn
finally:
await conn.closeWithEOF()
@@ -542,7 +559,7 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
method publish*(
p: PubSub, topic: string, data: seq[byte]
): Future[int] {.base, async, public.} =
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##
## The return value is the number of neighbours that we attempted to send the
@@ -572,7 +589,7 @@ method addValidator*(
method removeValidator*(
p: PubSub, topic: varargs[string], hook: ValidatorHandler
) {.base, public.} =
) {.base, public, gcsafe.} =
for t in topic:
p.validators.withValue(t, validators):
validators[].excl(hook)
@@ -581,7 +598,7 @@ method removeValidator*(
method validate*(
p: PubSub, message: Message
): Future[ValidationResult] {.async, base.} =
): Future[ValidationResult] {.async: (raises: [CancelledError]), base.} =
var pending: seq[Future[ValidationResult]]
trace "about to validate message"
let topic = message.topic
@@ -589,22 +606,27 @@ method validate*(
topic = topic, registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topic = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))
result = ValidationResult.Accept
p.validators.withValue(topic, validators):
for validator in validators[]:
pending.add(validator(topic, message))
var valResult = ValidationResult.Accept
let futs = await allFinished(pending)
for fut in futs:
if fut.failed:
result = ValidationResult.Reject
valResult = ValidationResult.Reject
break
let res = fut.read()
if res != ValidationResult.Accept:
result = res
if res == ValidationResult.Reject:
break
try:
let res = fut.read()
if res != ValidationResult.Accept:
valResult = res
if res == ValidationResult.Reject:
break
except CatchableError as e:
trace "validator for message could not be executed, ignoring",
topic = topic, err = e.msg
valResult = ValidationResult.Ignore
case result
case valResult
of ValidationResult.Accept:
libp2p_pubsub_validation_success.inc()
of ValidationResult.Reject:
@@ -612,6 +634,8 @@ method validate*(
of ValidationResult.Ignore:
libp2p_pubsub_validation_ignore.inc()
valResult
proc init*[PubParams: object | bool](
P: typedesc[PubSub],
switch: Switch,
@@ -656,7 +680,9 @@ proc init*[PubParams: object | bool](
topicsHigh: int.high,
)
proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
proc peerEventHandler(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
pubsub.subscribePeer(peerId)
else:
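
Aside (not part of the diff): validate above now accumulates into a local valResult instead of result, starting from Accept, keeping any non-Accept outcome, and short-circuiting on Reject (in the hunk a failed validator future rejects outright, while one whose result cannot be read is treated as Ignore). A standalone sketch of that fold over already-collected results; the proc name is made up.

type ValidationResult = enum
  Accept, Reject, Ignore

proc foldResults(results: seq[ValidationResult]): ValidationResult =
  result = Accept
  for r in results:
    if r != Accept:
      result = r          # keep the non-Accept outcome
      if r == Reject:
        break             # an explicit Reject wins immediately

when isMainModule:
  doAssert foldResults(@[Accept, Ignore, Accept]) == Ignore
  doAssert foldResults(@[Ignore, Reject, Accept]) == Reject
  doAssert foldResults(newSeq[ValidationResult]()) == Accept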

View File

@@ -64,6 +64,8 @@ const DefaultMaxNumElementsInNonPriorityQueue* = 1024
type
PeerRateLimitError* = object of CatchableError
GetConnDialError* = object of CatchableError
PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer, msgs: var RPCMsg) {.gcsafe, raises: [].}
@@ -79,7 +81,8 @@ type
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
GetConn* =
proc(): Future[Connection] {.async: (raises: [CancelledError, GetConnDialError]).}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].}
# have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
@@ -122,7 +125,7 @@ type
disconnected: bool
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.gcsafe, raises: [].}
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -190,7 +193,7 @@ proc sendObservers(p: PubSubPeer, msg: var RPCMsg) =
if not (isNil(obs.onSend)):
obs.onSend(p, msg)
proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
debug "starting pubsub read loop", conn, peer = p, closed = conn.closed
try:
try:
@@ -206,7 +209,7 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while",
conn, peer = p, description = exc.msg
except CatchableError as exc:
except LPStreamError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
@@ -215,13 +218,12 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
# This is top-level procedure which will work as separate task, so it
# do not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
except CatchableError as exc:
trace "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
proc closeSendConn(
p: PubSubPeer, event: PubSubPeerEventKind
) {.async: (raises: [CancelledError]).} =
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
@@ -235,17 +237,20 @@ proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
p.onEvent(p, PubSubPeerEvent(kind: event))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", description = exc.msg
# don't cleanup p.address else we leak some gossip stat table
proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
proc connectOnce(
p: PubSubPeer
): Future[void] {.async: (raises: [CancelledError, GetConnDialError, LPError]).} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")
let newConn =
try:
await p.getConn().wait(5.seconds)
except AsyncTimeoutError as error:
trace "getConn timed out", description = error.msg
raise (ref LPError)(msg: "Cannot establish send connection")
# When the send channel goes up, subscriptions need to be sent to the
# remote peer - if we had multiple channels up and one goes down, all
@@ -271,7 +276,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
finally:
await p.closeSendConn(PubSubPeerEventKind.StreamClosed)
proc connectImpl(p: PubSubPeer) {.async.} =
proc connectImpl(p: PubSubPeer) {.async: (raises: []).} =
try:
# Keep trying to establish a connection while it's possible to do so - the
# send connection might get disconnected due to a timeout or an unrelated
@@ -282,7 +287,11 @@ proc connectImpl(p: PubSubPeer) {.async.} =
p.connectedFut.complete()
return
await connectOnce(p)
except CatchableError as exc: # never cancelled
except CancelledError as exc:
debug "Could not establish send connection", description = exc.msg
except LPError as exc:
debug "Could not establish send connection", description = exc.msg
except GetConnDialError as exc:
debug "Could not establish send connection", description = exc.msg
proc connect*(p: PubSubPeer) =
@@ -317,7 +326,7 @@ proc clearSendPriorityQueue(p: PubSubPeer) =
value = p.rpcmessagequeue.sendPriorityQueue.len.int64, labelValues = [$p.peerId]
)
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async: (raises: []).} =
# Continuation for a pending `sendMsg` future from below
try:
await msgFut
@@ -331,7 +340,7 @@ proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
await conn.close() # This will clean up the send connection
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledError]).} =
# Slow path of `sendMsg` where msg is held in memory while send connection is
# being set up
if p.sendConn == nil:
@@ -347,7 +356,7 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
@@ -493,7 +502,7 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
proc sendNonPriorityTask(p: PubSubPeer) {.async: (raises: [CancelledError]).} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()

View File

@@ -65,7 +65,10 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true,
): Message {.gcsafe, raises: [LPError].} =
): Message {.gcsafe, raises: [].} =
if sign and peer.isNone():
doAssert(false, "Cannot sign message without peer info")
var msg = Message(data: data, topic: topic)
# order matters, we want to include seqno in the signature
@@ -81,9 +84,6 @@ proc init*(
.expect("Invalid private key!")
.getBytes()
.expect("Couldn't get public key bytes!")
else:
if sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
msg

View File

@@ -303,15 +303,14 @@ proc decodeMessages*(pb: ProtoBuffer): ProtoResult[seq[Message]] {.inline.} =
if ?pb.getRepeatedField(2, msgpbs):
trace "decodeMessages: read messages", count = len(msgpbs)
for item in msgpbs:
# size is constrained at the network level
msgs.add(?decodeMessage(initProtoBuffer(item, maxSize = uint.high)))
msgs.add(?decodeMessage(initProtoBuffer(item)))
else:
trace "decodeMessages: no messages found"
ok(msgs)
proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
trace "encodeRpcMsg: encoding message", payload = msg.shortLog()
var pb = initProtoBuffer(maxSize = uint.high)
var pb = initProtoBuffer()
for item in msg.subscriptions:
pb.write(1, item)
for item in msg.messages:
@@ -330,7 +329,7 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", payload = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var pb = initProtoBuffer(msg)
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ?pb.decodeMessages())
assign(rpcMsg.subscriptions, ?pb.decodeSubscriptions())
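
The codec now relies on the default protobuf size limit: per the new comment, the RPC size is already constrained at the network level by the length-prefixed read. A rough encode/decode round-trip sketch, assuming the usual nim-libp2p module paths:

import libp2p/protocols/pubsub/rpc/[messages, protobuf]

let original = RPCMsg()                          # empty RPC envelope
let encoded = encodeRpcMsg(original, anonymize = true)
let decoded = decodeRpcMsg(encoded)              # ProtoResult[RPCMsg]
doAssert decoded.isOk()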

View File

@@ -11,15 +11,17 @@
import tables, sequtils, sugar, sets
import metrics except collect
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects, results]
import chronos, chronicles, bearssl/rand, stew/[byteutils, objects]
import
./protocol,
../protobuf/minprotobuf,
../switch,
../routing_record,
../utils/heartbeat,
../stream/connection,
../utils/offsettedseq,
../utils/semaphore
../utils/semaphore,
../discovery/discoverymngr
export chronicles
@@ -35,6 +37,9 @@ const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
MaximumDuration = 72.hours
MaximumMessageLen = 1 shl 22 # 4MB
MinimumNamespaceLen = 1
MaximumNamespaceLen = 255
RegistrationLimitPerPeer = 1000
DiscoverLimit = 1000'u64
SemaphoreDefaultSize = 5
@@ -59,7 +64,7 @@ type
Cookie = object
offset: uint64
ns: string
ns: Opt[string]
Register = object
ns: string
@@ -75,7 +80,7 @@ type
ns: string
Discover = object
ns: string
ns: Opt[string]
limit: Opt[uint64]
cookie: Opt[seq[byte]]
@@ -96,7 +101,8 @@ type
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
result.write(1, c.offset)
result.write(2, c.ns)
if c.ns.isSome():
result.write(2, c.ns.get())
result.finish()
proc encode(r: Register): ProtoBuffer =
@@ -123,7 +129,8 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.ns.isSome():
result.write(1, d.ns.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
@@ -157,13 +164,17 @@ proc encode(msg: Message): ProtoBuffer =
result.finish()
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
var
c: Cookie
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
r2 = pb.getField(2, ns)
if r1.isErr() or r2.isErr():
return Opt.none(Cookie)
if r2.get(false):
c.ns = Opt.some(ns)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
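
With the Cookie and Discover namespaces now optional, the decoders switch from `getRequiredField` (an error when the field is absent) to `getField` (presence reported through the bool result). A small standalone illustration of that distinction with minprotobuf (field numbers are arbitrary):

import libp2p/protobuf/minprotobuf

var enc = initProtoBuffer()
enc.write(1, 42'u64)                 # only field 1 is written
enc.finish()

let dec = initProtoBuffer(enc.buffer)
var offset: uint64
var ns: string
doAssert dec.getRequiredField(1, offset).isOk()  # present: ok
let r = dec.getField(2, ns)
doAssert r.isOk() and not r.get()                # absent: still ok, but false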
@@ -215,13 +226,16 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
d: Discover
limit: uint64
cookie: seq[byte]
ns: string
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, d.ns)
r1 = pb.getField(1, ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr:
return Opt.none(Discover)
if r1.get(false):
d.ns = Opt.some(ns)
if r2.get(false):
d.limit = Opt.some(limit)
if r3.get(false):
@@ -294,7 +308,7 @@ proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
Opt.some(msg)
type
RendezVousError* = object of LPError
RendezVousError* = object of DiscoveryError
RegisteredData = object
expiration: Moment
peerId: PeerId
@@ -331,7 +345,9 @@ proc checkPeerRecord(spr: seq[byte], peerId: PeerId): Result[void, string] =
return err("Bad Peer ID")
return ok()
proc sendRegisterResponse(conn: Connection, ttl: uint64) {.async.} =
proc sendRegisterResponse(
conn: Connection, ttl: uint64
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
@@ -342,7 +358,7 @@ proc sendRegisterResponse(conn: Connection, ttl: uint64) {.async.} =
proc sendRegisterResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.RegisterResponse,
@@ -353,7 +369,7 @@ proc sendRegisterResponseError(
proc sendDiscoverResponse(
conn: Connection, s: seq[Register], cookie: Cookie
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
@@ -368,7 +384,7 @@ proc sendDiscoverResponse(
proc sendDiscoverResponseError(
conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
let msg = encode(
Message(
msgType: MessageType.DiscoverResponse,
@@ -409,10 +425,10 @@ proc save(
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1 .. 255:
if r.ns.len < MinimumNamespaceLen or r.ns.len > MaximumNamespaceLen:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(rdv.minTTL)
if ttl notin rdv.minTTL .. rdv.maxTTL:
if ttl < rdv.minTTL or ttl > rdv.maxTTL:
return conn.sendRegisterResponseError(InvalidTTL)
let pr = checkPeerRecord(r.signedPeerRecord, conn.peerId)
if pr.isErr():
@@ -435,10 +451,12 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
except KeyError:
return
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
proc discover(
rdv: RendezVous, conn: Connection, d: Discover
) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0 .. 255:
if d.ns.isSome() and d.ns.get().len > MaximumNamespaceLen:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
var limit = min(DiscoverLimit, d.limit.get(DiscoverLimit))
@@ -451,20 +469,19 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
return
else:
Cookie(offset: rdv.registered.low().uint64 - 1)
if cookie.ns != d.ns or
cookie.offset notin rdv.registered.low().uint64 .. rdv.registered.high().uint64:
if d.ns.isSome() and cookie.ns.isSome() and cookie.ns.get() != d.ns.get() or
cookie.offset < rdv.registered.low().uint64 or
cookie.offset > rdv.registered.high().uint64:
cookie = Cookie(offset: rdv.registered.low().uint64 - 1)
let
nsSalted = d.ns & rdv.salt
namespaces =
if d.ns != "":
try:
rdv.namespaces[nsSalted]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(cookie.offset.int .. rdv.registered.high())
let namespaces =
if d.ns.isSome():
try:
rdv.namespaces[d.ns.get() & rdv.salt]
except KeyError:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
else:
toSeq(max(cookie.offset.int, rdv.registered.offset) .. rdv.registered.high())
if namespaces.len() == 0:
await conn.sendDiscoverResponse(@[], Cookie())
return
@@ -484,8 +501,10 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
proc advertiseWrap() {.async.} =
proc advertisePeer(
rdv: RendezVous, peer: PeerId, msg: seq[byte]
) {.async: (raises: [CancelledError]).} =
proc advertiseWrap() {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
@@ -506,19 +525,19 @@ proc advertisePeer(rdv: RendezVous, peer: PeerId, msg: seq[byte]) {.async.} =
rdv.sema.release()
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
await advertiseWrap()
proc advertise*(
rdv: RendezVous, ns: string, ttl: Duration, peers: seq[PeerId]
) {.async.} =
if ns.len notin 1 .. 255:
raise newException(RendezVousError, "Invalid namespace")
) {.async: (raises: [CancelledError, AdvertiseError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
if ttl notin rdv.minDuration .. rdv.maxDuration:
raise newException(RendezVousError, "Invalid time to live: " & $ttl)
if ttl < rdv.minDuration or ttl > rdv.maxDuration:
raise newException(AdvertiseError, "Invalid time to live: " & $ttl)
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
raise newException(AdvertiseError, "Wrong Signed Peer Record")
let
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
@@ -529,13 +548,13 @@ proc advertise*(
let futs = collect(newSeq()):
for peer in peers:
trace "Send Advertise", peerId = peer, ns
rdv.advertisePeer(peer, msg.buffer)
rdv.advertisePeer(peer, msg.buffer).withTimeout(5.seconds)
await allFutures(futs)
method advertise*(
rdv: RendezVous, ns: string, ttl: Duration = rdv.minDuration
) {.async, base.} =
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
await rdv.advertise(ns, ttl, rdv.peers)
proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
@@ -553,33 +572,39 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
@[]
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async.} =
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int, peers: seq[PeerId]
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
var
s: Table[PeerId, (PeerRecord, Register)]
limit: uint64
d = Discover(ns: ns)
if l <= 0 or l > DiscoverLimit.int:
raise newException(RendezVousError, "Invalid limit")
if ns.len notin 0 .. 255:
raise newException(RendezVousError, "Invalid namespace")
raise newException(AdvertiseError, "Invalid limit")
if ns.isSome() and ns.get().len > MaximumNamespaceLen:
raise newException(AdvertiseError, "Invalid namespace")
limit = l.uint64
proc requestPeer(peer: PeerId) {.async.} =
proc requestPeer(
peer: PeerId
) {.async: (raises: [CancelledError, DialFailedError, LPStreamError]).} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer:
await conn.close()
d.limit = Opt.some(limit)
d.cookie =
try:
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
if ns.isSome():
try:
Opt.some(rdv.cookiesSaved[peer][ns.get()])
except KeyError, CatchableError:
Opt.none(seq[byte])
else:
Opt.none(seq[byte])
await conn.writeLp(
encode(Message(msgType: MessageType.Discover, discover: Opt.some(d))).buffer
)
let
buf = await conn.readLp(65536)
buf = await conn.readLp(MaximumMessageLen)
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
@@ -593,9 +618,14 @@ proc request*(
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
rdv.cookiesSaved[peer][ns] = cookie
if ns.isSome:
let namespace = ns.get()
if cookie.len() < 1000 and
rdv.cookiesSaved.hasKeyOrPut(peer, {namespace: cookie}.toTable()):
try:
rdv.cookiesSaved[peer][namespace] = cookie
except KeyError:
raiseAssert "checked with hasKeyOrPut"
for r in resp.registrations:
if limit == 0:
return
@@ -607,15 +637,20 @@ proc request*(
continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
let (prSaved, rSaved) =
try:
s[pr.peerId]
except KeyError:
raiseAssert "checked with hasKey"
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(rdv.maxTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
s[pr.peerId] = (pr, r)
limit.dec()
for (_, r) in s.values():
rdv.save(ns, peer, r, false)
if ns.isSome():
for (_, r) in s.values():
rdv.save(ns.get(), peer, r, false)
for peer in peers:
if limit == 0:
@@ -625,17 +660,24 @@ proc request*(
try:
trace "Send Request", peerId = peer, ns
await peer.requestPeer()
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception catch in request", description = exc.msg
except CancelledError as e:
raise e
except DialFailedError as e:
trace "failed to dial a peer", description = e.msg
except LPStreamError as e:
trace "failed to communicate with a peer", description = e.msg
return toSeq(s.values()).mapIt(it[0])
proc request*(
rdv: RendezVous, ns: string, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async.} =
rdv: RendezVous, ns: Opt[string], l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(ns, l, rdv.peers)
proc request*(
rdv: RendezVous, l: int = DiscoverLimit.int
): Future[seq[PeerRecord]] {.async: (raises: [DiscoveryError, CancelledError]).} =
await rdv.request(Opt.none(string), l, rdv.peers)
proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
let nsSalted = ns & rdv.salt
try:
@@ -645,15 +687,17 @@ proc unsubscribeLocally*(rdv: RendezVous, ns: string) =
except KeyError:
return
proc unsubscribe*(rdv: RendezVous, ns: string, peerIds: seq[PeerId]) {.async.} =
if ns.len notin 1 .. 255:
proc unsubscribe*(
rdv: RendezVous, ns: string, peerIds: seq[PeerId]
) {.async: (raises: [RendezVousError, CancelledError]).} =
if ns.len < MinimumNamespaceLen or ns.len > MaximumNamespaceLen:
raise newException(RendezVousError, "Invalid namespace")
let msg = encode(
Message(msgType: MessageType.Unregister, unregister: Opt.some(Unregister(ns: ns)))
)
proc unsubscribePeer(peerId: PeerId) {.async.} =
proc unsubscribePeer(peerId: PeerId) {.async: (raises: []).} =
try:
let conn = await rdv.switch.dial(peerId, RendezVousCodec)
defer:
@@ -666,16 +710,20 @@ proc unsubscribe*(rdv: RendezVous, ns: string, peerIds: seq[PeerId]) {.async.} =
for peer in peerIds:
unsubscribePeer(peer)
discard await allFutures(futs).withTimeout(5.seconds)
await allFutures(futs)
proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
proc unsubscribe*(
rdv: RendezVous, ns: string
) {.async: (raises: [RendezVousError, CancelledError]).} =
rdv.unsubscribeLocally(ns)
await rdv.unsubscribe(ns, rdv.peers)
proc setup*(rdv: RendezVous, switch: Switch) =
rdv.switch = switch
proc handlePeer(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeer(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
if event.kind == PeerEventKind.Joined:
rdv.peers.add(peerId)
elif event.kind == PeerEventKind.Left:
@@ -717,7 +765,9 @@ proc new*(
)
logScope:
topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async.} =
proc handleStream(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
let
buf = await conn.readLp(4096)
@@ -734,6 +784,7 @@ proc new*(
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
trace "cancelled rendezvous handler"
raise exc
except CatchableError as exc:
trace "exception in rendezvous handler", description = exc.msg
@@ -750,13 +801,15 @@ proc new*(
rng: ref HmacDrbgContext = newRng(),
minDuration = MinimumDuration,
maxDuration = MaximumDuration,
): T =
): T {.raises: [RendezVousError].} =
let rdv = T.new(rng, minDuration, maxDuration)
rdv.setup(switch)
return rdv
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
proc deletesRegister(
rdv: RendezVous, interval = 1.minutes
) {.async: (raises: [CancelledError]).} =
heartbeat "Register timeout", interval:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
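
Taken together, the rendezvous API now validates namespaces against MinimumNamespaceLen/MaximumNamespaceLen, reports failures as AdvertiseError/DiscoveryError, and accepts an optional namespace for discovery. A hedged usage sketch; the import paths and the prior setup (a started switch with the protocol mounted via `RendezVous.new(switch)`) are assumed, not shown by this diff:

import chronos, results
import libp2p/protocols/rendezvous

proc useRendezvous(rdv: RendezVous) {.async.} =
  await rdv.advertise("my-app/lobby", 2.hours)              # register this peer
  let scoped = await rdv.request(Opt.some("my-app/lobby"))  # peers in that namespace
  let all = await rdv.request()                             # no namespace: everything
  echo "scoped: ", scoped.len, ", total: ", all.len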

View File

@@ -20,7 +20,6 @@ import ../../peerid
import ../../peerinfo
import ../../protobuf/minprotobuf
import ../../utility
import ../../errors
import secure, ../../crypto/[crypto, chacha20poly1305, curve25519, hkdf]

View File

@@ -17,7 +17,7 @@ const PlainTextCodec* = "/plaintext/1.0.0"
type PlainText* = ref object of Secure
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
## plain text doesn't do anything
discard

View File

@@ -11,15 +11,14 @@
{.push raises: [].}
import std/[strformat]
import stew/results
import results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../stream/connection,
../../multiaddress,
../../peerinfo,
../../errors
../../peerinfo
export protocol, results
@@ -82,7 +81,7 @@ method readMessage*(
): Future[seq[byte]] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[SecureConn.readMessage] abstract method not implemented!")
method getWrapped*(s: SecureConn): Connection =
s.stream
@@ -92,7 +91,7 @@ method handshake*(
): Future[SecureConn] {.
async: (raises: [CancelledError, LPStreamError], raw: true), base
.} =
raiseAssert("Not implemented!")
raiseAssert("[Secure.handshake] abstract method not implemented!")
proc handleConn(
s: Secure, conn: Connection, initiator: bool, peerId: Opt[PeerId]
@@ -143,7 +142,7 @@ proc handleConn(
method init*(s: Secure) =
procCall LPProtocol(s).init()
proc handle(conn: Connection, proto: string) {.async.} =
proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
trace "handling connection upgrade", proto, conn
try:
# We don't need the result but we
@@ -152,10 +151,10 @@ method init*(s: Secure) =
trace "connection secured", conn
except CancelledError as exc:
warn "securing connection canceled", conn
await conn.close()
raise exc
except LPStreamError as exc:
warn "securing connection failed", description = exc.msg, conn
finally:
await conn.close()
s.handler = handle
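
The reworked `handle` above re-raises cancellation but closes the connection in a `finally` block, so cleanup happens on success, on `LPStreamError`, and on cancellation alike. A standalone sketch of that cleanup shape (the procs below are illustrative, not library code):

import chronos

proc doHandshake(fail: bool) {.async.} =
  if fail:
    raise newException(ValueError, "handshake failed")

proc handle(fail: bool) {.async: (raises: [CancelledError]).} =
  try:
    await doHandshake(fail)
  except CancelledError as exc:
    raise exc                             # cancellation still propagates
  except CatchableError as exc:
    debugEcho "securing failed: ", exc.msg
  finally:
    debugEcho "cleanup runs on every path"  # stands in for `await conn.close()`

waitFor handle(true)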

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/[sequtils, times]
import pkg/stew/results
import pkg/results
import multiaddress, multicodec, peerid, protobuf/minprotobuf, signed_envelope
export peerid, multiaddress, signed_envelope

View File

@@ -36,12 +36,14 @@ proc isRunning*(self: AutoRelayService): bool =
proc addressMapper(
self: AutoRelayService, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: []).} =
return concat(toSeq(self.relayAddresses.values)) & listenAddrs
proc reserveAndUpdate(
self: AutoRelayService, relayPid: PeerId, switch: Switch
) {.async.} =
) {.async: (raises: [CatchableError]).} =
# CatchableError is used to simplify the raised errors here: many different
# errors can be raised, but the caller doesn't really care what caused them
while self.running:
let
rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
@@ -58,20 +60,26 @@ proc reserveAndUpdate(
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: AutoRelayService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
if hasBeenSetUp:
proc handlePeerIdentified(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeerIdentified(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "Peer Identified", peerId
if self.relayPeers.len < self.maxNumRelays:
self.peerAvailable.fire()
proc handlePeerLeft(peerId: PeerId, event: PeerEvent) {.async.} =
proc handlePeerLeft(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
trace "Peer Left", peerId
self.relayPeers.withValue(peerId, future):
future[].cancel()
@@ -82,24 +90,31 @@ method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
await self.run(switch)
return hasBeenSetUp
proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
proc manageBackedOff(
self: AutoRelayService, pid: PeerId
) {.async: (raises: [CancelledError]).} =
await sleepAsync(chronos.seconds(5))
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
proc innerRun(
self: AutoRelayService, switch: Switch
) {.async: (raises: [CancelledError]).} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
for k in peers:
if self.relayPeers[k].finished():
self.relayPeers.del(k)
self.relayAddresses.del(k)
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
# To avoid ddosing our peers in certain conditions
self.backingOff.add(k)
asyncSpawn self.manageBackedOff(k)
try:
if self.relayPeers[k].finished():
self.relayPeers.del(k)
self.relayAddresses.del(k)
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
# To avoid ddosing our peers in certain conditions
self.backingOff.add(k)
asyncSpawn self.manageBackedOff(k)
except KeyError:
raiseAssert "checked with in"
# Get all connected relayPeers
self.peerAvailable.clear()
@@ -116,18 +131,25 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)
if self.relayPeers.len() > 0:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
try:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
except ValueError:
raiseAssert "checked with relayPeers.len()"
else:
await self.peerAvailable.wait()
method run*(self: AutoRelayService, switch: Switch) {.async.} =
method run*(
self: AutoRelayService, switch: Switch
) {.async: (raises: [CancelledError]).} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
method stop*(
self: AutoRelayService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false
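
The autorelay changes wrap table lookups whose keys are known to be present in `try/except KeyError: raiseAssert ...`, which keeps the declared `raises` list clean without hiding real logic errors. The same defensive pattern in isolation, using only the standard library:

import std/tables

var relays = {"peer-a": 1, "peer-b": 2}.toTable()
for k in relays.keys:
  let v =
    try:
      relays[k]                 # cannot fail: k comes from the table itself
    except KeyError:
      raiseAssert "key was just taken from relays.keys"
  echo k, " -> ", v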

View File

@@ -39,8 +39,10 @@ proc new*(
proc tryStartingDirectConn(
self: HPService, switch: Switch, peerId: PeerId
): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
proc tryConnect(
address: MultiAddress
): Future[bool] {.async: (raises: [DialFailedError, CancelledError]).} =
debug "Trying to create direct connection", peerId, address
await switch.connect(peerId, @[address], true, false)
debug "Direct connection created."
@@ -57,13 +59,13 @@ proc tryStartingDirectConn(
continue
return false
proc closeRelayConn(relayedConn: Connection) {.async.} =
proc closeRelayConn(relayedConn: Connection) {.async: (raises: [CancelledError]).} =
await sleepAsync(2000.milliseconds) # grace period before closing relayed connection
await relayedConn.close()
proc newConnectedPeerHandler(
self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent
) {.async.} =
) {.async: (raises: [CancelledError]).} =
try:
# Get all connections to the peer. If there is at least one non-relayed connection, return.
let connections = switch.connManager.getConnections()[peerId].mapIt(it.connection)
@@ -86,18 +88,27 @@ proc newConnectedPeerHandler(
switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
await dcutrClient.startSync(switch, peerId, natAddrs)
await closeRelayConn(relayedConn)
except CancelledError as err:
raise err
except CatchableError as err:
debug "Hole punching failed during dcutr", err = err.msg
method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
method setup*(
self: HPService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
var hasBeenSetup = await procCall Service(self).setup(switch)
hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch)
if hasBeenSetup:
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
try:
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
except LPError as err:
error "Failed to mount Dcutr", err = err.msg
self.newConnectedPeerHandler = proc(peerId: PeerId, event: PeerEvent) {.async.} =
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
await newConnectedPeerHandler(self, switch, peerId, event)
switch.connManager.addPeerEventHandler(
@@ -106,7 +117,7 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
self.onNewStatusHandler = proc(
networkReachability: NetworkReachability, confidence: Opt[float]
) {.async.} =
) {.async: (raises: [CancelledError]).} =
if networkReachability == NetworkReachability.NotReachable and
not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
@@ -121,10 +132,14 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler)
return hasBeenSetup
method run*(self: HPService, switch: Switch) {.async, public.} =
method run*(
self: HPService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
await self.autonatService.run(switch)
method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
self: HPService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
discard await self.autonatService.stop(switch)
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(

View File

@@ -10,8 +10,8 @@
{.push raises: [].}
import std/sequtils
import stew/[byteutils, results, endians2]
import chronos, chronos/transports/[osnet, ipnet], chronicles
import stew/endians2
import chronos, chronos/transports/[osnet, ipnet], chronicles, results
import ../[multiaddress, multicodec]
import ../switch
@@ -73,7 +73,6 @@ proc new*(
return T(networkInterfaceProvider: networkInterfaceProvider)
proc getProtocolArgument*(ma: MultiAddress, codec: MultiCodec): MaResult[seq[byte]] =
var buffer: seq[byte]
for item in ma:
let
ritem = ?item
@@ -148,7 +147,7 @@ proc expandWildcardAddresses(
method setup*(
self: WildcardAddressResolverService, switch: Switch
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
## Sets up the `WildcardAddressResolverService`.
##
## This method adds the address mapper to the peer's list of address mappers.
@@ -161,7 +160,7 @@ method setup*(
## - A `Future[bool]` that resolves to `true` if the setup was successful, otherwise `false`.
self.addressMapper = proc(
listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
return expandWildcardAddresses(self.networkInterfaceProvider, listenAddrs)
debug "Setting up WildcardAddressResolverService"
@@ -170,7 +169,9 @@ method setup*(
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: WildcardAddressResolverService, switch: Switch) {.async, public.} =
method run*(
self: WildcardAddressResolverService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
## Runs the WildcardAddressResolverService for a given switch.
##
## It updates the peer information for the provided switch by running the registered address mapper. Any other
@@ -181,7 +182,7 @@ method run*(self: WildcardAddressResolverService, switch: Switch) {.async, publi
method stop*(
self: WildcardAddressResolverService, switch: Switch
): Future[bool] {.async, public.} =
): Future[bool] {.public, async: (raises: [CancelledError]).} =
## Stops the WildcardAddressResolverService.
##
## Handles the shutdown process of the WildcardAddressResolverService for a given switch.
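
The doc comments above describe the service life cycle: setup installs the wildcard address mapper, run refreshes the peer info, and stop removes the mapper again. As a rough usage sketch — it assumes the switch builder exposes `withServices` and that `WildcardAddressResolverService.new()` has a usable default interface provider, neither of which is shown in this diff:

import libp2p
import libp2p/services/wildcardresolverservice

let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withMplex()
  .withNoise()
  .withServices(@[Service(WildcardAddressResolverService.new())])
  .build()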

View File

@@ -12,7 +12,7 @@
{.push raises: [].}
import std/sugar
import pkg/stew/[results, byteutils]
import pkg/stew/byteutils, pkg/results
import multicodec, crypto/crypto, protobuf/minprotobuf, vbuffer
export crypto

View File

@@ -0,0 +1,63 @@
# Nim-LibP2P
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import pkg/chronos
import connection, bufferstream
export connection
type
WriteHandler = proc(data: seq[byte]): Future[void] {.
async: (raises: [CancelledError, LPStreamError])
.}
BridgeStream* = ref object of BufferStream
writeHandler: WriteHandler
closeHandler: proc(): Future[void] {.async: (raises: []).}
method write*(
s: BridgeStream, msg: seq[byte]
): Future[void] {.public, async: (raises: [CancelledError, LPStreamError], raw: true).} =
s.writeHandler(msg)
method closeImpl*(s: BridgeStream): Future[void] {.async: (raises: [], raw: true).} =
if not isNil(s.closeHandler):
discard s.closeHandler()
procCall BufferStream(s).closeImpl()
method getWrapped*(s: BridgeStream): Connection =
nil
proc bridgedConnections*(
closeTogether: bool = true, dirA = Direction.In, dirB = Direction.In
): (BridgeStream, BridgeStream) =
let connA = BridgeStream()
let connB = BridgeStream()
connA.dir = dirA
connB.dir = dirB
connA.initStream()
connB.initStream()
connA.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connB.pushData(data)
connB.writeHandler = proc(
data: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
connA.pushData(data)
if closeTogether:
connA.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connB.close()
connB.closeHandler = proc(): Future[void] {.async: (raises: []).} =
await noCancel connA.close()
return (connA, connB)
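
The new BridgeStream wires two in-memory connection ends back to back, which is handy for tests that need a transport-less Connection pair. A hedged usage sketch (the import path is assumed from the file's location in stream/):

import chronos
import libp2p/stream/bridgestream

proc demo() {.async.} =
  let (connA, connB) = bridgedConnections()       # closeTogether = true by default
  await connA.write(@[1'u8, 2, 3])                # bytes written on A ...
  var buf = newSeq[byte](3)
  await connB.readExactly(addr buf[0], buf.len)   # ... are readable on B
  doAssert buf == @[1'u8, 2, 3]
  await connA.close()                             # also closes connB

waitFor demo()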

View File

@@ -44,8 +44,7 @@ chronicles.formatIt(BufferStream):
proc len*(s: BufferStream): int =
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
else: 0
)
else: 0)
method initStream*(s: BufferStream) =
if s.objName.len == 0:

Some files were not shown because too many files have changed in this diff.