Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-09 14:28:11 -05:00)
Compare commits
143 Commits (SHA1):

2c0d4b873e, d803352bd6, 2eafac47e8, 848fdde0a8, 31e7dc68e2, 08299a2059, 2f3156eafb, 72e85101b0, d205260a3e, 97e576d146,
888cb78331, 1d4c261d2a, 83de0c0abd, c501adc9ab, f9fc24cc08, cd26244ccc, cabab6aafe, fb42a9b4aa, 141f4d9116, cb31152b53,
3a7745f920, a89916fb1a, c6cf46c904, b28a71ab13, 95b9859bcd, 9e599753af, 2e924906bb, e811c1ad32, 86695b55bb, 8c3a4d882a,
4bad343ddc, 47b8a05c32, 4e6f4af601, 7275f6f9c3, c3dae6a7d4, bb404eda4a, 584710bd80, ad5eae9adf, 26fae7cd2d, 87d6655368,
cd60b254a0, b88cdcdd4b, 4a5e06cb45, fff3a7ad1f, 05c894d487, 8850e9ccd9, 2746531851, 2856db5490, b29e78ccae, c9761c3588,
e4ef21e07c, 61429aa0d6, c1ef011556, cd1424c09f, 878d627f93, 1d6385ddc5, 873f730b4e, 1c1547b137, 9997f3e3d3, 4d0b4ecc22,
ccb24b5f1f, 5cb493439d, 24b284240a, b0f77d24f9, e32ac492d3, 470a7f8cc5, b269fce289, bc4febe92c, b5f9bfe0f4, 4ce1e8119b,
65136b38e2, ffc114e8d9, f2be2d6ed5, ab690a06a6, 10cdaf14c5, ebbfb63c17, ac25da6cea, fb41972ba3, 504d1618af, 0f91b23f12,
5ddd62a8b9, e7f13a7e73, 89e825fb0d, 1b706e84fa, 5cafcb70dc, 8c71266058, 9c986c5c13, 3d0451d7f2, b1f65c97ae, 5584809fca,
7586f17b15, 0e16d873c8, b11acd2118, 1376f5b077, 340ea05ae5, 024ec51f66, efe453df87, c0f4d903ba, 28f2b268ae, 5abb6916b6,
e6aec94c0c, 9eddc7c662, 028c730a4f, 3c93bdaf80, 037b99997e, e67744bf2a, 5843e6fb4f, f0ff7e4c69, 24808ad534, c4bccef138,
adf2345adb, f7daad91e6, 65052d7b59, b07ec5c0c6, f4c94ddba1, a7ec485ca9, 86b6469e35, 3e16ca724d, 93dd5a6768, ec43d0cb9f,
8469a750e7, fc6ac07ce8, 79cdc31b37, be33ad6ac7, a6e45d6157, 37e0f61679, 5d382b6423, 78a4344054, a4f0a638e7, c5aa3736f9,
b0f83fd48c, d6e5094095, 483e1d91ba, d215bb21e0, 61ac0c5b95, 1fa30f07e8, 39d0451a10, 4dc7a89f45, fd26f93b80, dd2c74d413,
b7e0df127f, f591e692fc, 8855bce085
@@ -1,52 +0,0 @@
-version: '{build}'
-
-image: Visual Studio 2015
-
-cache:
-  - NimBinaries
-  - p2pdCache
-
-matrix:
-  # We always want 32 and 64-bit compilation
-  fast_finish: false
-
-platform:
-  - x86
-  - x64
-
-# when multiple CI builds are queued, the tested commit needs to be in the last X commits cloned with "--depth X"
-clone_depth: 10
-
-install:
-  - git submodule update --init --recursive
-
-  # use the newest versions documented here: https://www.appveyor.com/docs/windows-images-software/#mingw-msys-cygwin
-  - IF "%PLATFORM%" == "x86" SET PATH=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%PATH%
-  - IF "%PLATFORM%" == "x64" SET PATH=C:\mingw-w64\x86_64-8.1.0-posix-seh-rt_v6-rev0\mingw64\bin;%PATH%
-
-  # build nim from our own branch - this to avoid the day-to-day churn and
-  # regressions of the fast-paced Nim development while maintaining the
-  # flexibility to apply patches
-  - curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_nim.sh
-  - env MAKE="mingw32-make -j2" ARCH_OVERRIDE=%PLATFORM% bash build_nim.sh Nim csources dist/nimble NimBinaries
-  - SET PATH=%CD%\Nim\bin;%PATH%
-
-  # set path for produced Go binaries
-  - MKDIR goblin
-  - CD goblin
-  - SET GOPATH=%CD%
-  - SET PATH=%GOPATH%\bin;%PATH%
-  - CD ..
-
-  # install and build go-libp2p-daemon
-  - bash scripts/build_p2pd.sh p2pdCache v0.3.0
-
-build_script:
-  - nimble install -y --depsOnly
-
-test_script:
-  - nimble test
-  - nimble examples_build
-
-deploy: off
.github/CODEOWNERS | 1 (new file)
@@ -0,0 +1 @@
+* @vacp2p/p2p
.github/actions/install_nim/action.yml | 2
@@ -88,6 +88,8 @@ runs:
         run: |
           if [[ '${{ inputs.cpu }}' == 'amd64' ]]; then
             PLATFORM=x64
+          elif [[ '${{ inputs.cpu }}' == 'arm64' ]]; then
+            PLATFORM=arm64
           else
             PLATFORM=x86
           fi
.github/workflows/auto_assign_pr.yml | 12 (new file)
@@ -0,0 +1,12 @@
+name: Auto Assign PR to Creator
+
+on:
+  pull_request:
+    types:
+      - opened
+
+jobs:
+  assign_creator:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: toshimaru/auto-author-assign@v1.6.2
.github/workflows/ci.yml | 23
@@ -14,7 +14,7 @@ concurrency:
 
 jobs:
   test:
-    timeout-minutes: 90
+    timeout-minutes: 40
     strategy:
       fail-fast: false
       matrix:
@@ -27,15 +27,16 @@ jobs:
         cpu: amd64
       - os: macos
         cpu: amd64
+      - os: macos-14
+        cpu: arm64
       - os: windows
         cpu: amd64
     nim:
       - ref: version-1-6
         memory_management: refc
-      # The ref below corresponds to the branch "version-2-0".
-      # Right before an update from Nimble 0.16.1 to 0.16.2.
-      # That update breaks our dependency resolution.
-      - ref: 8754469f4947844c5938f56e1fba846c349354b5
+      - ref: version-2-0
         memory_management: refc
+      - ref: version-2-2
+        memory_management: refc
     include:
       - platform:
@@ -50,6 +51,10 @@ jobs:
           os: macos
         builder: macos-13
         shell: bash
+      - platform:
+          os: macos-14
+        builder: macos-14
+        shell: bash
       - platform:
           os: windows
         builder: windows-2022
@@ -78,7 +83,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '~1.15.5'
+          go-version: '~1.16.0' # That's the minimum Go version that works with arm.
 
       - name: Install p2pd
         run: |
@@ -90,8 +95,8 @@ jobs:
         with:
           path: nimbledeps
           # Using nim.ref as a simple way to differentiate between nimble using the "pkgs" or "pkgs2" directories.
-          # The change happened on Nimble v0.14.0.
-          key: nimbledeps-${{ matrix.nim.ref }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
+          # The change happened on Nimble v0.14.0. Also forcing the deps to be reinstalled on each os and cpu.
+          key: nimbledeps-${{ matrix.nim.ref }}-${{ matrix.builder }}-${{ matrix.platform.cpu }}-${{ hashFiles('.pinned') }} # hashFiles returns a different value on windows
 
       - name: Install deps
         if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
@@ -113,5 +118,5 @@ jobs:
           nimble --version
           gcc --version
 
-          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
+          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
           nimble test
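Two of these ci.yml changes are behavioral rather than cosmetic. Adding `export` means the `NIMFLAGS` value assembled inside the `run:` script actually reaches the child processes spawned by `nimble test`; a plain shell assignment stays local to the shell unless the variable was already exported by the job environment. And folding `matrix.builder` and `matrix.platform.cpu` into the cache key keeps a dependency cache built on one OS/CPU pair from being restored on another, which is what the updated comment means by forcing the deps to be reinstalled on each os and cpu.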
.github/workflows/daily_amd64.yml | 23
@@ -6,9 +6,26 @@ on:
   workflow_dispatch:
 
 jobs:
-  test_amd64:
-    name: Daily amd64
+  test_amd64_latest:
+    name: Daily amd64 (latest dependencies)
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
+      nim: "[
+        {'ref': 'version-1-6', 'memory_management': 'refc'},
+        {'ref': 'version-2-0', 'memory_management': 'refc'},
+        {'ref': 'version-2-2', 'memory_management': 'refc'},
+        {'ref': 'devel', 'memory_management': 'refc'},
+      ]"
       cpu: "['amd64']"
+
+  test_amd64_pinned:
+    name: Daily amd64 (pinned dependencies)
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      pinned_deps: true
+      nim: "[
+        {'ref': 'version-1-6', 'memory_management': 'refc'},
+        {'ref': 'version-2-0', 'memory_management': 'refc'},
+        {'ref': 'version-2-2', 'memory_management': 'refc'},
+        {'ref': 'devel', 'memory_management': 'refc'},
+      ]"
+      cpu: "['amd64']"
.github/workflows/daily_common.yml | 38
@@ -4,6 +4,11 @@ name: Daily Common
 on:
   workflow_call:
     inputs:
+      pinned_deps:
+        description: 'Should dependencies be installed from pinned file or use latest versions'
+        required: false
+        type: boolean
+        default: false
       nim:
         description: 'Nim Configuration'
         required: true
@@ -17,26 +22,18 @@ on:
         required: false
         type: string
         default: "[]"
-      use_sat_solver:
-        description: 'Install dependencies with SAT Solver'
-        required: false
-        type: boolean
-        default: false
-
-concurrency:
-  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
-  cancel-in-progress: true
 
 jobs:
   delete_cache:
     name: Delete github action's branch cache
     runs-on: ubuntu-latest
     continue-on-error: true
     steps:
       - uses: snnaplab/delete-branch-cache-action@v1
 
   test:
     needs: delete_cache
-    timeout-minutes: 90
+    timeout-minutes: 40
     strategy:
       fail-fast: false
       matrix:
@@ -75,14 +72,20 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v5
         with:
-          go-version: '~1.15.5'
+          go-version: '~1.16.0'
+          cache: false
 
       - name: Install p2pd
         run: |
           V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
 
-      - name: Install dependencies
+      - name: Install dependencies (pinned)
+        if: ${{ inputs.pinned_deps }}
+        run: |
+          nimble install_pinned
+
+      - name: Install dependencies (latest)
+        if: ${{ inputs.pinned_deps != 'true' }}
         run: |
           nimble install -y --depsOnly
@@ -91,11 +94,6 @@ jobs:
           nim --version
           nimble --version
 
-          if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
-            dependency_solver="sat"
-          else
-            dependency_solver="legacy"
-          fi
-
-          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
+          export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
           nimble test
+          nimble testintegration
.github/workflows/daily_devel.yml | 14 (deleted)
@@ -1,14 +0,0 @@
-name: Daily Nim Devel
-
-on:
-  schedule:
-    - cron: "30 6 * * *"
-  workflow_dispatch:
-
-jobs:
-  test_nim_devel:
-    name: Daily Nim Devel
-    uses: ./.github/workflows/daily_common.yml
-    with:
-      nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
-      cpu: "['amd64']"
.github/workflows/daily_i386.yml | 12
@@ -10,6 +10,14 @@ jobs:
     name: Daily i386 (Linux)
     uses: ./.github/workflows/daily_common.yml
     with:
-      nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
+      nim: "[
+        {'ref': 'version-1-6', 'memory_management': 'refc'},
+        {'ref': 'version-2-0', 'memory_management': 'refc'},
+        {'ref': 'version-2-2', 'memory_management': 'refc'},
+        {'ref': 'devel', 'memory_management': 'refc'},
+      ]"
       cpu: "['i386']"
-      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
+      exclude: "[
+        {'platform': {'os':'macos'}},
+        {'platform': {'os':'windows'}},
+      ]"
.github/workflows/daily_sat.yml | 15 (deleted)
@@ -1,15 +0,0 @@
-name: Daily SAT
-
-on:
-  schedule:
-    - cron: "30 6 * * *"
-  workflow_dispatch:
-
-jobs:
-  test_amd64:
-    name: Daily SAT
-    uses: ./.github/workflows/daily_common.yml
-    with:
-      nim: "[{'ref': '8754469f4947844c5938f56e1fba846c349354b5', 'memory_management': 'refc'}]"
-      cpu: "['amd64']"
-      use_sat_solver: true
.github/workflows/dependencies.yml | 11
@@ -17,10 +17,13 @@ jobs:
         target:
           - repository: status-im/nimbus-eth2
             ref: unstable
+            secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
           - repository: waku-org/nwaku
             ref: master
+            secret: ACTIONS_GITHUB_TOKEN_NWAKU
          - repository: codex-storage/nim-codex
             ref: master
+            secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
     steps:
       - name: Clone target repository
         uses: actions/checkout@v4
@@ -29,7 +32,7 @@ jobs:
           ref: ${{ matrix.target.ref}}
           path: nbc
           fetch-depth: 0
-          token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
+          token: ${{ secrets[matrix.target.secret] }}
 
       - name: Checkout this ref in target repository
         run: |
@@ -44,7 +47,7 @@ jobs:
           git config --global user.email "${{ github.actor }}@users.noreply.github.com"
           git config --global user.name = "${{ github.actor }}"
           git commit --allow-empty -a -m "auto-bump nim-libp2p"
-          git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
-          git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
-          git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
+          git branch -D nim-libp2p-auto-bump-${{ matrix.target.ref }} || true
+          git switch -c nim-libp2p-auto-bump-${{ matrix.target.ref }}
+          git push -f origin nim-libp2p-auto-bump-${{ matrix.target.ref }}
.github/workflows/examples.yml | 60 (new file)
@@ -0,0 +1,60 @@
+name: Examples
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+  merge_group:
+  workflow_dispatch:
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
+
+jobs:
+  examples:
+    timeout-minutes: 30
+    strategy:
+      fail-fast: false
+
+    defaults:
+      run:
+        shell: bash
+
+    name: "Build Examples"
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+
+      - name: Setup Nim
+        uses: "./.github/actions/install_nim"
+        with:
+          shell: bash
+          os: linux
+          cpu: amd64
+          nim_ref: version-1-6
+
+      - name: Restore deps from cache
+        id: deps-cache
+        uses: actions/cache@v3
+        with:
+          path: nimbledeps
+          key: nimbledeps-${{ hashFiles('.pinned') }}
+
+      - name: Install deps
+        if: ${{ steps.deps-cache.outputs.cache-hit != 'true' }}
+        run: |
+          nimble install_pinned
+
+      - name: Build and run examples
+        run: |
+          nim --version
+          nimble --version
+          gcc --version
+
+          NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
+          nimble examples
.github/workflows/interop.yml | 11
@@ -27,12 +27,15 @@ jobs:
       - uses: actions/checkout@v4
       - uses: docker/setup-buildx-action@v3
       - name: Build image
-        run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
+        run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
       - name: Run tests
         uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
         with:
           test-filter: nim-libp2p-head
-          extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
+          # without suffix action fails because "hole-punching-interop" artifacts have
+          # the same name as "transport-interop" artifacts
+          test-results-suffix: transport-interop
+          extra-versions: ${{ github.workspace }}/interop/transport/version.json
           s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
           s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
           s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
@@ -45,12 +48,12 @@ jobs:
       - uses: actions/checkout@v4
       - uses: docker/setup-buildx-action@v3
       - name: Build image
-        run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
+        run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
       - name: Run tests
         uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
         with:
           test-filter: nim-libp2p-head
-          extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
+          extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
           s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
           s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
           s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
.github/workflows/linters.yml | 21
@@ -18,17 +18,10 @@ jobs:
         with:
           fetch-depth: 2 # In PR, has extra merge commit: ^1 = PR, ^2 = base
 
-      - name: Setup NPH
-        # Pin nph to a specific version to avoid sudden style differences.
-        # Updating nph version should be accompanied with running the new version on the fluffy directory.
-        run: |
-          VERSION="v0.5.1"
-          ARCHIVE="nph-linux_x64.tar.gz"
-          curl -L "https://github.com/arnetheduck/nph/releases/download/${VERSION}/${ARCHIVE}" -o ${ARCHIVE}
-          tar -xzf ${ARCHIVE}
-
-      - name: Check style
-        run: |
-          shopt -s extglob # Enable extended globbing
-          ./nph examples libp2p tests tools *.@(nim|nims|nimble)
-          git diff --exit-code
+      - name: Check `nph` formatting
+        uses: arnetheduck/nph-action@v1
+        with:
+          version: 0.6.1
+          options: "examples libp2p tests interop tools *.nim*"
+          fail: true
+          suggest: true
.github/workflows/pr_lint.yml | 35 (new file)
@@ -0,0 +1,35 @@
+name: "Conventional Commits"
+
+on:
+  pull_request:
+    types:
+      - opened
+      - edited
+      - reopened
+      - synchronize
+
+jobs:
+  main:
+    name: Validate PR title
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+    steps:
+      - uses: amannn/action-semantic-pull-request@v5
+        id: lint_pr_title
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - uses: marocchino/sticky-pull-request-comment@v2
+        # When the previous steps fails, the workflow would stop. By adding this
+        # condition you can continue the execution with the populated error message.
+        if: always() && (steps.lint_pr_title.outputs.error_message != null)
+        with:
+          header: pr-title-lint-error
+          message: |
+            Pull requests titles must follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/)
+
+      # Delete a previous comment when the issue has been resolved
+      - if: ${{ steps.lint_pr_title.outputs.error_message == null }}
+        uses: marocchino/sticky-pull-request-comment@v2
+        with:
+          header: pr-title-lint-error
+          delete: true
.gitignore | 8
@@ -17,3 +17,11 @@ examples/*.md
 nimble.develop
 nimble.paths
 go-libp2p-daemon/
+
+# Ignore all test build files in tests folder (auto generated when running tests).
+# First rule (`tests/**/test*[^.]*`) will ignore all binaries: has prefix test + does not have dot in name.
+# Second and third rules are here to un-ignores all files with extension and Docker file,
+# because it appears that vs code is skipping text search is some tests files without these rules.
+tests/**/test*[^.]*
+!tests/**/*.*
+!tests/**/Dockerfile
.pinned | 37
@@ -1,19 +1,22 @@
-bearssl;https://github.com/status-im/nim-bearssl@#667b40440a53a58e9f922e29e20818720c62d9ac
-chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
-chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
+bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
+chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
+chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
 dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
-faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
-httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
-json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
+faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
+httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
+json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb11df3647a2cee107cd4cce3593cbb8bcf
 metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
-ngtcp2;https://github.com/status-im/nim-ngtcp2@#6834f4756b6af58356ac9c4fef3d71db3c3ae5fe
-nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
-quic;https://github.com/status-im/nim-quic.git@#ddcb31ffb74b5460ab37fd13547eca90594248bc
-results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
-secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
-serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
-stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
-testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
-unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
-websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
-zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
+ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
+nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
+quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
+results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
+secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
+serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
+stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
+testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
+unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
+websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
+zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
+jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
+bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
+bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97
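Each `.pinned` line takes the form `name;repository-url@#commit-hash`, which is what the repository's `nimble install_pinned` task consumes. Besides bumping every existing pin, the change adds three dependencies at the end of the file: jwt, bearssl_pkey_decoder and bio.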
README.md | 173
@@ -20,39 +20,120 @@
 - [Background](#background)
 - [Install](#install)
 - [Getting Started](#getting-started)
-- [Go-libp2p-daemon](#go-libp2p-daemon)
-- [Modules](#modules)
-- [Users](#users)
-- [Stability](#stability)
 - [Development](#development)
   - [Contribute](#contribute)
   - [Contributors](#contributors)
   - [Core Maintainers](#core-maintainers)
+- [Modules](#modules)
+- [Users](#users)
+- [Stability](#stability)
 - [License](#license)
 
 ## Background
 libp2p is a [Peer-to-Peer](https://en.wikipedia.org/wiki/Peer-to-peer) networking stack, with [implementations](https://github.com/libp2p/libp2p#implementations) in multiple languages derived from the same [specifications.](https://github.com/libp2p/specs)
 
-Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It's striving to be a modular stack, with sane and secure defaults, useful protocols, while remain open and extensible.
-This implementation in native Nim, relying on [chronos](https://github.com/status-im/nim-chronos) for async. It's used in production by a few [projects](#users)
+Building large scale peer-to-peer systems has been complex and difficult in the last 15 years and libp2p is a way to fix that. It strives to be a modular stack with secure defaults and useful protocols, while remaining open and extensible.
+This is a native Nim implementation, using [chronos](https://github.com/status-im/nim-chronos) for asynchronous execution. It's used in production by a few [projects](#users)
 
 Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
 
 ## Install
 **Prerequisite**
 - [Nim](https://nim-lang.org/install.html)
-> The currently supported Nim version is 1.6.18.
+
+> The currently supported Nim versions are 1.6, 2.0 and 2.2.
 
 ```
 nimble install libp2p
 ```
 
 ## Getting Started
-You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/).
-Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
-
-```bash
-nim c -r --threads:on examples/directchat.nim
-```
-
-This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
-
-```bash
-./examples/directchat
-/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # change this hash by the hash you were given
-```
-
-You can now chat between the instances!
+You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
 
+## Development
+Clone the repository and install the dependencies:
+```sh
+git clone https://github.com/vacp2p/nim-libp2p
+cd nim-libp2p
+nimble install -dy
+```
+
+### Testing
+Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
+To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
+
+Run unit tests:
+```sh
+# run all the unit tests
+nimble test
+```
+**Obs:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
+
+If you only want to run tests that don't require `go-libp2p-daemon`, use:
+```
+nimble testnative
+```
+
+For a list of all available test suites, use:
+```
+nimble tasks
+```
+
+### Contribute
+
+The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
+- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
+- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
+- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
+- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
+- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
+
+### Contributors
+<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
+
+### Core Maintainers
+<table>
+  <tbody>
+    <tr>
+      <td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
+      <td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
+      <td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
+    </tr>
+  </tbody>
+</table>
+
+### Compile time flags
+
+Enable quic transport support
+```bash
+nim c -d:libp2p_quic_support some_file.nim
+```
+
+Enable expensive metrics (ie, metrics with per-peer cardinality):
+```bash
+nim c -d:libp2p_expensive_metrics some_file.nim
+```
+
+Set list of known libp2p agents for metrics:
+```bash
+nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
+```
+
+Specify gossipsub specific topics to measure in the metrics:
+```bash
+nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
+```
+
 ## Modules
 List of packages modules implemented in nim-libp2p:
@@ -70,6 +151,8 @@ List of packages modules implemented in nim-libp2p:
 | [libp2p-tcp](libp2p/transports/tcptransport.nim) | TCP transport |
 | [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
 | [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
+| [libp2p-quic](libp2p/transports/quictransport.nim) | Quic Transport |
+| [libp2p-memory](libp2p/transports/memorytransport.nim) | Memory Transport |
 | **Secure Channels** | |
 | [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
 | [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
@@ -78,10 +161,10 @@ List of packages modules implemented in nim-libp2p:
 | [libp2p-yamux](libp2p/muxers/yamux/yamux.nim) | [Yamux](https://docs.libp2p.io/concepts/multiplex/yamux/) multiplexer |
 | **Data Types** | |
 | [peer-id](libp2p/peerid.nim) | [Cryptographic identifiers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-id) |
-| [peer-store](libp2p/peerstore.nim) | ["Address book" of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
+| [peer-store](libp2p/peerstore.nim) | [Address book of known peers](https://docs.libp2p.io/concepts/fundamentals/peers/#peer-store) |
 | [multiaddress](libp2p/multiaddress.nim) | [Composable network addresses](https://github.com/multiformats/multiaddr) |
-| [signed envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
-| [routing record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
+| [signed-envelope](libp2p/signed_envelope.nim) | [Signed generic data container](https://github.com/libp2p/specs/blob/master/RFC/0002-signed-envelopes.md) |
+| [routing-record](libp2p/routing_record.nim) | [Signed peer dialing informations](https://github.com/libp2p/specs/blob/master/RFC/0003-routing-records.md) |
 | [discovery manager](libp2p/discovery/discoverymngr.nim) | Discovery Manager |
 | **Utilities** | |
 | [libp2p-crypto](libp2p/crypto) | Cryptographic backend |
@@ -109,66 +192,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
 
 We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
 
-## Development
-Clone and Install dependencies:
-```sh
-git clone https://github.com/vacp2p/nim-libp2p
-cd nim-libp2p
-# to use dependencies computed by nimble
-nimble install -dy
-# OR to install the dependencies versions used in CI
-nimble install_pinned
-```
-
-Run unit tests:
-```sh
-# run all the unit tests
-nimble test
-```
-This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
-Or use `nimble tasks` to show all available tasks.
-
-### Contribute
-
-The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
-- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
-- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
-- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
-- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
-- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
-The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
-
-### Contributors
-<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
-
-### Core Maintainers
-<table>
-  <tbody>
-    <tr>
-      <td align="center"><a href="https://github.com/Menduist"><img src="https://avatars.githubusercontent.com/u/13471753?v=4?s=100" width="100px;" alt="Tanguy"/><br /><sub><b>Tanguy (Menduist)</b></sub></a></td>
-      <td align="center"><a href="https://github.com/lchenut"><img src="https://avatars.githubusercontent.com/u/11214565?v=4?s=100" width="100px;" alt="Ludovic"/><br /><sub><b>Ludovic</b></sub></a></td>
-      <td align="center"><a href="https://github.com/diegomrsantos"><img src="https://avatars.githubusercontent.com/u/7316595?v=4?s=100" width="100px;" alt="Diego"/><br /><sub><b>Diego</b></sub></a></td>
-    </tr>
-  </tbody>
-</table>
-
-### Compile time flags
-
-Enable expensive metrics (ie, metrics with per-peer cardinality):
-```bash
-nim c -d:libp2p_expensive_metrics some_file.nim
-```
-
-Set list of known libp2p agents for metrics:
-```bash
-nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
-```
-
-Specify gossipsub specific topics to measure in the metrics:
-```bash
-nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
-```
-
 ## License
 
 Licensed and distributed under either of
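The `-d:` switches documented in the README's compile-time flags section are plain Nim defines. As a hypothetical illustration (this snippet is not part of the diff), boolean flags are tested with `defined(...)` and string flags like `KnownLibP2PAgents` are picked up through `{.strdefine.}`:

```nim
# Compile with, e.g.:
#   nim c -d:libp2p_expensive_metrics -d:KnownLibP2PAgents=nimbus,lighthouse flags_demo.nim

# String define: -d:KnownLibP2PAgents=... overrides this default at compile time.
const KnownLibP2PAgents {.strdefine.} = ""

# Boolean define: either present on the command line or not.
when defined(libp2p_expensive_metrics):
  echo "per-peer metrics enabled"
else:
  echo "per-peer metrics disabled"

echo "known agents: ", KnownLibP2PAgents
```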
@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
 if dirExists("nimbledeps/pkgs2"):
   switch("NimblePath", "nimbledeps/pkgs2")
 
+switch("warningAsError", "UnusedImport:on")
 switch("warning", "CaseTransition:off")
 switch("warning", "ObservableStores:off")
 switch("warning", "LockLevel:off")
@@ -26,15 +26,22 @@ proc main() {.async.} =
   let customProtoCodec = "/test"
   var proto = new LPProtocol
   proto.codec = customProtoCodec
-  proto.handler = proc(conn: Connection, proto: string) {.async.} =
-    var msg = string.fromBytes(await conn.readLp(1024))
-    echo "1 - Dst Received: ", msg
-    assert "test1" == msg
-    await conn.writeLp("test2")
-    msg = string.fromBytes(await conn.readLp(1024))
-    echo "2 - Dst Received: ", msg
-    assert "test3" == msg
-    await conn.writeLp("test4")
+  proto.handler = proc(
+      conn: Connection, proto: string
+  ) {.async: (raises: [CancelledError]).} =
+    try:
+      var msg = string.fromBytes(await conn.readLp(1024))
+      echo "1 - Dst Received: ", msg
+      assert "test1" == msg
+      await conn.writeLp("test2")
+      msg = string.fromBytes(await conn.readLp(1024))
+      echo "2 - Dst Received: ", msg
+      assert "test3" == msg
+      await conn.writeLp("test4")
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
 
   let
     relay = Relay.new()
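This hunk is the first instance of a pattern repeated across every example below: handlers are annotated `{.async: (raises: [CancelledError]).}`, re-raise cancellation, log any other `CatchableError`, and release the stream when done. Condensed into one self-contained sketch (the `/echo/1.0.0` codec and `newEchoProto` name are illustrative, not from the diff):

```nim
import chronos
import libp2p
import stew/byteutils

proc newEchoProto(): LPProtocol =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      # read one length-prefixed message, then answer
      echo "received: ", string.fromBytes(await conn.readLp(1024))
      await conn.writeLp("ack")
    except CancelledError as e:
      raise e # cancellation must always propagate
    except CatchableError as e:
      echo "exception in handler: ", e.msg # nothing else may escape the handler
    finally:
      await conn.close() # the handler owns the stream and must close it

  LPProtocol.new(codecs = @["/echo/1.0.0"], handler = handle)
```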
@@ -43,12 +43,17 @@ proc new(T: typedesc[ChatProto], c: Chat): T =
   let chatproto = T()
 
   # create handler for incoming connection
-  proc handle(stream: Connection, proto: string) {.async.} =
-    if c.connected and not c.conn.closed:
-      c.writeStdout "a chat session is already in progress - refusing incoming peer!"
-      await stream.close()
-    else:
-      await c.handlePeer(stream)
+  proc handle(stream: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    try:
+      if c.connected and not c.conn.closed:
+        c.writeStdout "a chat session is already in progress - refusing incoming peer!"
+      else:
+        await c.handlePeer(stream)
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      await stream.close()
 
   # assign the new handler
examples/examples_build.nim | 3 (new file)
@@ -0,0 +1,3 @@
+{.used.}
+
+import directchat, tutorial_6_game
examples/examples_run.nim | 5 (new file)
@@ -0,0 +1,5 @@
+{.used.}
+
+import
+  helloworld, circuitrelay, tutorial_1_connect, tutorial_2_customproto,
+  tutorial_3_protobuf, tutorial_4_gossipsub, tutorial_5_discovery
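These two aggregator modules let CI compile every example (and run the non-interactive ones) in a single pass; the `{.used.}` pragma marks each aggregator as intentionally used, presumably so the stricter unused-import-as-error setting added in the configuration hunk above does not trip on imports that exist purely for compilation coverage.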
@@ -93,8 +93,8 @@ proc serveThread(udata: CustomData) {.async.} =
       pending.add(item.write(msg))
     if len(pending) > 0:
       var results = await all(pending)
-  except:
-    echo getCurrentException().msg
+  except CatchableError as err:
+    echo err.msg
 
 proc main() {.async.} =
   var data = new CustomData
@@ -3,9 +3,7 @@
 - [Prerequisites](#prerequisites)
 - [Installation](#installation)
   - [Script](#script)
-- [Usage](#usage)
-  - [Example](#example)
-- [Getting Started](#getting-started)
+- [Examples](#examples)
 
 # Introduction
 This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
@@ -13,20 +11,25 @@
 For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
 > **Required only** for running the tests.
 
 # Prerequisites
-Go with version `1.15.15`.
+Go with version `1.16.0`
+> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
 
 # Installation
 Follow one of the methods below:
 
 ## Script
 Run the build script while having the `go` command pointing to the correct Go version.
-We recommend using `1.15.15`, as previously stated.
 ```sh
 ./scripts/build_p2pd.sh
 ```
-If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
-If you find any issues, please head into our discord and ask for our assistance.
+`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
+```sh
+./scripts/build_p2pd.sh -f
+```
+Or:
+```sh
+./scripts/build_p2pd.sh --force
+```
+
+If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
+If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
 
 After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
@@ -34,28 +37,7 @@
 export PATH="$PATH:$HOME/go/bin"
 ```
 > **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
 
-# Usage
-
-## Example
+# Examples
 Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
-
-## Getting Started
-Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
-
-```bash
-nim c -r --threads:on examples/directchat.nim
-```
-
-This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
-
-```bash
-./examples/directchat
-/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
-```
-
-You can now chat between the instances!
@@ -11,12 +11,17 @@ type TestProto = ref object of LPProtocol # declare a custom protocol
 
 proc new(T: typedesc[TestProto]): T =
   # every incoming connections will be in handled in this closure
-  proc handle(conn: Connection, proto: string) {.async.} =
-    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
-    await conn.writeLp("Roger p2p!")
-
-    # We must close the connections ourselves when we're done with it
-    await conn.close()
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    try:
+      echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
+      await conn.writeLp("Roger p2p!")
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      # We must close the connections ourselves when we're done with it
+      await conn.close()
 
   return T.new(codecs = @[TestCodec], handler = handle)
@@ -25,12 +25,17 @@ type TestProto = ref object of LPProtocol
 
 proc new(T: typedesc[TestProto]): T =
   # every incoming connections will in be handled in this closure
-  proc handle(conn: Connection, proto: string) {.async.} =
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
     # Read up to 1024 bytes from this connection, and transform them into
     # a string
-    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
-    # We must close the connections ourselves when we're done with it
-    await conn.close()
+    try:
+      echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      await conn.close()
 
   return T.new(codecs = @[TestCodec], handler = handle)
@@ -108,12 +108,18 @@ type
 
 proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
   var res: MetricProto
-  proc handle(conn: Connection, proto: string) {.async.} =
-    let
-      metrics = await res.metricGetter()
-      asProtobuf = metrics.encode()
-    await conn.writeLp(asProtobuf.buffer)
-    await conn.close()
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    try:
+      let
+        metrics = await res.metricGetter()
+        asProtobuf = metrics.encode()
+      await conn.writeLp(asProtobuf.buffer)
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      await conn.close()
 
   res = MetricProto.new(@["/metric-getter/1.0.0"], handle)
   res.metricGetter = cb
@@ -79,8 +79,7 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
       let decoded = MetricList.decode(message.data)
       if decoded.isErr:
         return ValidationResult.Reject
-      return ValidationResult.Accept
-    ,
+      return ValidationResult.Accept,
   )
   # This "validator" will attach to the `metrics` topic and make sure
   # that every message in this topic is valid. This allows us to stop
@@ -93,7 +92,8 @@ proc oneNode(node: Node, rng: ref HmacDrbgContext) {.async.} =
   node.gossip.subscribe(
     "metrics",
     proc(topic: string, data: seq[byte]) {.async.} =
-      echo MetricList.decode(data).tryGet()
+      let m = MetricList.decode(data).expect("metric can be decoded")
+      echo m
     ,
   )
 else:
@@ -158,8 +158,8 @@ waitFor(main())
 ## This is John receiving & logging everyone's metrics.
 ##
 ## ## Going further
-## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
-## and [topic params](https://status-im.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
+## Building efficient & safe GossipSub networks is a tricky subject. By tweaking the [gossip params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#GossipSubParams)
+## and [topic params](https://vacp2p.github.io/nim-libp2p/master/libp2p/protocols/pubsub/gossipsub/types.html#TopicParams),
 ## you can achieve very different properties.
 ##
 ## Also see reports for [GossipSub v1.1](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4)
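The `tryGet()`-to-`expect(...)` swap is a small nim-results idiom: both raise when the `Result` holds an error, but `expect` attaches the supplied message ("metric can be decoded") to the failure, documenting in-line why the decode is assumed safe for messages that already passed the validator above.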
@@ -34,9 +34,15 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
 const DumbCodec = "/dumb/proto/1.0.0"
 type DumbProto = ref object of LPProtocol
 proc new(T: typedesc[DumbProto], nodeNumber: int): T =
-  proc handle(conn: Connection, proto: string) {.async.} =
-    echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
-    await conn.close()
+  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
+    try:
+      echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
+    except CancelledError as e:
+      raise e
+    except CatchableError as e:
+      echo "exception in handler", e.msg
+    finally:
+      await conn.close()
 
   return T.new(codecs = @[DumbCodec], handler = handle)
@@ -152,21 +152,26 @@ proc draw(g: Game) =
   ## peer know that we are available, check that he is also available,
   ## and launch the game.
   proc new(T: typedesc[GameProto], g: Game): T =
-    proc handle(conn: Connection, proto: string) {.async.} =
+    proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
       defer:
         await conn.closeWithEof()
-      if g.peerFound.finished or g.hasCandidate:
-        await conn.close()
-        return
-      g.hasCandidate = true
-      await conn.writeLp("ok")
-      if "ok" != string.fromBytes(await conn.readLp(1024)):
-        g.hasCandidate = false
-        return
-      g.peerFound.complete(conn)
-      # The handler of a protocol must wait for the stream to
-      # be finished before returning
-      await conn.join()
+      try:
+        if g.peerFound.finished or g.hasCandidate:
+          await conn.close()
+          return
+        g.hasCandidate = true
+        await conn.writeLp("ok")
+        if "ok" != string.fromBytes(await conn.readLp(1024)):
+          g.hasCandidate = false
+          return
+        g.peerFound.complete(conn)
+        # The handler of a protocol must wait for the stream to
+        # be finished before returning
+        await conn.join()
+      except CancelledError as e:
+        raise e
+      except CatchableError as e:
+        echo "exception in handler", e.msg
 
     return T.new(codecs = @["/tron/1.0.0"], handler = handle)
@@ -214,8 +219,7 @@ proc networking(g: Game) {.async.} =
       # We are "player 2"
       swap(g.localPlayer, g.remotePlayer)
     except CatchableError as exc:
-      discard
-    ,
+      discard,
   )
 
   await switch.start()
@@ -268,14 +272,11 @@ nico.init("Status", "Tron")
 nico.createWindow("Tron", mapSize * 4, mapSize * 4, 4, false)
 nico.run(
   proc() =
-    discard
-  ,
+    discard,
   proc(dt: float32) =
-    game.update(dt)
-  ,
+    game.update(dt),
   proc() =
-    game.draw()
-  ,
+    game.draw(),
 )
 waitFor(netFut.cancelAndWait())
interop/hole-punching/Dockerfile | 19 (new file)
@@ -0,0 +1,19 @@
+# syntax=docker/dockerfile:1.5-labs
+FROM nimlang/nim:1.6.16 as builder
+
+WORKDIR /workspace
+
+COPY .pinned libp2p.nimble nim-libp2p/
+
+RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev
+
+RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y
+
+COPY . nim-libp2p/
+
+RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
+
+FROM --platform=linux/amd64 debian:bullseye-slim
+RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev
+COPY --from=builder /workspace/nim-libp2p/hole-punching-tests /usr/bin/hole-punch-client
+ENV RUST_BACKTRACE=1
138
interop/hole-punching/hole_punching.nim
Normal file
138
interop/hole-punching/hole_punching.nim
Normal file
@@ -0,0 +1,138 @@
import std/[os, options, strformat, sequtils]
import redis
import chronos, chronicles
import
  ../../libp2p/[
    builders,
    switch,
    multicodec,
    observedaddrmanager,
    services/hpservice,
    services/autorelayservice,
    protocols/connectivity/autonat/client as aclient,
    protocols/connectivity/relay/client as rclient,
    protocols/connectivity/relay/relay,
    protocols/connectivity/autonat/service,
    protocols/ping,
  ]
import ../../tests/[stubs/autonatclientstub, errorhelpers]

logScope:
  topics = "hp interop node"

proc createSwitch(r: Relay = nil, hpService: Service = nil): Switch =
  let rng = newRng()
  var builder = SwitchBuilder
    .new()
    .withRng(rng)
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withObservedAddrManager(ObservedAddrManager.new(maxSize = 1, minCount = 1))
    .withTcpTransport({ServerFlags.TcpNoDelay})
    .withYamux()
    .withAutonat()
    .withNoise()

  if hpService != nil:
    builder = builder.withServices(@[hpService])

  if r != nil:
    builder = builder.withCircuitRelay(r)

  let s = builder.build()
  s.mount(Ping.new(rng = rng))
  return s

proc main() {.async.} =
  let relayClient = RelayClient.new()
  let autoRelayService = AutoRelayService.new(1, relayClient, nil, newRng())
  let autonatClientStub = AutonatClientStub.new(expectedDials = 1)
  autonatClientStub.answer = NotReachable
  let autonatService = AutonatService.new(autonatClientStub, newRng(), maxQueueSize = 1)
  let hpservice = HPService.new(autonatService, autoRelayService)

  let
    isListener = getEnv("MODE") == "listen"
    switch = createSwitch(relayClient, hpservice)
    auxSwitch = createSwitch()
    redisClient = open("redis", 6379.Port)

  debug "Connected to redis"

  await switch.start()
  await auxSwitch.start()

  let relayAddr =
    try:
      redisClient.bLPop(@["RELAY_TCP_ADDRESS"], 0)
    except Exception as e:
      raise newException(CatchableError, e.msg)

  debug "All relay addresses", relayAddr

  # This is necessary to make the autonat service work. It will ask this peer
  # for our reachability, which the autonat client stub will answer with
  # NotReachable.
  await switch.connect(auxSwitch.peerInfo.peerId, auxSwitch.peerInfo.addrs)

  # Wait for autonat to be NotReachable
  while autonatService.networkReachability != NetworkReachability.NotReachable:
    await sleepAsync(100.milliseconds)

  # This will trigger the autonat relay service to make a reservation.
  let relayMA = MultiAddress.init(relayAddr[1]).tryGet()

  try:
    debug "Dialing relay...", relayMA
    let relayId = await switch.connect(relayMA).wait(30.seconds)
    debug "Connected to relay", relayId
  except AsyncTimeoutError as e:
    raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)

  # Wait for our relay address to be published
  while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
    await sleepAsync(100.milliseconds)

  if isListener:
    let listenerPeerId = switch.peerInfo.peerId
    discard redisClient.rPush("LISTEN_CLIENT_PEER_ID", $listenerPeerId)
    debug "Pushed listener client peer id to redis", listenerPeerId

    # Nothing to do anymore, wait to be killed
    await sleepAsync(2.minutes)
  else:
    let listenerId =
      try:
        PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
      except Exception as e:
        raise newException(CatchableError, "Exception init peer: " & e.msg, e)

    debug "Got listener peer id", listenerId
    let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()

    debug "Dialing listener relay address", listenerRelayAddr
    await switch.connect(listenerId, @[listenerRelayAddr])

    # wait for hole-punching to complete in the background
    await sleepAsync(5000.milliseconds)

    let conn = switch.connManager.selectMuxer(listenerId).connection
    let channel = await switch.dial(listenerId, @[listenerRelayAddr], PingCodec)
    let delay = await Ping.new().ping(channel)
    await allFuturesThrowing(
      channel.close(), conn.close(), switch.stop(), auxSwitch.stop()
    )
    echo &"""{{"rtt_to_holepunched_peer_millis":{delay.millis}}}"""

try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"

  discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)
interop/transport/Dockerfile (new file, 18 lines)
@@ -0,0 +1,18 @@
# syntax=docker/dockerfile:1.5-labs
FROM nimlang/nim:1.6.16 as builder

WORKDIR /app

COPY .pinned libp2p.nimble nim-libp2p/

RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y libssl-dev

RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240dbf11c544011dd0e033d3c3acca56af" -y

COPY . nim-libp2p/

RUN \
  cd nim-libp2p && \
  nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim

ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]
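
# Logging is routed to stderr (chronicles_default_output_device=stderr above),
# leaving stdout free for the JSON result line the interop harness presumably parses.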

@@ -42,29 +42,26 @@ proc main() {.async.} =
    discard switchBuilder.withTcpTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/tcp/0").tryGet()
    )
  of "ws":
    discard switchBuilder
      .withTransport(
        proc(upgr: Upgrade): Transport =
          WsTransport.new(upgr)
  of "quic-v1":
    discard switchBuilder.withQuicTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
    )
  of "ws":
    discard switchBuilder.withWsTransport().withAddress(
      MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
    )
      .withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
  else:
    doAssert false

  case secureChannel
  of "noise":
    discard switchBuilder.withNoise()
  else:
    doAssert false

  case muxer
  of "yamux":
    discard switchBuilder.withYamux()
  of "mplex":
    discard switchBuilder.withMplex()
  else:
    doAssert false

  let
    rng = newRng()
@@ -83,7 +80,7 @@ proc main() {.async.} =
    try:
      redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
    except Exception as e:
      raise newException(CatchableError, e.msg)
      raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
  let
    remoteAddr = MultiAddress.init(listenerAddr).tryGet()
    dialingStart = Moment.now()
@@ -99,7 +96,18 @@ proc main() {.async.} =
      pingRTTMilllis: float(pingDelay.milliseconds),
    )
  )
  quit(0)

discard waitFor(main().withTimeout(testTimeout))
quit(1)
try:
  proc mainAsync(): Future[string] {.async.} =
    # mainAsync wraps main and returns some value, as otherwise
    # 'waitFor(fut)' has no type (or is ambiguous)
    await main()
    return "done"

  discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError as e:
  error "Program execution timed out", description = e.msg
  quit(-1)
except CatchableError as e:
  error "Unexpected error", description = e.msg
  quit(-1)
@@ -3,7 +3,8 @@
  "containerImageID": "nim-libp2p-head",
  "transports": [
    "tcp",
    "ws"
    "ws",
    "quic-v1"
  ],
  "secureChannels": [
    "noise"
@@ -17,7 +17,7 @@ when defined(nimdoc):
  ## stay backward compatible during the Major version, whereas private ones can
  ## change at each new Minor version.
  ##
  ## If you're new to nim-libp2p, you can find a tutorial `here<https://status-im.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
  ## If you're new to nim-libp2p, you can find a tutorial `here<https://vacp2p.github.io/nim-libp2p/docs/tutorial_1_connect/>`_
  ## that can help you get started.

  # Import stuff for doc
@@ -52,7 +52,6 @@ else:
    stream/connection,
    transports/transport,
    transports/tcptransport,
    transports/quictransport,
    protocols/secure/noise,
    cid,
    multihash,
@@ -71,3 +70,7 @@ else:
    minprotobuf, switch, peerid, peerinfo, connection, multiaddress, crypto, lpstream,
    bufferstream, muxer, mplex, transport, tcptransport, noise, errors, cid, multihash,
    multicodec, builders, pubsub

  when defined(libp2p_quic_support):
    import libp2p/transports/quictransport
    export quictransport
@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose

packageName = "libp2p"
version = "1.7.0"
version = "1.11.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

requires "nim >= 1.6.0",
  "nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
  "chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew#head",
  "websock", "unittest2",
  "https://github.com/status-im/nim-quic.git#ddcb31ffb74b5460ab37fd13547eca90594248bc"
  "chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
  "stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
  "https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"

let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -25,16 +25,12 @@ let cfg =

import hashes, strutils

proc runTest(
  filename: string, verify: bool = true, sign: bool = true, moreoptions: string = ""
) =
proc runTest(filename: string, moreoptions: string = "") =
  var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
  excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
  excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
  excstr.add(" " & moreoptions & " ")
  if getEnv("CICOV").len > 0:
    excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
  exec excstr & " -r " & " tests/" & filename
  exec excstr & " -r -d:libp2p_quic_support tests/" & filename
  rmFile "tests/" & filename.toExe

proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -60,51 +56,18 @@ task testinterop, "Runs interop tests":
  runTest("testinterop")

task testpubsub, "Runs pubsub tests":
  runTest(
    "pubsub/testgossipinternal",
    sign = false,
    verify = false,
    moreoptions = "-d:pubsub_internal_testing",
  )
  runTest("pubsub/testpubsub")
  runTest("pubsub/testpubsub", sign = false, verify = false)
  runTest(
    "pubsub/testpubsub",
    sign = false,
    verify = false,
    moreoptions = "-d:libp2p_pubsub_anonymize=true",
  )

task testpubsub_slim, "Runs pubsub tests":
  runTest(
    "pubsub/testgossipinternal",
    sign = false,
    verify = false,
    moreoptions = "-d:pubsub_internal_testing",
  )
  runTest("pubsub/testpubsub")

task testfilter, "Run PKI filter test":
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1\"")
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519\"")
  runTest(
    "testpkifilter", moreoptions = "-d:libp2p_pki_schemes=\"secp256k1;ed25519;ecnist\""
  )
  runTest("testpkifilter")
  runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")

task test, "Runs the test suite":
  exec "nimble testnative"
  exec "nimble testpubsub"
  exec "nimble testdaemon"
  exec "nimble testinterop"
  exec "nimble testfilter"
  exec "nimble examples_build"
task testintegration, "Runs integration tests":
  runTest("testintegration")

task test_slim, "Runs the (slimmed down) test suite":
  exec "nimble testnative"
  exec "nimble testpubsub_slim"
task test, "Runs the test suite":
  runTest("testall")
  exec "nimble testfilter"
  exec "nimble examples_build"

task website, "Build the website":
  tutorialToMd("examples/tutorial_1_connect.nim")
@@ -116,19 +79,12 @@ task website, "Build the website":
  tutorialToMd("examples/circuitrelay.nim")
  exec "mkdocs build"

task examples_build, "Build the samples":
  buildSample("directchat")
  buildSample("helloworld", true)
  buildSample("circuitrelay", true)
  buildSample("tutorial_1_connect", true)
  buildSample("tutorial_2_customproto", true)
  buildSample("tutorial_3_protobuf", true)
  buildSample("tutorial_4_gossipsub", true)
  buildSample("tutorial_5_discovery", true)
  exec "nimble install -y nimpng@#HEAD"
  # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
  exec "nimble install -y nico@#af99dd60bf2b395038ece815ea1012330a80d6e6"
  buildSample("tutorial_6_game", false, "--styleCheck:off")
task examples, "Build and run examples":
  exec "nimble install -y nimpng"
  exec "nimble install -y nico --passNim=--skipParentCfg"
  buildSample("examples_build", false, "--styleCheck:off") # build only

  buildSample("examples_run", true)

# pin system
# while nimble lockfile
libp2p/autotls/acme/api.nim (new file, 478 lines)
@@ -0,0 +1,478 @@
import options, base64, sequtils, strutils, json
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem

import ./utils
import ../../crypto/crypto
import ../../crypto/rsa

export ACMEError

const
  LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
  LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
  Alg = "RS256"
  DefaultChalCompletedRetries = 10
  DefaultChalCompletedRetryTime = 1.seconds
  DefaultFinalizeRetries = 10
  DefaultFinalizeRetryTime = 1.seconds
  DefaultRandStringSize = 256
  ACMEHttpHeaders = [("Content-Type", "application/jose+json")]

type Nonce* = string
type Kid* = string

type ACMEDirectory* = object
  newNonce*: string
  newOrder*: string
  newAccount*: string

type ACMEApi* = ref object of RootObj
  directory: ACMEDirectory
  session: HttpSessionRef
  acmeServerURL*: string

type HTTPResponse* = object
  body*: JsonNode
  headers*: HttpTable

type JWK = object
  kty: string
  n: string
  e: string

# whether the request uses Kid or not
type ACMERequestType = enum
  ACMEJwkRequest
  ACMEKidRequest
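
# Per RFC 8555, a new-account request is self-signed and embeds the full JWK
# public key; every subsequent request instead identifies the account via the
# "kid" (the account URL returned at registration).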

type ACMERequestHeader = object
  alg: string
  typ: string
  nonce: string
  url: string
  case kind: ACMERequestType
  of ACMEJwkRequest:
    jwk: JWK
  of ACMEKidRequest:
    kid: Kid

type ACMERegisterRequest* = object
  termsOfServiceAgreed: bool
  contact: seq[string]

type ACMEAccountStatus = enum
  valid
  deactivated
  revoked

type ACMERegisterResponseBody = object
  status*: ACMEAccountStatus

type ACMERegisterResponse* = object
  kid*: Kid
  status*: ACMEAccountStatus

type ACMEChallengeStatus* {.pure.} = enum
  pending = "pending"
  processing = "processing"
  valid = "valid"
  invalid = "invalid"

type ACMEChallenge = object
  url*: string
  `type`*: string
  status*: ACMEChallengeStatus
  token*: string

type ACMEChallengeIdentifier = object
  `type`: string
  value: string

type ACMEChallengeRequest = object
  identifiers: seq[ACMEChallengeIdentifier]

type ACMEChallengeResponseBody = object
  status: ACMEChallengeStatus
  authorizations: seq[string]
  finalize: string

type ACMEChallengeResponse* = object
  status*: ACMEChallengeStatus
  authorizations*: seq[string]
  finalize*: string
  orderURL*: string

type ACMEChallengeResponseWrapper* = object
  finalizeURL*: string
  orderURL*: string
  dns01*: ACMEChallenge

type ACMEAuthorizationsResponse* = object
  challenges*: seq[ACMEChallenge]

type ACMECompletedResponse* = object
  checkURL: string

type ACMEOrderStatus* {.pure.} = enum
  pending = "pending"
  ready = "ready"
  processing = "processing"
  valid = "valid"
  invalid = "invalid"

type ACMECheckKind* = enum
  ACMEOrderCheck
  ACMEChallengeCheck

type ACMECheckResponse* = object
  case kind: ACMECheckKind
  of ACMEOrderCheck:
    orderStatus: ACMEOrderStatus
  of ACMEChallengeCheck:
    chalStatus: ACMEChallengeStatus
  retryAfter: Duration

type ACMEFinalizeResponse* = object
  status: ACMEOrderStatus

type ACMEOrderResponse* = object
  certificate: string
  expires: string

type ACMECertificateResponse* = object
  rawCertificate: string
  certificateExpiry: DateTime

template handleError*(msg: string, body: untyped): untyped =
  try:
    body
  except ACMEError as exc:
    raise exc
  except CancelledError as exc:
    raise exc
  except JsonKindError as exc:
    raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
  except ValueError as exc:
    raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
  except HttpError as exc:
    raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
  except CatchableError as exc:
    raise newException(ACMEError, msg & ": Unexpected error", exc)

method post*(
  self: ACMEApi, url: string, payload: string
): Future[HTTPResponse] {.
  async: (raises: [ACMEError, HttpError, CancelledError]), base
.}

method get*(
  self: ACMEApi, url: string
): Future[HTTPResponse] {.
  async: (raises: [ACMEError, HttpError, CancelledError]), base
.}

proc new*(
  T: typedesc[ACMEApi], acmeServerURL: string = LetsEncryptURL
): Future[ACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
  let session = HttpSessionRef.new()
  let directory = handleError("new API"):
    let rawResponse =
      await HttpClientRequestRef.get(session, acmeServerURL & "/directory").get().send()
    let body = await rawResponse.getResponseBody()
    body.to(ACMEDirectory)

  ACMEApi(session: session, directory: directory, acmeServerURL: acmeServerURL)

method requestNonce*(
  self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
  handleError("requestNonce"):
    let acmeResponse = await self.get(self.directory.newNonce)
    Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))

# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
  self: ACMEApi, url: string, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
  if not needsJwk and kid.isNone:
    raise newException(ACMEError, "kid not set")

  if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
    raise newException(ACMEError, "Unsupported signing key type")

  let newNonce = await self.requestNonce()
  if needsJwk:
    let pubkey = key.pubkey.rsakey
    let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
    let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
    ACMERequestHeader(
      kind: ACMEJwkRequest,
      alg: Alg,
      typ: "JWT",
      nonce: newNonce,
      url: url,
      jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
    )
  else:
    ACMERequestHeader(
      kind: ACMEKidRequest,
      alg: Alg,
      typ: "JWT",
      nonce: newNonce,
      url: url,
      kid: kid.get(),
    )

method post*(
  self: ACMEApi, url: string, payload: string
): Future[HTTPResponse] {.
  async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
  let rawResponse = await HttpClientRequestRef
    .post(self.session, url, body = payload, headers = ACMEHttpHeaders)
    .get()
    .send()
  let body = await rawResponse.getResponseBody()
  HTTPResponse(body: body, headers: rawResponse.headers)

method get*(
  self: ACMEApi, url: string
): Future[HTTPResponse] {.
  async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
  let rawResponse = await HttpClientRequestRef.get(self.session, url).get().send()
  let body = await rawResponse.getResponseBody()
  HTTPResponse(body: body, headers: rawResponse.headers)

proc createSignedAcmeRequest(
  self: ACMEApi,
  url: string,
  payload: auto,
  key: KeyPair,
  needsJwk: bool = false,
  kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
  if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
    raise newException(ACMEError, "Unsupported signing key type")

  let acmeHeader = await self.acmeHeader(url, key, needsJwk, kid)
  handleError("createSignedAcmeRequest"):
    var token = toJWT(%*{"header": acmeHeader, "claims": payload})
    let derPrivKey = key.seckey.rsakey.getBytes.get
    let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
    token.sign(pemPrivKey)
    $token.toFlattenedJson()

proc requestRegister*(
  self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
  handleError("acmeRegister"):
    let payload = await self.createSignedAcmeRequest(
      self.directory.newAccount, registerRequest, key, needsJwk = true
    )
    let acmeResponse = await self.post(self.directory.newAccount, payload)
    let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)

    ACMERegisterResponse(
      status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
    )

proc requestNewOrder*(
  self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  # request challenge from ACME server
  let orderRequest = ACMEChallengeRequest(
    identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
  )
  handleError("requestNewOrder"):
    let payload = await self.createSignedAcmeRequest(
      self.directory.newOrder, orderRequest, key, kid = Opt.some(kid)
    )
    let acmeResponse = await self.post(self.directory.newOrder, payload)

    let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
    if challengeResponseBody.authorizations.len() == 0:
      raise newException(ACMEError, "Authorizations field is empty")
    ACMEChallengeResponse(
      status: challengeResponseBody.status,
      authorizations: challengeResponseBody.authorizations,
      finalize: challengeResponseBody.finalize,
      orderURL: acmeResponse.headers.keyOrError("location"),
    )

proc requestAuthorizations*(
  self: ACMEApi, authorizations: seq[string], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestAuthorizations"):
    doAssert authorizations.len > 0
    let acmeResponse = await self.get(authorizations[0])
    acmeResponse.body.to(ACMEAuthorizationsResponse)

proc requestChallenge*(
  self: ACMEApi, domains: seq[string], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
  let challengeResponse = await self.requestNewOrder(domains, key, kid)

  let authorizationsResponse =
    await self.requestAuthorizations(challengeResponse.authorizations, key, kid)

  return ACMEChallengeResponseWrapper(
    finalizeURL: challengeResponse.finalize,
    orderURL: challengeResponse.orderURL,
    dns01: authorizationsResponse.challenges.filterIt(it.`type` == "dns-01")[0],
  )
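
# Note: the [0] above assumes the ACME server always offers a dns-01 challenge
# for each authorization; an empty filter result would raise at runtime.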

proc requestCheck*(
  self: ACMEApi, checkURL: string, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestCheck"):
    let acmeResponse = await self.get(checkURL)
    let retryAfter =
      try:
        parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
      except ValueError:
        DefaultChalCompletedRetryTime

    case checkKind
    of ACMEOrderCheck:
      try:
        ACMECheckResponse(
          kind: checkKind,
          orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(
          ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
        )
    of ACMEChallengeCheck:
      try:
        ACMECheckResponse(
          kind: checkKind,
          chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
          retryAfter: retryAfter,
        )
      except ValueError:
        raise newException(
          ACMEError, "Invalid challenge status: " & acmeResponse.body["status"].getStr
        )

proc requestCompleted*(
  self: ACMEApi, chalURL: string, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestCompleted (send notify)"):
    let payload =
      await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
    let acmeResponse = await self.post(chalURL, payload)
    acmeResponse.body.to(ACMECompletedResponse)

proc checkChallengeCompleted*(
  self: ACMEApi,
  checkURL: string,
  key: KeyPair,
  kid: Kid,
  retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  for i in 0 .. retries:
    let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
    case checkResponse.chalStatus
    of ACMEChallengeStatus.pending:
      await sleepAsync(checkResponse.retryAfter) # try again after some delay
    of ACMEChallengeStatus.valid:
      return true
    else:
      raise newException(
        ACMEError,
        "Failed challenge completion: expected 'valid', got '" &
          $checkResponse.chalStatus & "'",
      )
  return false
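
# checkChallengeCompleted returns false only when retries run out while the
# challenge is still pending; any terminal non-valid status raises ACMEError.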

proc completeChallenge*(
  self: ACMEApi,
  chalURL: string,
  key: KeyPair,
  kid: Kid,
  retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  let completedResponse = await self.requestCompleted(chalURL, key, kid)
  # check until acme server is done (poll validation)
  return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)

proc requestFinalize*(
  self: ACMEApi, domain: string, finalizeURL: string, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let derCSR = createCSR(domain)
  let b64CSR = base64.encode(derCSR.toSeq, safe = true)

  handleError("requestFinalize"):
    let payload = await self.createSignedAcmeRequest(
      finalizeURL, %*{"csr": b64CSR}, key, kid = Opt.some(kid)
    )
    let acmeResponse = await self.post(finalizeURL, payload)
    # server responds with updated order response
    acmeResponse.body.to(ACMEFinalizeResponse)

proc checkCertFinalized*(
  self: ACMEApi,
  orderURL: string,
  key: KeyPair,
  kid: Kid,
  retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  for i in 0 .. retries:
    let checkResponse = await self.requestCheck(orderURL, ACMEOrderCheck, key, kid)
    case checkResponse.orderStatus
    of ACMEOrderStatus.valid:
      return true
    of ACMEOrderStatus.processing:
      await sleepAsync(checkResponse.retryAfter) # try again after some delay
    else:
      raise newException(
        ACMEError,
        "Failed certificate finalization: expected 'valid', got '" &
          $checkResponse.orderStatus & "'",
      )
  return false

proc certificateFinalized*(
  self: ACMEApi,
  domain: string,
  finalizeURL: string,
  orderURL: string,
  key: KeyPair,
  kid: Kid,
  retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
  let finalizeResponse = await self.requestFinalize(domain, finalizeURL, key, kid)
  # keep checking order until cert is valid (done)
  return await self.checkCertFinalized(orderURL, key, kid, retries = retries)

proc requestGetOrder*(
  self: ACMEApi, orderURL: string
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  handleError("requestGetOrder"):
    let acmeResponse = await self.get(orderURL)
    acmeResponse.body.to(ACMEOrderResponse)

proc downloadCertificate*(
  self: ACMEApi, orderURL: string
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
  let orderResponse = await self.requestGetOrder(orderURL)

  handleError("downloadCertificate"):
    let rawResponse = await HttpClientRequestRef
      .get(self.session, orderResponse.certificate)
      .get()
      .send()
    ACMECertificateResponse(
      rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
      certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
    )

proc close*(self: ACMEApi): Future[void] {.async: (raises: [CancelledError]).} =
  await self.session.closeWait()
libp2p/autotls/acme/mockapi.nim (new file, 37 lines)
@@ -0,0 +1,37 @@
import chronos, chronos/apps/http/httpclient, json

import ./api, ./utils

export api

type MockACMEApi* = ref object of ACMEApi
  parent*: ACMEApi
  mockedHeaders*: HttpTable
  mockedBody*: JsonNode

proc new*(
  T: typedesc[MockACMEApi]
): Future[MockACMEApi] {.async: (raises: [ACMEError, CancelledError]).} =
  let directory = ACMEDirectory(
    newNonce: LetsEncryptURL & "/new-nonce",
    newOrder: LetsEncryptURL & "/new-order",
    newAccount: LetsEncryptURL & "/new-account",
  )
  MockACMEApi(
    session: HttpSessionRef.new(), directory: directory, acmeServerURL: LetsEncryptURL
  )

method requestNonce*(
  self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
  return self.acmeServerURL & "/acme/1234"

method post*(
  self: MockACMEApi, url: string, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)

method get*(
  self: MockACMEApi, url: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
  HTTPResponse(body: self.mockedBody, headers: self.mockedHeaders)
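
# Test usage sketch: assign mockedBody/mockedHeaders to the canned server reply
# before calling an API proc, so no network traffic is generated.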

libp2p/autotls/acme/utils.nim (new file, 48 lines)
@@ -0,0 +1,48 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi

type ACMEError* = object of LPError

proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
  if not table.contains(key):
    raise newException(ValueError, "key " & key & " not present in headers")
  table.getString(key)

proc base64UrlEncode*(data: seq[byte]): string =
  ## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
  var encoded = base64.encode(data, safe = true)
  encoded.removeSuffix("=")
  encoded.removeSuffix("=")
  return encoded
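
# The two removeSuffix calls strip at most the two '=' padding characters that
# base64 can produce; safe = true already selects the URL-safe '-'/'_' alphabet.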

proc getResponseBody*(
  response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
  try:
    let responseBody = bytesToString(await response.getBodyBytes()).parseJson()
    return responseBody
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
  except Exception as exc: # this is required for nim 1.6
    raise
      newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)

proc createCSR*(domain: string): string {.raises: [ACMEError].} =
  var certKey: cert_key_t
  var certCtx: cert_context_t
  var derCSR: ptr cert_buffer = nil

  let personalizationStr = "libp2p_autotls"
  if cert_init_drbg(
    personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
  ) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to initialize certCtx")
  if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to generate cert key")

  if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
    raise newException(ACMEError, "Failed to create CSR")
@@ -23,7 +23,7 @@ import
  stream/connection,
  multiaddress,
  crypto/crypto,
  transports/[transport, tcptransport],
  transports/[transport, tcptransport, wstransport, memorytransport],
  muxers/[muxer, mplex/mplex, yamux/yamux],
  protocols/[identify, secure/secure, secure/noise, rendezvous],
  protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
@@ -35,10 +35,15 @@ import
  utility
import services/wildcardresolverservice

export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export
  switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
  TLSCertificate, TLSFlags, ServerFlags

const MemoryAutoAddress* = memorytransport.MemoryAutoAddress

type
  TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
  TransportProvider* {.public.} =
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}

  SecureProtocol* {.pure.} = enum
    Noise
@@ -151,7 +156,7 @@ proc withTransport*(
  let switch = SwitchBuilder
    .new()
    .withTransport(
      proc(upgr: Upgrade): Transport =
      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
        TcpTransport.new(flags, upgr)
    )
    .build()
@@ -162,10 +167,37 @@ proc withTcpTransport*(
  b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
  b.withTransport(
    proc(upgr: Upgrade): Transport =
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      TcpTransport.new(flags, upgr)
  )

proc withWsTransport*(
  b: SwitchBuilder,
  tlsPrivateKey: TLSPrivateKey = nil,
  tlsCertificate: TLSCertificate = nil,
  tlsFlags: set[TLSFlags] = {},
  flags: set[ServerFlags] = {},
): SwitchBuilder =
  b.withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
  )

when defined(libp2p_quic_support):
  import transports/quictransport

  proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
    b.withTransport(
      proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
        QuicTransport.new(upgr, privateKey)
    )

proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
  b.withTransport(
    proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
      MemoryTransport.new(upgr)
  )

proc withRng*(b: SwitchBuilder, rng: ref HmacDrbgContext): SwitchBuilder {.public.} =
  b.rng = rng
  b
@@ -247,6 +279,10 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
  let pkRes = PrivateKey.random(b.rng[])
  let seckey = b.privKey.get(otherwise = pkRes.expect("Expected default Private Key"))

  if b.secureManagers.len == 0:
    debug "no secure managers defined. Adding noise by default"
    b.secureManagers.add(SecureProtocol.Noise)

  var secureManagerInstances: seq[Secure]
  if SecureProtocol.Noise in b.secureManagers:
    secureManagerInstances.add(Noise.new(b.rng, seckey).Secure)
@@ -270,7 +306,7 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
  let transports = block:
    var transports: seq[Transport]
    for tProvider in b.transports:
      transports.add(tProvider(muxedUpgrade))
      transports.add(tProvider(muxedUpgrade, seckey))
    transports

  if b.secureManagers.len == 0:

@@ -10,10 +10,11 @@
## This module implements CID (Content IDentifier).

{.push raises: [].}
{.used.}

import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
import stew/[base58, results]
import multibase, multicodec, multihash, vbuffer, varint, results
import stew/base58

export results

@@ -41,6 +42,7 @@ const ContentIdsList = [
  multiCodec("dag-pb"),
  multiCodec("dag-cbor"),
  multiCodec("dag-json"),
  multiCodec("libp2p-key"),
  multiCodec("git-raw"),
  multiCodec("eth-block"),
  multiCodec("eth-block-list"),

@@ -42,8 +42,9 @@ type
    else:
      discard

  ConnEventHandler* =
    proc(peerId: PeerId, event: ConnEvent): Future[void] {.gcsafe, raises: [].}
  ConnEventHandler* = proc(peerId: PeerId, event: ConnEvent): Future[void] {.
    gcsafe, async: (raises: [CancelledError])
  .}

  PeerEventKind* {.pure.} = enum
    Left
@@ -57,8 +58,9 @@ type
    else:
      discard

  PeerEventHandler* =
    proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
  PeerEventHandler* = proc(peerId: PeerId, event: PeerEvent): Future[void] {.
    gcsafe, async: (raises: [CancelledError])
  .}

  ConnManager* = ref object of RootObj
    maxConnsPerPeer: int
@@ -123,7 +125,9 @@ proc removeConnEventHandler*(
) =
  c.connEvents[kind].excl(handler)

proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async.} =
proc triggerConnEvent*(
  c: ConnManager, peerId: PeerId, event: ConnEvent
) {.async: (raises: [CancelledError]).} =
  try:
    trace "About to trigger connection events", peer = peerId
    if c.connEvents[event.kind].len() > 0:
@@ -136,7 +140,7 @@ proc triggerConnEvent*(c: ConnManager, peerId: PeerId, event: ConnEvent) {.async
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    warn "Exception in triggerConnEvents",
    warn "Exception in triggerConnEvent",
      description = exc.msg, peer = peerId, event = $event

proc addPeerEventHandler*(
@@ -154,7 +158,9 @@ proc removePeerEventHandler*(
) =
  c.peerEvents[kind].excl(handler)

proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.async.} =
proc triggerPeerEvents*(
  c: ConnManager, peerId: PeerId, event: PeerEvent
) {.async: (raises: [CancelledError]).} =
  trace "About to trigger peer events", peer = peerId
  if c.peerEvents[event.kind].len == 0:
    return
@@ -174,16 +180,16 @@ proc triggerPeerEvents*(c: ConnManager, peerId: PeerId, event: PeerEvent) {.asyn

proc expectConnection*(
  c: ConnManager, p: PeerId, dir: Direction
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [AlreadyExpectingConnectionError, CancelledError]).} =
  ## Wait for a peer to connect to us. This will bypass the `MaxConnectionsPerPeer` limit
  let key = (p, dir)
  if key in c.expectedConnectionsOverLimit:
    raise newException(
      AlreadyExpectingConnectionError,
      "Already expecting an incoming connection from that peer",
      "Already expecting an incoming connection from that peer: " & shortLog(p),
    )

  let future = newFuture[Muxer]()
  let future = Future[Muxer].Raising([CancelledError]).init()
  c.expectedConnectionsOverLimit[key] = future

  try:
@@ -205,18 +211,18 @@ proc contains*(c: ConnManager, muxer: Muxer): bool =
  let conn = muxer.connection
  return muxer in c.muxed.getOrDefault(conn.peerId)

proc closeMuxer(muxer: Muxer) {.async.} =
proc closeMuxer(muxer: Muxer) {.async: (raises: [CancelledError]).} =
  trace "Cleaning up muxer", m = muxer

  await muxer.close()
  if not (isNil(muxer.handler)):
    try:
      await muxer.handler # TODO noraises?
      await muxer.handler
    except CatchableError as exc:
      trace "Exception in close muxer handler", description = exc.msg
  trace "Cleaned up muxer", m = muxer

proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
proc muxCleanup(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
  try:
    trace "Triggering disconnect events", mux
    let peerId = mux.connection.peerId
@@ -238,7 +244,7 @@ proc muxCleanup(c: ConnManager, mux: Muxer) {.async.} =
    # do not need to propagate CancelledError and should handle other errors
    warn "Unexpected exception peer cleanup handler", mux, description = exc.msg

proc onClose(c: ConnManager, mux: Muxer) {.async.} =
proc onClose(c: ConnManager, mux: Muxer) {.async: (raises: []).} =
  ## connection close event handler
  ##
  ## triggers the connections resource cleanup
@@ -272,7 +278,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
    trace "connection not found", peerId
  return mux

proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =
proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [LPError].} =
  ## store the connection and muxer
  ##

@@ -324,7 +330,9 @@ proc storeMuxer*(c: ConnManager, muxer: Muxer) {.raises: [CatchableError].} =

  trace "Stored muxer", muxer, direction = $muxer.connection.dir, peers = c.muxed.len

proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
proc getIncomingSlot*(
  c: ConnManager
): Future[ConnectionSlot] {.async: (raises: [CancelledError]).} =
  await c.inSema.acquire()
  return ConnectionSlot(connManager: c, direction: In)

@@ -339,25 +347,21 @@ proc getOutgoingSlot*(
    raise newTooManyConnectionsError()
  return ConnectionSlot(connManager: c, direction: Out)

func semaphore(c: ConnManager, dir: Direction): AsyncSemaphore {.inline.} =
  return if dir == In: c.inSema else: c.outSema

proc slotsAvailable*(c: ConnManager, dir: Direction): int =
  case dir
  of Direction.In:
    return c.inSema.count
  of Direction.Out:
    return c.outSema.count
  return semaphore(c, dir).count

proc release*(cs: ConnectionSlot) =
  if cs.direction == In:
    cs.connManager.inSema.release()
  else:
    cs.connManager.outSema.release()
  semaphore(cs.connManager, cs.direction).release()

proc trackConnection*(cs: ConnectionSlot, conn: Connection) =
  if isNil(conn):
    cs.release()
    return

  proc semaphoreMonitor() {.async.} =
  proc semaphoreMonitor() {.async: (raises: [CancelledError]).} =
    try:
      await conn.join()
    except CatchableError as exc:
@@ -373,14 +377,18 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
    return
  cs.trackConnection(mux.connection)

proc getStream*(c: ConnManager, muxer: Muxer): Future[Connection] {.async.} =
proc getStream*(
  c: ConnManager, muxer: Muxer
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
  ## get a muxed stream for the passed muxer
  ##

  if not (isNil(muxer)):
    return await muxer.newStream()

proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =
proc getStream*(
  c: ConnManager, peerId: PeerId
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
  ## get a muxed stream for the passed peer from any connection
  ##

@@ -388,13 +396,13 @@ proc getStream*(c: ConnManager, peerId: PeerId): Future[Connection] {.async.} =

proc getStream*(
  c: ConnManager, peerId: PeerId, dir: Direction
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [LPStreamError, MuxerError, CancelledError]).} =
  ## get a muxed stream for the passed peer from a connection with `dir`
  ##

  return await c.getStream(c.selectMuxer(peerId, dir))

proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =
proc dropPeer*(c: ConnManager, peerId: PeerId) {.async: (raises: [CancelledError]).} =
  ## drop connections and cleanup resources for peer
  ##
  trace "Dropping peer", peerId
@@ -405,7 +413,7 @@ proc dropPeer*(c: ConnManager, peerId: PeerId) {.async.} =

  trace "Peer dropped", peerId

proc close*(c: ConnManager) {.async.} =
proc close*(c: ConnManager) {.async: (raises: [CancelledError]).} =
  ## cleanup resources for the connection
  ## manager
  ##

@@ -76,7 +76,7 @@ import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
import stew/results
import results
export results, utility

# This is workaround for Nim's `import` bug

@@ -18,7 +18,7 @@
{.push raises: [].}

import bearssl/[ec, rand]
import stew/results
import results
from stew/assign2 import assign
export results

@@ -21,7 +21,8 @@ import bearssl/[ec, rand, hash]
import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import stew/ctops
import results

import ../utility

@@ -18,7 +18,8 @@ import constants
import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import results
import stew/ctops

import ../../utility

@@ -28,8 +28,7 @@ proc hkdf*[T: sha256, len: static int](
    if salt.len > 0:
      unsafeAddr salt[0]
    else:
      nil
    ,
    nil,
    csize_t(salt.len),
  )
  hkdfInject(
@@ -37,8 +36,7 @@ proc hkdf*[T: sha256, len: static int](
    if ikm.len > 0:
      unsafeAddr ikm[0]
    else:
      nil
    ,
    nil,
    csize_t(ikm.len),
  )
  hkdfFlip(ctx)
@@ -48,8 +46,7 @@ proc hkdf*[T: sha256, len: static int](
    if info.len > 0:
      unsafeAddr info[0]
    else:
      nil
    ,
    nil,
    csize_t(info.len),
    addr outputs[i][0],
    csize_t(outputs[i].len),

@@ -11,7 +11,8 @@

{.push raises: [].}

import stew/[endians2, results, ctops]
import stew/[endians2, ctops]
import results
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
@@ -291,28 +292,6 @@ proc asn1EncodeBitString*(
    dest[2 + lenlen + bytelen - 1] = lastbyte and mask
  res

proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte], value: T): int =
  var v = value
  if value <= cast[T](0x7F):
    if len(dest) >= 1:
      dest[0] = cast[byte](value)
    1
  else:
    var s = 0
    var res = 0
    while v != 0:
      v = v shr 7
      s += 7
      inc(res)
    if len(dest) >= res:
      var k = 0
      while s != 0:
        s -= 7
        dest[k] = cast[byte](((value shr s) and cast[T](0x7F)) or cast[T](0x80))
        inc(k)
      dest[k - 1] = dest[k - 1] and 0x7F'u8
    res

proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
  ## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
  ## number of bytes (octets) used.
@@ -665,9 +644,6 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
        return ok(field)
      else:
        return err(Asn1Error.NoSupport)

      inclass = false
      ttag = 0
    else:
      return err(Asn1Error.NoSupport)

@@ -17,7 +17,8 @@

import bearssl/[rsa, rand, hash]
import minasn1
import stew/[results, ctops]
import results
import stew/ctops
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils

@@ -10,7 +10,7 @@
{.push raises: [].}

import bearssl/rand
import secp256k1, stew/[byteutils, results], nimcrypto/[hash, sha2]
import secp256k1, results, stew/byteutils, nimcrypto/[hash, sha2]

export sha2, results, rand

@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
  var buffer: seq[byte]
  try:
    buffer = hexToSeqByte(data)
  except ValueError:
    return err("secp: Hex to bytes failed")
  except ValueError as e:
    let errMsg = "secp: Hex to bytes failed: " & e.msg
    return err(errMsg.cstring)
  init(sig, buffer)

proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =
@@ -146,7 +146,7 @@ type
|
||||
|
||||
PubsubTicket* = ref object
|
||||
topic*: string
|
||||
handler*: P2PPubSubCallback
|
||||
handler*: P2PPubSubCallback2
|
||||
transp*: StreamTransport
|
||||
|
||||
PubSubMessage* = object
|
||||
@@ -158,12 +158,14 @@ type
|
||||
key*: PublicKey
|
||||
|
||||
P2PStreamCallback* = proc(api: DaemonAPI, stream: P2PStream): Future[void] {.
|
||||
gcsafe, raises: [CatchableError]
|
||||
gcsafe, async: (raises: [CatchableError])
|
||||
.}
|
||||
P2PPubSubCallback* = proc(
|
||||
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
|
||||
): Future[bool] {.gcsafe, raises: [CatchableError].}
|
||||
|
||||
P2PPubSubCallback2* = proc(
|
||||
api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
|
||||
): Future[bool] {.async: (raises: [CatchableError]).}
|
||||
DaemonError* = object of LPError
|
||||
DaemonRemoteError* = object of DaemonError
|
||||
DaemonLocalError* = object of DaemonError
|
||||
@@ -485,7 +487,11 @@ proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalErro
|
||||
if initProtoBuffer(error).getRequiredField(1, result).isErr():
|
||||
raise newException(DaemonLocalError, "Error message is missing!")
|
||||
|
||||
proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
|
||||
proc recvMessage(
|
||||
conn: StreamTransport
|
||||
): Future[seq[byte]] {.
|
||||
async: (raises: [TransportIncompleteError, TransportError, CancelledError])
|
||||
.} =
|
||||
var
|
||||
size: uint
|
||||
length: int
|
||||
@@ -508,13 +514,19 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
|
||||
|
||||
result = buffer
|
||||
|
||||
proc newConnection*(api: DaemonAPI): Future[StreamTransport] {.raises: [LPError].} =
|
||||
result = connect(api.address)
|
||||
proc newConnection*(
|
||||
api: DaemonAPI
|
||||
): Future[StreamTransport] {.
|
||||
async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
|
||||
.} =
|
||||
await connect(api.address)
|
||||
|
||||
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
|
||||
result = transp.closeWait()
|
||||
proc closeConnection*(
|
||||
api: DaemonAPI, transp: StreamTransport
|
||||
): Future[void] {.async: (raises: [CancelledError]).} =
|
||||
await transp.closeWait()
|
||||
|
||||
proc socketExists(address: MultiAddress): Future[bool] {.async.} =
|
||||
proc socketExists(address: MultiAddress): Future[bool] {.async: (raises: []).} =
|
||||
try:
|
||||
var transp = await connect(address)
|
||||
await transp.closeWait()
|
||||
@@ -534,7 +546,9 @@ else:
|
||||
proc getProcessId(): int =
|
||||
result = int(posix.getpid())
|
||||
|
||||
proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.} =
|
||||
proc getSocket(
|
||||
pattern: string, count: ptr int
|
||||
): Future[MultiAddress] {.async: (raises: [ValueError, LPError]).} =
|
||||
var sockname = ""
|
||||
var pid = $getProcessId()
|
||||
sockname = pattern % [pid, $(count[])]
|
||||
@@ -562,7 +576,35 @@ proc getSocket(pattern: string, count: ptr int): Future[MultiAddress] {.async.}
|
||||
closeSocket(sock)
|
||||
|
||||
# This is forward declaration needed for newDaemonApi()
|
||||
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
|
||||
proc listPeers*(
|
||||
api: DaemonAPI
|
||||
): Future[seq[PeerInfo]] {.
|
||||
async: (
|
||||
raises: [
|
||||
ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
|
||||
CancelledError, LPError,
|
||||
]
|
||||
)
|
||||
.}
|
||||
|
||||
template exceptionToAssert(body: untyped): untyped =
|
||||
block:
|
||||
var res: type(body)
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.push warning[BareExcept]: off.}
|
||||
try:
|
||||
res = body
|
||||
except OSError as exc:
|
||||
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except IOError as exc:
|
||||
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Defect as exc:
|
||||
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
|
||||
when defined(nimHasWarnBareExcept):
|
||||
{.pop.}
|
||||
res
|
||||
|
||||
proc copyEnv(): StringTableRef =
|
||||
## This procedure copy all environment variables into StringTable.
|
||||
@@ -586,7 +628,14 @@ proc newDaemonApi*(
    peersRequired = 2,
    logFile = "",
    logLevel = IpfsLogLevel.Debug,
): Future[DaemonAPI] {.async.} =
): Future[DaemonAPI] {.
    async: (
      raises: [
        ValueError, DaemonLocalError, CancelledError, LPError, OSError, IOError,
        AsyncError,
      ]
    )
.} =
  ## Initialize connection to `go-libp2p-daemon` control socket.
  ##
  ## ``flags`` - set of P2PDaemonFlags.
@@ -780,7 +829,7 @@ proc newDaemonApi*(

  result = api

proc close*(stream: P2PStream) {.async.} =
proc close*(stream: P2PStream) {.async: (raises: [DaemonLocalError]).} =
  ## Close ``stream``.
  if P2PStreamFlags.Closed notin stream.flags:
    await stream.transp.closeWait()
@@ -789,7 +838,9 @@ proc close*(stream: P2PStream) {.async.} =
  else:
    raise newException(DaemonLocalError, "Stream is already closed!")

proc close*(api: DaemonAPI) {.async.} =
proc close*(
    api: DaemonAPI
) {.async: (raises: [TransportOsError, LPError, ValueError, OSError, CancelledError]).} =
  ## Shutdown connections to `go-libp2p-daemon` control socket.
  # await api.pool.close()
  # Closing all pending servers.
@@ -827,7 +878,9 @@ template withMessage(m, body: untyped): untyped =

proc transactMessage(
    transp: StreamTransport, pb: ProtoBuffer
): Future[ProtoBuffer] {.async.} =
): Future[ProtoBuffer] {.
    async: (raises: [DaemonLocalError, TransportError, CancelledError])
.} =
  let length = pb.getLen()
  let res = await transp.write(pb.getPtr(), length)
  if res != length:
@@ -845,7 +898,11 @@ proc getPeerInfo(pb: ProtoBuffer): PeerInfo {.raises: [DaemonLocalError].} =

  discard pb.getRepeatedField(2, result.addresses)

proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =
proc identity*(
    api: DaemonAPI
): Future[PeerInfo] {.
    async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError])
.} =
  ## Get Node identity information
  var transp = await api.newConnection()
  try:
@@ -860,7 +917,7 @@ proc identity*(api: DaemonAPI): Future[PeerInfo] {.async.} =

proc connect*(
    api: DaemonAPI, peer: PeerId, addresses: seq[MultiAddress], timeout = 0
) {.async.} =
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
  ## Connect to remote peer with id ``peer`` and addresses ``addresses``.
  var transp = await api.newConnection()
  try:
@@ -870,7 +927,9 @@ proc connect*(
  except CatchableError:
    await api.closeConnection(transp)

proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
proc disconnect*(
    api: DaemonAPI, peer: PeerId
) {.async: (raises: [MaInvalidAddress, TransportError, CancelledError, LPError]).} =
  ## Disconnect from remote peer with id ``peer``.
  var transp = await api.newConnection()
  try:
@@ -882,7 +941,12 @@ proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =

proc openStream*(
    api: DaemonAPI, peer: PeerId, protocols: seq[string], timeout = 0
): Future[P2PStream] {.async.} =
): Future[P2PStream] {.
    async: (
      raises:
        [MaInvalidAddress, TransportError, CancelledError, LPError, DaemonLocalError]
    )
.} =
  ## Open new stream to peer ``peer`` using one of the protocols in
  ## ``protocols``. Returns ``StreamTransport`` for the stream.
  var transp = await api.newConnection()
@@ -903,11 +967,12 @@ proc openStream*(
      stream.flags.incl(Outbound)
      stream.transp = transp
      result = stream
  except CatchableError as exc:
  except ResultError[ProtoError] as e:
    await api.closeConnection(transp)
    raise exc
    raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)

proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
  # must not specify raised exceptions as this is StreamCallback from chronos
  var api = getUserData[DaemonAPI](server)
  var message = await transp.recvMessage()
  var pb = initProtoBuffer(message)
@@ -927,11 +992,28 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =

proc addHandler*(
    api: DaemonAPI, protocols: seq[string], handler: P2PStreamCallback
) {.async, raises: [LPError].} =
) {.
    async: (
      raises: [
        MaInvalidAddress, DaemonLocalError, TransportError, CancelledError, LPError,
        ValueError,
      ]
    )
.} =
  ## Add stream handler ``handler`` for set of protocols ``protocols``.
  var transp = await api.newConnection()
  let maddress = await getSocket(api.pattern, addr api.ucounter)
  var server = createStreamServer(maddress, streamHandler, udata = api)

  var removeHandler = proc(): Future[void] {.
      async: (raises: [CancelledError, TransportError])
  .} =
    for item in protocols:
      api.handlers.del(item)
    server.stop()
    server.close()
    await server.join()

  try:
    for item in protocols:
      api.handlers[item] = handler
@@ -939,17 +1021,28 @@ proc addHandler*(
    var pb = await transp.transactMessage(requestStreamHandler(maddress, protocols))
    pb.withMessage:
      api.servers.add(P2PServer(server: server, address: maddress))
  except CatchableError as exc:
    for item in protocols:
      api.handlers.del(item)
    server.stop()
    server.close()
    await server.join()
    raise exc
  except DaemonLocalError as e:
    await removeHandler()
    raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
  except TransportError as e:
    await removeHandler()
    raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
  except CancelledError as e:
    await removeHandler()
    raise e
  finally:
    await api.closeConnection(transp)

proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
proc listPeers*(
    api: DaemonAPI
): Future[seq[PeerInfo]] {.
    async: (
      raises: [
        ValueError, DaemonLocalError, OSError, MaInvalidAddress, TransportError,
        CancelledError, LPError,
      ]
    )
.} =
  ## Get list of remote peers to which we are currently connected.
  var transp = await api.newConnection()
  try:
@@ -964,7 +1057,14 @@ proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.} =
  finally:
    await api.closeConnection(transp)

proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.} =
proc cmTagPeer*(
    api: DaemonAPI, peer: PeerId, tag: string, weight: int
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Tag peer with id ``peer`` using ``tag`` and ``weight``.
  var transp = await api.newConnection()
  try:
@@ -974,7 +1074,14 @@ proc cmTagPeer*(api: DaemonAPI, peer: PeerId, tag: string, weight: int) {.async.
  finally:
    await api.closeConnection(transp)

proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
proc cmUntagPeer*(
    api: DaemonAPI, peer: PeerId, tag: string
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Remove tag ``tag`` from peer with id ``peer``.
  var transp = await api.newConnection()
  try:
@@ -984,7 +1091,14 @@ proc cmUntagPeer*(api: DaemonAPI, peer: PeerId, tag: string) {.async.} =
  finally:
    await api.closeConnection(transp)

proc cmTrimPeers*(api: DaemonAPI) {.async.} =
proc cmTrimPeers*(
    api: DaemonAPI
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Trim all connections.
  var transp = await api.newConnection()
  try:
@@ -1058,7 +1172,12 @@ proc getDhtMessageType(

proc dhtFindPeer*(
    api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PeerInfo] {.async.} =
): Future[PeerInfo] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Find peer with id ``peer`` and return peer information ``PeerInfo``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1073,7 +1192,12 @@ proc dhtFindPeer*(

proc dhtGetPublicKey*(
    api: DaemonAPI, peer: PeerId, timeout = 0
): Future[PublicKey] {.async.} =
): Future[PublicKey] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get peer's public key from peer with id ``peer``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1088,7 +1212,12 @@ proc dhtGetPublicKey*(

proc dhtGetValue*(
    api: DaemonAPI, key: string, timeout = 0
): Future[seq[byte]] {.async.} =
): Future[seq[byte]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get value associated with ``key``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1103,7 +1232,12 @@ proc dhtGetValue*(

proc dhtPutValue*(
    api: DaemonAPI, key: string, value: seq[byte], timeout = 0
) {.async.} =
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Associate ``value`` with ``key``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1116,7 +1250,14 @@ proc dhtPutValue*(
  finally:
    await api.closeConnection(transp)

proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =
proc dhtProvide*(
    api: DaemonAPI, cid: Cid, timeout = 0
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Provide content with id ``cid``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1131,7 +1272,12 @@ proc dhtProvide*(api: DaemonAPI, cid: Cid, timeout = 0) {.async.} =

proc dhtFindPeersConnectedToPeer*(
    api: DaemonAPI, peer: PeerId, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Find peers which are connected to peer with id ``peer``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1157,7 +1303,12 @@ proc dhtFindPeersConnectedToPeer*(

proc dhtGetClosestPeers*(
    api: DaemonAPI, key: string, timeout = 0
): Future[seq[PeerId]] {.async.} =
): Future[seq[PeerId]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get closest peers for ``key``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1183,7 +1334,12 @@ proc dhtGetClosestPeers*(

proc dhtFindProviders*(
    api: DaemonAPI, cid: Cid, count: uint32, timeout = 0
): Future[seq[PeerInfo]] {.async.} =
): Future[seq[PeerInfo]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get ``count`` providers for content with id ``cid``.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1209,7 +1365,12 @@ proc dhtFindProviders*(

proc dhtSearchValue*(
    api: DaemonAPI, key: string, timeout = 0
): Future[seq[seq[byte]]] {.async.} =
): Future[seq[seq[byte]]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Search for value with ``key``, return list of values found.
  ##
  ## You can specify timeout for DHT request with ``timeout`` value. ``0`` value
@@ -1232,7 +1393,14 @@ proc dhtSearchValue*(
  finally:
    await api.closeConnection(transp)

proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
proc pubsubGetTopics*(
    api: DaemonAPI
): Future[seq[string]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get list of topics this node is subscribed to.
  var transp = await api.newConnection()
  try:
@@ -1245,7 +1413,14 @@ proc pubsubGetTopics*(api: DaemonAPI): Future[seq[string]] {.async.} =
  finally:
    await api.closeConnection(transp)

proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.async.} =
proc pubsubListPeers*(
    api: DaemonAPI, topic: string
): Future[seq[PeerId]] {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get list of peers we are connected to and which also subscribed to topic
  ## ``topic``.
  var transp = await api.newConnection()
@@ -1260,7 +1435,14 @@ proc pubsubListPeers*(api: DaemonAPI, topic: string): Future[seq[PeerId]] {.asyn
  finally:
    await api.closeConnection(transp)

proc pubsubPublish*(api: DaemonAPI, topic: string, value: seq[byte]) {.async.} =
proc pubsubPublish*(
    api: DaemonAPI, topic: string, value: seq[byte]
) {.
    async: (
      raises:
        [DaemonLocalError, MaInvalidAddress, TransportError, CancelledError, LPError]
    )
.} =
  ## Get list of peer identifiers which are subscribed to topic ``topic``.
  var transp = await api.newConnection()
  try:
@@ -1280,7 +1462,13 @@ proc getPubsubMessage*(pb: ProtoBuffer): PubSubMessage =
  discard pb.getField(5, result.signature)
  discard pb.getField(6, result.key)

proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
proc pubsubLoop(
    api: DaemonAPI, ticket: PubsubTicket
) {.
    async: (
      raises: [TransportIncompleteError, TransportError, CancelledError, CatchableError]
    )
.} =
  while true:
    var pbmessage = await ticket.transp.recvMessage()
    if len(pbmessage) == 0:
@@ -1295,8 +1483,13 @@ proc pubsubLoop(api: DaemonAPI, ticket: PubsubTicket) {.async.} =
      break

proc pubsubSubscribe*(
    api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.async.} =
    api: DaemonAPI, topic: string, handler: P2PPubSubCallback2
): Future[PubsubTicket] {.
    async: (
      raises:
        [MaInvalidAddress, TransportError, LPError, CancelledError, DaemonLocalError]
    )
.} =
  ## Subscribe to topic ``topic``.
  var transp = await api.newConnection()
  try:
@@ -1308,10 +1501,32 @@ proc pubsubSubscribe*(
    ticket.transp = transp
    asyncSpawn pubsubLoop(api, ticket)
    result = ticket
  except CatchableError as exc:
  except DaemonLocalError as exc:
    await api.closeConnection(transp)
    raise newException(
      DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
    )
  except TransportError as exc:
    await api.closeConnection(transp)
    raise newException(
      TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
    )
  except CancelledError as exc:
    await api.closeConnection(transp)
    raise exc

proc pubsubSubscribe*(
    api: DaemonAPI, topic: string, handler: P2PPubSubCallback
): Future[PubsubTicket] {.
    async: (raises: [CatchableError]), deprecated: "Use P2PPubSubCallback2 instead"
.} =
  proc wrap(
      api: DaemonAPI, ticket: PubsubTicket, message: PubSubMessage
  ): Future[bool] {.async: (raises: [CatchableError]).} =
    await handler(api, ticket, message)

  await pubsubSubscribe(api, topic, wrap)

proc shortLog*(pinfo: PeerInfo): string =
  ## Get string representation of ``PeerInfo`` object.
  result = newStringOfCap(128)
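With the explicit raises annotations above, callers of the daemon API can match
on each failure mode separately. A minimal usage sketch, assuming a reachable
go-libp2p-daemon and the types shown in this file:

    import chronos, stew/byteutils
    import libp2p/daemon/daemonapi

    proc demo() {.async.} =
      let api = await newDaemonApi()
      let info = await api.identity() # PeerInfo of the daemon itself
      echo "daemon peer: ", info.peer
      try:
        await api.pubsubPublish("demo-topic", "hello".toBytes())
      except DaemonLocalError as e:
        echo "daemon refused publish: ", e.msg
      await api.close()

    waitFor demo()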
@@ -55,7 +55,7 @@ proc newPool*(
    address: TransportAddress,
    poolsize: int = DefaultPoolSize,
    bufferSize = DefaultStreamBufferSize,
): Future[TransportPool] {.async.} =
): Future[TransportPool] {.async: (raises: [CancelledError]).} =
  ## Establish pool of connections to address ``address`` with size
  ## ``poolsize``.
  var pool = new TransportPool
@@ -80,7 +80,9 @@ proc newPool*(
  pool.state = Connected
  result = pool

proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
proc acquire*(
    pool: TransportPool
): Future[StreamTransport] {.async: (raises: [CancelledError, TransportPoolError]).} =
  ## Acquire non-busy connection from pool ``pool``.
  var transp: StreamTransport
  if pool.state in {Connected}:
@@ -102,7 +104,9 @@ proc acquire*(pool: TransportPool): Future[StreamTransport] {.async.} =
    raise newException(TransportPoolError, "Pool is not ready!")
  result = transp

proc release*(pool: TransportPool, transp: StreamTransport) =
proc release*(
    pool: TransportPool, transp: StreamTransport
) {.async: (raises: [TransportPoolError]).} =
  ## Release connection ``transp`` back to pool ``pool``.
  if pool.state in {Connected, Closing}:
    var found = false
@@ -118,7 +122,9 @@ proc release*(pool: TransportPool, transp: StreamTransport) =
  else:
    raise newException(TransportPoolError, "Pool is not ready!")

proc join*(pool: TransportPool) {.async.} =
proc join*(
    pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
  ## Waiting for all connections to become available.
  if pool.state in {Connected, Closing}:
    while true:
@@ -130,7 +136,9 @@ proc join*(pool: TransportPool) {.async.} =
  elif pool.state == Connecting:
    raise newException(TransportPoolError, "Pool is not ready!")

proc close*(pool: TransportPool) {.async.} =
proc close*(
    pool: TransportPool
) {.async: (raises: [TransportPoolError, CancelledError]).} =
  ## Closes transports pool ``pool`` and releases all resources.
  if pool.state == Connected:
    pool.state = Closing
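Since release() is now asynchronous and both acquire() and release() declare
TransportPoolError, a checked acquire/use/release cycle looks like this
(sketch only; assumes a server already listening on `address`):

    proc usePool(address: TransportAddress) {.async.} =
      let pool = await newPool(address)
      let transp = await pool.acquire() # may raise TransportPoolError
      try:
        discard await transp.write("ping")
      finally:
        await pool.release(transp) # return the transport to the pool
      await pool.close()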
@@ -10,12 +10,14 @@
{.push raises: [].}

import chronos
import stew/results
import results
import peerid, stream/connection, transports/transport

export results

type Dial* = ref object of RootObj
type
  Dial* = ref object of RootObj
  DialFailedError* = object of LPError

method connect*(
    self: Dial,
@@ -24,28 +26,28 @@ method connect*(
    forceDial = false,
    reuseConnection = true,
    dir = Direction.Out,
) {.async, base.} =
) {.base, async: (raises: [DialFailedError, CancelledError]).} =
  ## connect remote peer without negotiating
  ## a protocol
  ##

  doAssert(false, "Not implemented!")
  doAssert(false, "[Dial.connect] abstract method not implemented!")

method connect*(
    self: Dial, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async, base.} =
): Future[PeerId] {.base, async: (raises: [DialFailedError, CancelledError]).} =
  ## Connects to a peer and retrieve its PeerId

  doAssert(false, "Not implemented!")
  doAssert(false, "[Dial.connect] abstract method not implemented!")

method dial*(
    self: Dial, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
  ## create a protocol stream over an
  ## existing connection
  ##

  doAssert(false, "Not implemented!")
  doAssert(false, "[Dial.dial] abstract method not implemented!")

method dial*(
    self: Dial,
@@ -53,17 +55,19 @@ method dial*(
    addrs: seq[MultiAddress],
    protos: seq[string],
    forceDial = false,
): Future[Connection] {.async, base.} =
): Future[Connection] {.base, async: (raises: [DialFailedError, CancelledError]).} =
  ## create a protocol stream and establish
  ## a connection if one doesn't exist already
  ##

  doAssert(false, "Not implemented!")
  doAssert(false, "[Dial.dial] abstract method not implemented!")

method addTransport*(self: Dial, transport: Transport) {.base.} =
  doAssert(false, "Not implemented!")
  doAssert(false, "[Dial.addTransport] abstract method not implemented!")

method tryDial*(
    self: Dial, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async, base.} =
  doAssert(false, "Not implemented!")
): Future[Opt[MultiAddress]] {.
    base, async: (raises: [DialFailedError, CancelledError])
.} =
  doAssert(false, "[Dial.tryDial] abstract method not implemented!")
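Overrides of these base methods must now declare the same effect set, or the
compiler rejects them. A do-nothing subclass sketch (NoopDial is illustrative
only, not part of this change):

    type NoopDial = ref object of Dial

    method dial*(
        self: NoopDial, peerId: PeerId, protos: seq[string]
    ): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
      # The matching raises annotation is mandatory; the body may only
      # raise DialFailedError or CancelledError.
      raise newException(DialFailedError, "NoopDial never dials")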
@@ -9,8 +9,7 @@

import std/tables

import stew/results
import pkg/[chronos, chronicles, metrics]
import pkg/[chronos, chronicles, metrics, results]

import
  dial,
@@ -36,16 +35,13 @@ declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")

type
  DialFailedError* = object of LPError

  Dialer* = ref object of Dial
    localPeerId*: PeerId
    connManager: ConnManager
    dialLock: Table[PeerId, AsyncLock]
    transports: seq[Transport]
    peerStore: PeerStore
    nameResolver: NameResolver
type Dialer* = ref object of Dial
  localPeerId*: PeerId
  connManager: ConnManager
  dialLock: Table[PeerId, AsyncLock]
  transports: seq[Transport]
  peerStore: PeerStore
  nameResolver: NameResolver

proc dialAndUpgrade(
    self: Dialer,
@@ -53,7 +49,7 @@ proc dialAndUpgrade(
    hostname: string,
    address: MultiAddress,
    dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [CancelledError]).} =
  for transport in self.transports: # for each transport
    if transport.handles(address): # check if it can dial it
      trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
@@ -105,7 +101,9 @@ proc dialAndUpgrade(

proc expandDnsAddr(
    self: Dialer, peerId: Opt[PeerId], address: MultiAddress
): Future[seq[(MultiAddress, Opt[PeerId])]] {.async.} =
): Future[seq[(MultiAddress, Opt[PeerId])]] {.
    async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
  if not DNSADDR.matchPartial(address):
    return @[(address, peerId)]
  if isNil(self.nameResolver):
@@ -115,7 +113,10 @@ proc expandDnsAddr(
  let
    toResolve =
      if peerId.isSome:
        address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
        try:
          address & MultiAddress.init(multiCodec("p2p"), peerId.tryGet()).tryGet()
        except ResultError[void]:
          raiseAssert "checked with if"
      else:
        address
    resolved = await self.nameResolver.resolveDnsAddr(toResolve)
@@ -123,16 +124,22 @@ proc expandDnsAddr(
  for resolvedAddress in resolved:
    let lastPart = resolvedAddress[^1].tryGet()
    if lastPart.protoCode == Result[MultiCodec, string].ok(multiCodec("p2p")):
      let
      var peerIdBytes: seq[byte]
      try:
        peerIdBytes = lastPart.protoArgument().tryGet()
        addrPeerId = PeerId.init(peerIdBytes).tryGet()
      except ResultError[string] as e:
        raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg

      let addrPeerId = PeerId.init(peerIdBytes).tryGet()
      result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
    else:
      result.add((resolvedAddress, peerId))

proc dialAndUpgrade(
    self: Dialer, peerId: Opt[PeerId], addrs: seq[MultiAddress], dir = Direction.Out
): Future[Muxer] {.async.} =
): Future[Muxer] {.
    async: (raises: [CancelledError, MaError, TransportAddressError, LPError])
.} =
  debug "Dialing peer", peerId = peerId.get(default(PeerId)), addrs

  for rawAddress in addrs:
@@ -169,47 +176,69 @@ proc internalConnect(
    forceDial: bool,
    reuseConnection = true,
    dir = Direction.Out,
): Future[Muxer] {.async.} =
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
  if Opt.some(self.localPeerId) == peerId:
    raise newException(CatchableError, "can't dial self!")
    raise newException(DialFailedError, "internalConnect can't dial self!")

  # Ensure there's only one in-flight attempt per peer
  let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
  try:
    await lock.acquire()

    if reuseConnection:
      peerId.withValue(peerId):
        self.tryReusingConnection(peerId).withValue(mux):
          return mux

    let slot = self.connManager.getOutgoingSlot(forceDial)
    let muxed =
      try:
        await self.dialAndUpgrade(peerId, addrs, dir)
      except CatchableError as exc:
        slot.release()
        raise exc
    slot.trackMuxer(muxed)
    if isNil(muxed): # None of the addresses connected
      raise newException(DialFailedError, "Unable to establish outgoing link")

    await lock.acquire()
  defer:
    try:
      self.connManager.storeMuxer(muxed)
      await self.peerStore.identify(muxed)
      await self.connManager.triggerPeerEvents(
        muxed.connection.peerId,
        PeerEvent(kind: PeerEventKind.Identified, initiator: true),
      )
    except CatchableError as exc:
      trace "Failed to finish outgoung upgrade", description = exc.msg
      await muxed.close()
      raise exc

    return muxed
  finally:
    if lock.locked():
      lock.release()
  except AsyncLockError as e:
    raiseAssert "lock must have been acquired in line above: " & e.msg

  if reuseConnection:
    peerId.withValue(peerId):
      self.tryReusingConnection(peerId).withValue(mux):
        return mux

  let slot =
    try:
      self.connManager.getOutgoingSlot(forceDial)
    except TooManyConnectionsError as exc:
      raise newException(
        DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
      )

  let muxed =
    try:
      await self.dialAndUpgrade(peerId, addrs, dir)
    except CancelledError as exc:
      slot.release()
      raise exc
    except CatchableError as exc:
      slot.release()
      raise newException(
        DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
      )

  slot.trackMuxer(muxed)
  if isNil(muxed): # None of the addresses connected
    raise newException(
      DialFailedError, "Unable to establish outgoing link in internalConnect"
    )

  try:
    self.connManager.storeMuxer(muxed)
    await self.peerStore.identify(muxed)
    await self.connManager.triggerPeerEvents(
      muxed.connection.peerId,
      PeerEvent(kind: PeerEventKind.Identified, initiator: true),
    )
    return muxed
  except CancelledError as exc:
    await muxed.close()
    raise exc
  except CatchableError as exc:
    trace "Failed to finish outgoing upgrade", description = exc.msg
    await muxed.close()
    raise newException(
      DialFailedError,
      "Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
      exc,
    )

method connect*(
    self: Dialer,
@@ -218,7 +247,7 @@ method connect*(
    forceDial = false,
    reuseConnection = true,
    dir = Direction.Out,
) {.async.} =
) {.async: (raises: [DialFailedError, CancelledError]).} =
  ## connect remote peer without negotiating
  ## a protocol
  ##
@@ -231,7 +260,7 @@ method connect*(

method connect*(
    self: Dialer, address: MultiAddress, allowUnknownPeerId = false
): Future[PeerId] {.async.} =
): Future[PeerId] {.async: (raises: [DialFailedError, CancelledError]).} =
  ## Connects to a peer and retrieve its PeerId

  parseFullAddress(address).toOpt().withValue(fullAddress):
@@ -241,7 +270,7 @@ method connect*(

  if allowUnknownPeerId == false:
    raise newException(
      DialFailedError, "Address without PeerID and unknown peer id disabled!"
      DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
    )

  return
@@ -249,18 +278,18 @@ method connect*(

proc negotiateStream(
    self: Dialer, conn: Connection, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CatchableError]).} =
  trace "Negotiating stream", conn, protos
  let selected = await MultistreamSelect.select(conn, protos)
  if not protos.contains(selected):
    await conn.closeWithEOF()
    raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
    raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)

  return conn

method tryDial*(
    self: Dialer, peerId: PeerId, addrs: seq[MultiAddress]
): Future[Opt[MultiAddress]] {.async.} =
): Future[Opt[MultiAddress]] {.async: (raises: [DialFailedError, CancelledError]).} =
  ## Create a protocol stream in order to check
  ## if a connection is possible.
  ## Doesn't use the Connection Manager to save it.
@@ -270,27 +299,37 @@ method tryDial*(
  try:
    let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
    if mux.isNil():
      raise newException(DialFailedError, "No valid multiaddress")
      raise newException(DialFailedError, "No valid multiaddress in tryDial")
    await mux.close()
    return mux.connection.observedAddr
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    raise newException(DialFailedError, exc.msg)
    raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)

method dial*(
    self: Dialer, peerId: PeerId, protos: seq[string]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
  ## create a protocol stream over an
  ## existing connection
  ##

  trace "Dialing (existing)", peerId, protos
  let stream = await self.connManager.getStream(peerId)
  if stream.isNil:
    raise newException(DialFailedError, "Couldn't get muxed stream")

  return await self.negotiateStream(stream, protos)
  try:
    let stream = await self.connManager.getStream(peerId)
    if stream.isNil:
      raise newException(
        DialFailedError,
        "Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
      )
    return await self.negotiateStream(stream, protos)
  except CancelledError as exc:
    trace "Dial canceled", description = exc.msg
    raise exc
  except CatchableError as exc:
    trace "Error dialing", description = exc.msg
    raise newException(DialFailedError, "failed dial existing: " & exc.msg)

method dial*(
    self: Dialer,
@@ -298,7 +337,7 @@ method dial*(
    addrs: seq[MultiAddress],
    protos: seq[string],
    forceDial = false,
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [DialFailedError, CancelledError]).} =
  ## create a protocol stream and establish
  ## a connection if one doesn't exist already
  ##
@@ -307,7 +346,7 @@ method dial*(
    conn: Muxer
    stream: Connection

  proc cleanup() {.async.} =
  proc cleanup() {.async: (raises: []).} =
    if not (isNil(stream)):
      await stream.closeWithEOF()

@@ -321,17 +360,20 @@ method dial*(
    stream = await self.connManager.getStream(conn)

    if isNil(stream):
      raise newException(DialFailedError, "Couldn't get muxed stream")
      raise newException(
        DialFailedError,
        "Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
      )

    return await self.negotiateStream(stream, protos)
  except CancelledError as exc:
    trace "Dial canceled", conn
    trace "Dial canceled", conn, description = exc.msg
    await cleanup()
    raise exc
  except CatchableError as exc:
    debug "Error dialing", conn, description = exc.msg
    await cleanup()
    raise exc
    raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)

method addTransport*(self: Dialer, t: Transport) =
  self.transports &= t
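Because dial() now raises only DialFailedError or CancelledError, call sites
can be exhaustive without a CatchableError catch-all. A sketch, assuming a
configured Switch whose dial delegates to the Dialer above (the protocol id is
illustrative):

    proc openEcho(switch: Switch, peer: PeerId): Future[Connection] {.async.} =
      try:
        return await switch.dial(peer, @["/echo/1.0.0"])
      except DialFailedError as e:
        echo "dial failed: ", e.msg
        raise e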
@@ -10,7 +10,7 @@
{.push raises: [].}

import std/sequtils
import chronos, chronicles, stew/results
import chronos, chronicles, results
import ../errors

type
@@ -38,8 +38,7 @@ proc add*[T](pa: var PeerAttributes, value: T) =
    Attribute[T](
      value: value,
      comparator: proc(f: BaseAttr, c: BaseAttr): bool =
        f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T)
      ,
        f.ofType(T) and c.ofType(T) and f.to(T) == c.to(T),
    )
  )

@@ -60,7 +59,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =

proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
  pa{T}.valueOr:
    raise newException(KeyError, "Attritute not found")
    raise newException(KeyError, "Attribute not found")

proc match*(pa, candidate: PeerAttributes): bool =
  for f in pa.attributes:
@@ -80,16 +79,21 @@ type
    advertisementUpdated*: AsyncEvent
    advertiseLoop*: Future[void]

method request*(self: DiscoveryInterface, pa: PeerAttributes) {.async, base.} =
  doAssert(false, "Not implemented!")

method advertise*(self: DiscoveryInterface) {.async, base.} =
  doAssert(false, "Not implemented!")

type
  DiscoveryError* = object of LPError
  DiscoveryFinished* = object of LPError
  AdvertiseError* = object of DiscoveryError

method request*(
    self: DiscoveryInterface, pa: PeerAttributes
) {.base, async: (raises: [DiscoveryError, CancelledError]).} =
  doAssert(false, "[DiscoveryInterface.request] abstract method not implemented!")

method advertise*(
    self: DiscoveryInterface
) {.base, async: (raises: [CancelledError, AdvertiseError]).} =
  doAssert(false, "[DiscoveryInterface.advertise] abstract method not implemented!")

type
  DiscoveryQuery* = ref object
    attr: PeerAttributes
    peers: AsyncQueue[PeerAttributes]
@@ -109,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
    try:
      query.peers.putNoWait(pa)
    except AsyncQueueFullError as exc:
      debug "Cannot push discovered peer to queue"
      debug "Cannot push discovered peer to queue", description = exc.msg

proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
  var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())
@@ -138,7 +142,9 @@ template forEach*(query: DiscoveryQuery, code: untyped) =
  ## peer attributes are available through the variable
  ## `peer`

  proc forEachInternal(q: DiscoveryQuery) {.async.} =
  proc forEachInternal(
      q: DiscoveryQuery
  ) {.async: (raises: [CancelledError, DiscoveryError]).} =
    while true:
      let peer {.inject.} =
        try:
@@ -163,7 +169,11 @@ proc stop*(dm: DiscoveryManager) =
      continue
    i.advertiseLoop.cancel()

proc getPeer*(query: DiscoveryQuery): Future[PeerAttributes] {.async.} =
proc getPeer*(
    query: DiscoveryQuery
): Future[PeerAttributes] {.
    async: (raises: [CancelledError, DiscoveryError, DiscoveryFinished])
.} =
  let getter = query.peers.popFirst()

  try:
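A consumer loop over a query can now terminate through the typed
DiscoveryFinished signal instead of a generic error. Sketch, using the
RdvNamespace attribute defined in the rendezvous interface below (the
namespace string is illustrative):

    proc consume(dm: DiscoveryManager) {.async.} =
      var attrs: PeerAttributes
      attrs.add(RdvNamespace("example-ns")) # illustrative namespace
      let query = dm.request(attrs)
      try:
        while true:
          let peer = await query.getPeer()
          echo "discovered: ", peer{PeerId}
      except DiscoveryFinished:
        discard # query drained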
@@ -23,15 +23,17 @@ type

proc `==`*(a, b: RdvNamespace): bool {.borrow.}

method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
  var namespace = ""
method request*(
    self: RendezVousInterface, pa: PeerAttributes
) {.async: (raises: [DiscoveryError, CancelledError]).} =
  var namespace = Opt.none(string)
  for attr in pa:
    if attr.ofType(RdvNamespace):
      namespace = string attr.to(RdvNamespace)
      namespace = Opt.some(string attr.to(RdvNamespace))
    elif attr.ofType(DiscoveryService):
      namespace = string attr.to(DiscoveryService)
      namespace = Opt.some(string attr.to(DiscoveryService))
    elif attr.ofType(PeerId):
      namespace = $attr.to(PeerId)
      namespace = Opt.some($attr.to(PeerId))
    else:
      # unhandled type
      return
@@ -42,13 +44,15 @@ method request*(self: RendezVousInterface, pa: PeerAttributes) {.async.} =
      for address in pr.addresses:
        peer.add(address.address)

      peer.add(DiscoveryService(namespace))
      peer.add(RdvNamespace(namespace))
      peer.add(DiscoveryService(namespace.get()))
      peer.add(RdvNamespace(namespace.get()))
      self.onPeerFound(peer)

    await sleepAsync(self.timeToRequest)

method advertise*(self: RendezVousInterface) {.async.} =
method advertise*(
    self: RendezVousInterface
) {.async: (raises: [CancelledError, AdvertiseError]).} =
  while true:
    var toAdvertise: seq[string]
    for attr in self.toAdvertise:
|
||||
errors,
|
||||
utility
|
||||
import stew/[base58, base32, endians2]
|
||||
export results, minprotobuf, vbuffer, errors, utility
|
||||
export results, vbuffer, errors, utility
|
||||
|
||||
logScope:
|
||||
topics = "libp2p multiaddress"
|
||||
@@ -171,6 +171,18 @@ proc ip6zoneVB(vb: var VBuffer): bool =
|
||||
## IPv6 validateBuffer() implementation.
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc memoryStB(s: string, vb: var VBuffer): bool =
|
||||
## Memory stringToBuffer() implementation.
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc memoryBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Memory bufferToString() implementation.
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
proc memoryVB(vb: var VBuffer): bool =
|
||||
## Memory validateBuffer() implementation.
|
||||
pathValidateBuffer(vb)
|
||||
|
||||
proc portStB(s: string, vb: var VBuffer): bool =
|
||||
## Port number stringToBuffer() implementation.
|
||||
var port: array[2, byte]
|
||||
@@ -355,6 +367,10 @@ const
|
||||
)
|
||||
TranscoderDNS* =
|
||||
Transcoder(stringToBuffer: dnsStB, bufferToString: dnsBtS, validateBuffer: dnsVB)
|
||||
TranscoderMemory* = Transcoder(
|
||||
stringToBuffer: memoryStB, bufferToString: memoryBtS, validateBuffer: memoryVB
|
||||
)
|
||||
|
||||
ProtocolsList = [
|
||||
MAProtocol(mcodec: multiCodec("ip4"), kind: Fixed, size: 4, coder: TranscoderIP4),
|
||||
MAProtocol(mcodec: multiCodec("tcp"), kind: Fixed, size: 2, coder: TranscoderPort),
|
||||
@@ -393,6 +409,9 @@ const
|
||||
MAProtocol(mcodec: multiCodec("p2p-websocket-star"), kind: Marker, size: 0),
|
||||
MAProtocol(mcodec: multiCodec("p2p-webrtc-star"), kind: Marker, size: 0),
|
||||
MAProtocol(mcodec: multiCodec("p2p-webrtc-direct"), kind: Marker, size: 0),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("memory"), kind: Path, size: 0, coder: TranscoderMemory
|
||||
),
|
||||
]
|
||||
|
||||
DNSANY* = mapEq("dns")
|
||||
@@ -453,6 +472,8 @@ const
|
||||
|
||||
CircuitRelay* = mapEq("p2p-circuit")
|
||||
|
||||
Memory* = mapEq("memory")
|
||||
|
||||
proc initMultiAddressCodeTable(): Table[MultiCodec, MAProtocol] {.compileTime.} =
|
||||
for item in ProtocolsList:
|
||||
result[item.mcodec] = item
|
||||
|
||||
@@ -16,7 +16,8 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import tables
|
||||
import stew/[base32, base58, base64, results]
|
||||
import results
|
||||
import stew/[base32, base58, base64]
|
||||
|
||||
type
|
||||
MultiBaseStatus* {.pure.} = enum
|
||||
|
||||
@@ -10,10 +10,11 @@
|
||||
## This module implements MultiCodec.
|
||||
|
||||
{.push raises: [].}
|
||||
{.used.}
|
||||
|
||||
import tables, hashes
|
||||
import vbuffer
|
||||
import stew/results
|
||||
import results
|
||||
export results
|
||||
|
||||
## List of officially supported codecs can BE found here
|
||||
@@ -396,6 +397,7 @@ const MultiCodecList = [
|
||||
("onion3", 0x01BD),
|
||||
("p2p-circuit", 0x0122),
|
||||
("libp2p-peer-record", 0x0301),
|
||||
("memory", 0x0309),
|
||||
("dns", 0x35),
|
||||
("dns4", 0x36),
|
||||
("dns6", 0x37),
|
||||
@@ -403,6 +405,7 @@ const MultiCodecList = [
|
||||
# IPLD formats
|
||||
("dag-pb", 0x70),
|
||||
("dag-cbor", 0x71),
|
||||
("libp2p-key", 0x72),
|
||||
("dag-json", 0x129),
|
||||
("git-raw", 0x78),
|
||||
("eth-block", 0x90),
|
||||
|
||||
@@ -22,12 +22,13 @@
|
||||
## 2. MURMUR
|
||||
|
||||
{.push raises: [].}
|
||||
{.used.}
|
||||
|
||||
import tables
|
||||
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
|
||||
import varint, vbuffer, multicodec, multibase
|
||||
import stew/base58
|
||||
import stew/results
|
||||
import results
|
||||
export results
|
||||
# This is workaround for Nim `import` bug.
|
||||
export sha, sha2, keccak, blake2, hash, utils
|
||||
@@ -566,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
|
||||
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
|
||||
## Create MultiHash from BASE58 encoded string representation ``data``.
|
||||
if MultiHash.decode(Base58.decode(data), result) == -1:
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format")
|
||||
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
|
||||
|
||||
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
|
||||
if len(a) != len(b):
|
||||
|
||||
@@ -87,7 +87,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
raise exc
|
||||
except LPStreamError as exc:
|
||||
await s.conn.close()
|
||||
raise exc
|
||||
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
|
||||
|
||||
method closed*(s: LPChannel): bool =
|
||||
s.closedLocal
|
||||
@@ -169,6 +169,7 @@ method readOnce*(
|
||||
## channel must not be done from within a callback / read handler of another
|
||||
## or the reads will lock each other.
|
||||
if s.remoteReset:
|
||||
trace "reset stream in readOnce", s
|
||||
raise newLPStreamResetError()
|
||||
if s.localReset:
|
||||
raise newLPStreamClosedError()
|
||||
@@ -201,6 +202,7 @@ proc prepareWrite(
|
||||
# prepareWrite is the slow path of writing a message - see conditions in
|
||||
# write
|
||||
if s.remoteReset:
|
||||
trace "stream is reset when prepareWrite", s
|
||||
raise newLPStreamResetError()
|
||||
if s.closedLocal:
|
||||
raise newLPStreamClosedError()
|
||||
|
||||
@@ -247,7 +247,7 @@ method close*(m: Mplex) {.async: (raises: []).} =
|
||||
|
||||
trace "Closed mplex", m
|
||||
|
||||
method getStreams*(m: Mplex): seq[Connection] =
|
||||
method getStreams*(m: Mplex): seq[Connection] {.gcsafe.} =
|
||||
for c in m.channels[false].values:
|
||||
result.add(c)
|
||||
for c in m.channels[true].values:
|
||||
|
||||
@@ -52,7 +52,7 @@ method newStream*(
|
||||
): Future[Connection] {.
|
||||
base, async: (raises: [CancelledError, LPStreamError, MuxerError], raw: true)
|
||||
.} =
|
||||
raiseAssert("Not implemented!")
|
||||
raiseAssert("[Muxer.newStream] abstract method not implemented!")
|
||||
|
||||
method close*(m: Muxer) {.base, async: (raises: []).} =
|
||||
if m.connection != nil:
|
||||
@@ -67,5 +67,5 @@ proc new*(
|
||||
let muxerProvider = T(newMuxer: creator, codec: codec)
|
||||
muxerProvider
|
||||
|
||||
method getStreams*(m: Muxer): seq[Connection] {.base.} =
|
||||
raiseAssert("Not implemented!")
|
||||
method getStreams*(m: Muxer): seq[Connection] {.base, gcsafe.} =
|
||||
raiseAssert("[Muxer.getStreams] abstract method not implemented!")
|
||||
|
||||
@@ -82,8 +82,7 @@ proc `$`(header: YamuxHeader): string =
|
||||
if a != "":
|
||||
a & ", " & $b
|
||||
else:
|
||||
$b
|
||||
,
|
||||
$b,
|
||||
"",
|
||||
) & "}, " & "streamId: " & $header.streamId & ", " & "length: " & $header.length &
|
||||
"}"
|
||||
@@ -176,8 +175,7 @@ proc `$`(channel: YamuxChannel): string =
|
||||
if a != "":
|
||||
a & ", " & b
|
||||
else:
|
||||
b
|
||||
,
|
||||
b,
|
||||
"",
|
||||
) & "}"
|
||||
|
||||
@@ -205,6 +203,7 @@ proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if not channel.closedRemotely.isSet():
|
||||
channel.closedRemotely.fire()
|
||||
await channel.actuallyClose()
|
||||
channel.isClosedRemotely = true
|
||||
|
||||
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if not channel.closedLocally:
|
||||
@@ -270,10 +269,13 @@ method readOnce*(
|
||||
if channel.isReset:
|
||||
raise
|
||||
if channel.remoteReset:
|
||||
trace "stream is remote reset when readOnce", channel = $channel
|
||||
newLPStreamResetError()
|
||||
elif channel.closedLocally:
|
||||
trace "stream is closed locally when readOnce", channel = $channel
|
||||
newLPStreamClosedError()
|
||||
else:
|
||||
trace "stream is down when readOnce", channel = $channel
|
||||
newLPStreamConnDownError()
|
||||
if channel.isEof:
|
||||
raise newLPStreamRemoteClosedError()
|
||||
@@ -397,6 +399,7 @@ method write*(
|
||||
##
|
||||
result = newFuture[void]("Yamux Send")
|
||||
if channel.remoteReset:
|
||||
trace "stream is reset when write", channel = $channel
|
||||
result.fail(newLPStreamResetError())
|
||||
return result
|
||||
if channel.closedLocally or channel.isReset:
|
||||
@@ -584,10 +587,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
let channel =
|
||||
try:
|
||||
m.channels[header.streamId]
|
||||
except KeyError:
|
||||
except KeyError as e:
|
||||
raise newException(
|
||||
YamuxError,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
|
||||
e.msg,
|
||||
e,
|
||||
)
|
||||
|
||||
if header.msgType == WindowUpdate:
|
||||
@@ -632,7 +637,7 @@ method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
await m.close()
|
||||
trace "Stopped yamux handler"
|
||||
|
||||
method getStreams*(m: Yamux): seq[Connection] =
|
||||
method getStreams*(m: Yamux): seq[Connection] {.gcsafe.} =
|
||||
for c in m.channels.values:
|
||||
result.add(c)
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -10,7 +10,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[streams, strutils, sets, sequtils],
|
||||
std/[streams, sets, sequtils],
|
||||
chronos,
|
||||
chronicles,
|
||||
stew/byteutils,
|
||||
@@ -39,24 +39,32 @@ proc questionToBuf(address: string, kind: QKind): seq[byte] =
|
||||
|
||||
var buf = newSeq[byte](dataLen)
|
||||
discard requestStream.readData(addr buf[0], dataLen)
|
||||
return buf
|
||||
except CatchableError as exc:
|
||||
buf
|
||||
except IOError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
return newSeq[byte](0)
|
||||
newSeq[byte](0)
|
||||
except OSError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
newSeq[byte](0)
|
||||
except ValueError as exc:
|
||||
info "Failed to created DNS buffer", description = exc.msg
|
||||
newSeq[byte](0)
|
||||
|
||||
proc getDnsResponse(
|
||||
dnsServer: TransportAddress, address: string, kind: QKind
|
||||
): Future[Response] {.async.} =
|
||||
): Future[Response] {.
|
||||
async: (raises: [CancelledError, IOError, OSError, TransportError, ValueError])
|
||||
.} =
|
||||
var sendBuf = questionToBuf(address, kind)
|
||||
|
||||
if sendBuf.len == 0:
|
||||
raise newException(ValueError, "Incorrect DNS query")
|
||||
|
||||
let receivedDataFuture = newFuture[void]()
|
||||
let receivedDataFuture = Future[void].Raising([CancelledError]).init()
|
||||
|
||||
proc datagramDataReceived(
|
||||
transp: DatagramTransport, raddr: TransportAddress
|
||||
): Future[void] {.async, closure.} =
|
||||
): Future[void] {.async: (raises: []).} =
|
||||
receivedDataFuture.complete()
|
||||
|
||||
let sock =
|
||||
@@ -68,27 +76,41 @@ proc getDnsResponse(
|
||||
try:
|
||||
await sock.sendTo(dnsServer, addr sendBuf[0], sendBuf.len)
|
||||
|
||||
await receivedDataFuture or sleepAsync(5.seconds) #unix default
|
||||
|
||||
if not receivedDataFuture.finished:
|
||||
raise newException(IOError, "DNS server timeout")
|
||||
try:
|
||||
await receivedDataFuture.wait(5.seconds) #unix default
|
||||
except AsyncTimeoutError as e:
|
||||
raise newException(IOError, "DNS server timeout: " & e.msg, e)
|
||||
|
||||
let rawResponse = sock.getMessage()
|
||||
# parseResponse can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
return exceptionToAssert:
|
||||
try:
|
||||
parseResponse(string.fromBytes(rawResponse))
|
||||
except IOError as exc:
|
||||
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except OSError as exc:
|
||||
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except ValueError as exc:
|
||||
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
|
||||
except Exception as exc:
|
||||
# Nim 1.6: parseResponse can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert "Exception parsing DN response: " & exc.msg
|
||||
finally:
|
||||
await sock.closeWait()
|
||||
|
||||
method resolveIp*(
|
||||
self: DnsResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
|
||||
): Future[seq[TransportAddress]] {.async.} =
|
||||
): Future[seq[TransportAddress]] {.
|
||||
async: (raises: [CancelledError, TransportAddressError])
|
||||
.} =
|
||||
trace "Resolving IP using DNS", address, servers = self.nameServers.mapIt($it), domain
|
||||
for _ in 0 ..< self.nameServers.len:
|
||||
let server = self.nameServers[0]
|
||||
var responseFutures: seq[Future[Response]]
|
||||
var responseFutures: seq[
|
||||
Future[Response].Raising(
|
||||
[CancelledError, IOError, OSError, TransportError, ValueError]
|
||||
)
|
||||
]
|
||||
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
|
||||
responseFutures.add(getDnsResponse(server, address, A))
|
||||
|
||||
@@ -103,23 +125,32 @@ method resolveIp*(
|
||||
var
|
||||
resolvedAddresses: OrderedSet[string]
|
||||
resolveFailed = false
|
||||
template handleFail(e): untyped =
|
||||
info "Failed to query DNS", address, error = e.msg
|
||||
resolveFailed = true
|
||||
break
|
||||
|
||||
for fut in responseFutures:
|
||||
try:
|
||||
let resp = await fut
|
||||
for answer in resp.answers:
|
||||
# toString can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
resolvedAddresses.incl(exceptionToAssert(answer.toString()))
|
||||
resolvedAddresses.incl(answer.toString())
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except ValueError as e:
|
||||
info "Invalid DNS query", address, error = e.msg
|
||||
return @[]
|
||||
except CatchableError as e:
|
||||
info "Failed to query DNS", address, error = e.msg
|
||||
resolveFailed = true
|
||||
break
|
||||
except IOError as e:
|
||||
handleFail(e)
|
||||
except OSError as e:
|
||||
handleFail(e)
|
||||
except TransportError as e:
|
||||
handleFail(e)
|
||||
except Exception as e:
|
||||
# Nim 1.6: answer.toString can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert e.msg
|
||||
|
||||
if resolveFailed:
|
||||
self.nameServers.add(self.nameServers[0])
|
||||
@@ -132,27 +163,39 @@ method resolveIp*(
|
||||
debug "Failed to resolve address, returning empty set"
|
||||
return @[]
|
||||
|
||||
method resolveTxt*(self: DnsResolver, address: string): Future[seq[string]] {.async.} =
|
||||
method resolveTxt*(
|
||||
self: DnsResolver, address: string
|
||||
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
|
||||
trace "Resolving TXT using DNS", address, servers = self.nameServers.mapIt($it)
|
||||
for _ in 0 ..< self.nameServers.len:
|
||||
let server = self.nameServers[0]
|
||||
try:
|
||||
# toString can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
let response = await getDnsResponse(server, address, TXT)
|
||||
return exceptionToAssert:
|
||||
trace "Got TXT response",
|
||||
server = $server, answer = response.answers.mapIt(it.toString())
|
||||
response.answers.mapIt(it.toString())
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except CatchableError as e:
|
||||
template handleFail(e): untyped =
|
||||
info "Failed to query DNS", address, error = e.msg
|
||||
self.nameServers.add(self.nameServers[0])
|
||||
self.nameServers.delete(0)
|
||||
continue
|
||||
|
||||
try:
|
||||
let response = await getDnsResponse(server, address, TXT)
|
||||
trace "Got TXT response",
|
||||
server = $server, answer = response.answers.mapIt(it.toString())
|
||||
return response.answers.mapIt(it.toString())
|
||||
except CancelledError as e:
|
||||
raise e
|
||||
except IOError as e:
|
||||
handleFail(e)
|
||||
except OSError as e:
|
||||
handleFail(e)
|
||||
except TransportError as e:
|
||||
handleFail(e)
|
||||
except ValueError as e:
|
||||
handleFail(e)
|
||||
except Exception as e:
|
||||
# Nim 1.6: toString can has a raises: [Exception, ..] because of
|
||||
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
|
||||
# it can't actually raise though
|
||||
raiseAssert e.msg
|
||||
|
||||
debug "Failed to resolve TXT, returning empty set"
|
||||
return @[]
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -25,17 +25,25 @@ type MockResolver* = ref object of NameResolver
|
||||
|
||||
method resolveIp*(
|
||||
self: MockResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
|
||||
): Future[seq[TransportAddress]] {.async.} =
|
||||
): Future[seq[TransportAddress]] {.
|
||||
async: (raises: [CancelledError, TransportAddressError])
|
||||
.} =
|
||||
var res: seq[TransportAddress]
|
||||
|
||||
if domain == Domain.AF_INET or domain == Domain.AF_UNSPEC:
|
||||
for resp in self.ipResponses.getOrDefault((address, false)):
|
||||
result.add(initTAddress(resp, port))
|
||||
res.add(initTAddress(resp, port))
|
||||
|
||||
if domain == Domain.AF_INET6 or domain == Domain.AF_UNSPEC:
|
||||
for resp in self.ipResponses.getOrDefault((address, true)):
|
||||
result.add(initTAddress(resp, port))
|
||||
res.add(initTAddress(resp, port))
|
||||
|
||||
method resolveTxt*(self: MockResolver, address: string): Future[seq[string]] {.async.} =
|
||||
return self.txtResponses.getOrDefault(address)
|
||||
res
|
||||
|
||||
method resolveTxt*(
|
||||
self: MockResolver, address: string
|
||||
): Future[seq[string]] {.async: (raises: [CancelledError]).} =
|
||||
self.txtResponses.getOrDefault(address)
|
||||
|
||||
proc new*(T: typedesc[MockResolver]): T =
|
||||
T()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -9,7 +9,7 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sugar, sets, sequtils, strutils]
|
||||
import std/[sets, sequtils, strutils]
|
||||
import chronos, chronicles, stew/endians2
|
||||
import ".."/[multiaddress, multicodec]
|
||||
|
||||
@@ -20,19 +20,17 @@ type NameResolver* = ref object of RootObj
|
||||
|
||||
method resolveTxt*(
|
||||
self: NameResolver, address: string
|
||||
): Future[seq[string]] {.async, base.} =
|
||||
): Future[seq[string]] {.async: (raises: [CancelledError]), base.} =
|
||||
## Get TXT record
|
||||
##
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
raiseAssert "[NameResolver.resolveTxt] abstract method not implemented!"
|
||||
|
||||
method resolveIp*(
|
||||
self: NameResolver, address: string, port: Port, domain: Domain = Domain.AF_UNSPEC
|
||||
): Future[seq[TransportAddress]] {.async, base.} =
|
||||
): Future[seq[TransportAddress]] {.
|
||||
async: (raises: [CancelledError, TransportAddressError]), base
|
||||
.} =
|
||||
## Resolve the specified address
|
||||
##
|
||||
|
||||
doAssert(false, "Not implemented!")
|
||||
raiseAssert "[NameResolver.resolveIp] abstract method not implemented!"
|
||||
|
||||
proc getHostname*(ma: MultiAddress): string =
|
||||
let
|
||||
@@ -46,30 +44,40 @@ proc getHostname*(ma: MultiAddress): string =
|
||||
|
||||
proc resolveOneAddress(
|
||||
self: NameResolver, ma: MultiAddress, domain: Domain = Domain.AF_UNSPEC, prefix = ""
|
||||
): Future[seq[MultiAddress]] {.async.} =
|
||||
#Resolve a single address
|
||||
): Future[seq[MultiAddress]] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError])
|
||||
.} =
|
||||
# Resolve a single address
|
||||
let portPart = ma[1].valueOr:
|
||||
raise maErr error
|
||||
var pbuf: array[2, byte]
|
||||
|
||||
var dnsval = getHostname(ma)
|
||||
|
||||
if ma[1].tryGet().protoArgument(pbuf).tryGet() == 0:
|
||||
raise newException(MaError, "Incorrect port number")
|
||||
let plen = portPart.protoArgument(pbuf).valueOr:
|
||||
raise maErr error
|
||||
if plen == 0:
|
||||
raise maErr "Incorrect port number"
|
||||
let
|
||||
port = Port(fromBytesBE(uint16, pbuf))
|
||||
dnsval = getHostname(ma)
|
||||
resolvedAddresses = await self.resolveIp(prefix & dnsval, port, domain)
|
||||
|
||||
return collect(newSeqOfCap(4)):
|
||||
for address in resolvedAddresses:
|
||||
var createdAddress = MultiAddress.init(address).tryGet()[0].tryGet()
|
||||
for part in ma:
|
||||
if DNS.match(part.tryGet()):
|
||||
continue
|
||||
createdAddress &= part.tryGet()
|
||||
createdAddress
|
||||
resolvedAddresses.mapIt:
|
||||
let address = MultiAddress.init(it).valueOr:
|
||||
raise maErr error
|
||||
var createdAddress = address[0].valueOr:
|
||||
raise maErr error
|
||||
for part in ma:
|
||||
let part = part.valueOr:
|
||||
raise maErr error
|
||||
if DNS.match(part):
|
||||
continue
|
||||
createdAddress &= part
|
||||
createdAddress
|
||||
|
||||
proc resolveDnsAddr*(
|
||||
self: NameResolver, ma: MultiAddress, depth: int = 0
|
||||
): Future[seq[MultiAddress]] {.async.} =
|
||||
): Future[seq[MultiAddress]] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError])
|
||||
.} =
|
||||
if not DNSADDR.matchPartial(ma):
|
||||
return @[ma]
|
||||
|
||||
@@ -78,54 +86,67 @@ proc resolveDnsAddr*(
|
||||
info "Stopping DNSADDR recursion, probably malicious", ma
|
||||
return @[]
|
||||
|
||||
var dnsval = getHostname(ma)
|
||||
|
||||
let txt = await self.resolveTxt("_dnsaddr." & dnsval)
|
||||
let
|
||||
dnsval = getHostname(ma)
|
||||
txt = await self.resolveTxt("_dnsaddr." & dnsval)
|
||||
|
||||
trace "txt entries", txt
|
||||
|
||||
var result: seq[MultiAddress]
|
||||
const codec = multiCodec("p2p")
|
||||
let maCodec = block:
|
||||
let hasCodec = ma.contains(codec).valueOr:
|
||||
raise maErr error
|
||||
if hasCodec:
|
||||
ma[codec]
|
||||
else:
|
||||
(static(default(MaResult[MultiAddress])))
|
||||
|
||||
var res: seq[MultiAddress]
|
||||
for entry in txt:
|
||||
if not entry.startsWith("dnsaddr="):
|
||||
continue
|
||||
let entryValue = MultiAddress.init(entry[8 ..^ 1]).tryGet()
|
||||
|
||||
if entryValue.contains(multiCodec("p2p")).tryGet() and
|
||||
ma.contains(multiCodec("p2p")).tryGet():
|
||||
if entryValue[multiCodec("p2p")] != ma[multiCodec("p2p")]:
|
||||
continue
|
||||
let
|
||||
entryValue = MultiAddress.init(entry[8 ..^ 1]).valueOr:
|
||||
raise maErr error
|
||||
entryHasCodec = entryValue.contains(multiCodec("p2p")).valueOr:
|
||||
raise maErr error
|
||||
if entryHasCodec and maCodec.isOk and entryValue[codec] != maCodec:
|
||||
continue
|
||||
|
||||
let resolved = await self.resolveDnsAddr(entryValue, depth + 1)
|
||||
for r in resolved:
|
||||
result.add(r)
|
||||
res.add(r)
|
||||
|
||||
if result.len == 0:
|
||||
if res.len == 0:
|
||||
debug "Failed to resolve a DNSADDR", ma
|
||||
return @[]
|
||||
return result
|
||||
res
|
||||
|
||||
proc resolveMAddress*(
|
||||
self: NameResolver, address: MultiAddress
|
||||
): Future[seq[MultiAddress]] {.async.} =
|
||||
): Future[seq[MultiAddress]] {.
|
||||
async: (raises: [CancelledError, MaError, TransportAddressError])
|
||||
.} =
|
||||
var res = initOrderedSet[MultiAddress]()
|
||||
|
||||
if not DNS.matchPartial(address):
|
||||
res.incl(address)
|
||||
else:
|
||||
let code = address[0].tryGet().protoCode().tryGet()
|
||||
let seq =
|
||||
case code
|
||||
of multiCodec("dns"):
|
||||
await self.resolveOneAddress(address)
|
||||
of multiCodec("dns4"):
|
||||
await self.resolveOneAddress(address, Domain.AF_INET)
|
||||
of multiCodec("dns6"):
|
||||
await self.resolveOneAddress(address, Domain.AF_INET6)
|
||||
of multiCodec("dnsaddr"):
|
||||
await self.resolveDnsAddr(address)
|
||||
else:
|
||||
assert false
|
||||
@[address]
|
||||
for ad in seq:
|
||||
let
|
||||
firstPart = address[0].valueOr:
|
||||
raise maErr error
|
||||
code = firstPart.protoCode().valueOr:
|
||||
raise maErr error
|
||||
ads =
|
||||
case code
|
||||
of multiCodec("dns"):
|
||||
await self.resolveOneAddress(address)
|
||||
of multiCodec("dns4"):
|
||||
await self.resolveOneAddress(address, Domain.AF_INET)
|
||||
of multiCodec("dns6"):
|
||||
await self.resolveOneAddress(address, Domain.AF_INET6)
|
||||
of multiCodec("dnsaddr"):
|
||||
await self.resolveDnsAddr(address)
|
||||
else:
|
||||
raise maErr("Unsupported codec " & $code)
|
||||
for ad in ads:
|
||||
res.incl(ad)
|
||||
return res.toSeq
|
||||
res.toSeq
|
||||
|
||||
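With the raises annotations in place, a caller of `resolveMAddress` now handles a closed set of errors. A sketch under those assumptions (proc name and log lines are illustrative):

proc resolveExample(r: NameResolver) {.async: (raises: [CancelledError]).} =
  let ma = MultiAddress.init("/dnsaddr/bootstrap.libp2p.io").valueOr:
    return
  try:
    for resolved in await r.resolveMAddress(ma):
      trace "resolved address", resolved
  except MaError as exc:
    debug "malformed multiaddress", description = exc.msg
  except TransportAddressError as exc:
    debug "DNS resolution failed", description = exc.msg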
@@ -11,10 +11,12 @@

{.push raises: [].}
{.push public.}
{.used.}

import
  std/[hashes, strutils],
  stew/[base58, results],
  stew/base58,
  results,
  chronicles,
  nimcrypto/utils,
  utility,
libp2p/peeridauth/client.nim (new file, 335 lines)
@@ -0,0 +1,335 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import base64, json, strutils, uri, times
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
import ../peerinfo, ../crypto/crypto, ../varint.nim

logScope:
  topics = "libp2p peeridauth"

const
  NimLibp2pUserAgent = "nim-libp2p"
  PeerIDAuthPrefix* = "libp2p-PeerID"
  ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
  ChallengeDefaultLen = 48

type PeerIDAuthClient* = ref object of RootObj
  session: HttpSessionRef
  rng: ref HmacDrbgContext

type PeerIDAuthError* = object of LPError

type PeerIDAuthResponse* = object
  status*: int
  headers*: HttpTable
  body*: seq[byte]

type BearerToken* = object
  token*: string
  expires*: Opt[DateTime]

type PeerIDAuthOpaque* = string
type PeerIDAuthSignature* = string
type PeerIDAuthChallenge* = string

type PeerIDAuthAuthenticationResponse* = object
  challengeClient*: PeerIDAuthChallenge
  opaque*: PeerIDAuthOpaque
  serverPubkey*: PublicKey

type PeerIDAuthAuthorizationResponse* = object
  sig*: PeerIDAuthSignature
  bearer*: BearerToken
  response*: PeerIDAuthResponse

type SigParam = object
  k: string
  v: seq[byte]

proc new*(T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext): PeerIDAuthClient =
  PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

proc sampleChar(
    ctx: var HmacDrbgContext, choices: string
): char {.raises: [ValueError].} =
  ## Samples a random character from the input string using the DRBG context
  if choices.len == 0:
    raise newException(ValueError, "Cannot sample from an empty string")
  var idx: uint32
  ctx.generate(idx)
  return choices[uint32(idx mod uint32(choices.len))]

proc randomChallenge(
    rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
  var rng = rng[]
  var challenge = ""
  try:
    for _ in 0 ..< challengeLen:
      challenge.add(rng.sampleChar(ChallengeCharset))
  except ValueError as exc:
    raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
  PeerIDAuthChallenge(challenge)

proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
  # Helper to extract the quoted value for a key
  for segment in data.split(","):
    if key in segment:
      return segment.split("=", 1)[1].strip(chars = {' ', '"'})
  raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)

proc genDataToSign(
    parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
  var buf: seq[byte] = prefix.toByteSeq()
  for p in parts:
    let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
      raise newException(PeerIDAuthError, "could not encode fields length to varint")
    buf.add varintLen
    buf.add (p.k & "=").toByteSeq()
    buf.add p.v
  return buf

proc getSigParams(
    clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
): seq[SigParam] =
  if clientSender:
    @[
      SigParam(k: "challenge-client", v: challenge.toByteSeq()),
      SigParam(k: "hostname", v: hostname.toByteSeq()),
      SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
    ]
  else:
    @[
      SigParam(k: "challenge-server", v: challenge.toByteSeq()),
      SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
      SigParam(k: "hostname", v: hostname.toByteSeq()),
    ]

proc sign(
    privateKey: PrivateKey,
    challenge: PeerIDAuthChallenge,
    publicKey: PublicKey,
    hostname: string,
    clientSender: bool = true,
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
  let bytesToSign =
    getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
  PeerIDAuthSignature(
    base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
  )

proc checkSignature*(
    serverSig: PeerIDAuthSignature,
    serverPublicKey: PublicKey,
    challengeServer: PeerIDAuthChallenge,
    clientPublicKey: PublicKey,
    hostname: string,
): bool {.raises: [PeerIDAuthError].} =
  let bytesToSign =
    getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
  var serverSignature: Signature
  try:
    if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
      raise newException(
        PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
      )
  except ValueError as exc:
    raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)

  serverSignature.verify(
    bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
  )

method post*(
    self: PeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
  let rawResponse = await HttpClientRequestRef
  .post(
    self.session,
    uri,
    body = payload,
    headers = [
      ("Content-Type", "application/json"),
      ("User-Agent", NimLibp2pUserAgent),
      ("Authorization", authHeader),
    ],
  )
  .get()
  .send()

  PeerIDAuthResponse(
    status: rawResponse.status,
    headers: rawResponse.headers,
    body: await rawResponse.getBodyBytes(),
  )

method get*(
    self: PeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
  let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
  PeerIDAuthResponse(
    status: rawResponse.status,
    headers: rawResponse.headers,
    body: await rawResponse.getBodyBytes(),
  )

proc requestAuthentication*(
    self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthAuthenticationResponse] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  let response =
    try:
      await self.get($uri)
    except HttpError as exc:
      raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)

  let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
  if wwwAuthenticate == "":
    raise newException(PeerIDAuthError, "WWW-authenticate not present in response")

  let serverPubkey: PublicKey =
    try:
      PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
        raise newException(PeerIDAuthError, "Failed to initialize server public-key")
    except ValueError as exc:
      raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)

  PeerIDAuthAuthenticationResponse(
    challengeClient: extractField(wwwAuthenticate, "challenge-client"),
    opaque: extractField(wwwAuthenticate, "opaque"),
    serverPubkey: serverPubkey,
  )

proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
  try:
    pubkey.getBytes().valueOr:
      raise
        newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
  except ValueError as exc:
    raise newException(
      PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
    )

proc parse3339DateTime(
    timeStr: string
): DateTime {.raises: [ValueError, TimeParseError].} =
  let parts = timeStr.split('.')
  let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
  let millis = parseInt(parts[1].strip(chars = {'Z'}))
  result = base + initDuration(milliseconds = millis)

proc requestAuthorization*(
    self: PeerIDAuthClient,
    peerInfo: PeerInfo,
    uri: Uri,
    challengeClient: PeerIDAuthChallenge,
    challengeServer: PeerIDAuthChallenge,
    serverPubkey: PublicKey,
    opaque: PeerIDAuthOpaque,
    payload: auto,
): Future[PeerIDAuthAuthorizationResponse] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
  let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
  let authHeader =
    PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
    "\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
  let response =
    try:
      await self.post($uri, $payload, authHeader)
    except HttpError as exc:
      raise newException(
        PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
      )

  let authenticationInfo = response.headers.getString("authentication-info")

  let bearerExpires =
    try:
      Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
    except ValueError, PeerIDAuthError, TimeParseError:
      Opt.none(DateTime)

  PeerIDAuthAuthorizationResponse(
    sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
    bearer: BearerToken(
      token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
    ),
    response: response,
  )

proc sendWithoutBearer(
    self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
): Future[(BearerToken, PeerIDAuthResponse)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  # Authenticate via the three-way handshake defined in the PeerID Auth spec
  # https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md

  let authenticationResponse = await self.requestAuthentication(uri)

  let challengeServer = self.rng.randomChallenge()
  let authorizationResponse = await self.requestAuthorization(
    peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
    authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
  )

  if not checkSignature(
    authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
    peerInfo.publicKey, uri.hostname,
  ):
    raise newException(PeerIDAuthError, "Failed to validate server's signature")

  return (authorizationResponse.bearer, authorizationResponse.response)

proc sendWithBearer(
    self: PeerIDAuthClient,
    uri: Uri,
    peerInfo: PeerInfo,
    payload: auto,
    bearer: BearerToken,
): Future[(BearerToken, PeerIDAuthResponse)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  if bearer.expires.isSome and DateTime(bearer.expires.get) <= now():
    raise newException(PeerIDAuthError, "Bearer expired")
  let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
  let response =
    try:
      await self.post($uri, $payload, authHeader)
    except HttpError as exc:
      raise newException(
        PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
      )
  return (bearer, response)

proc send*(
    self: PeerIDAuthClient,
    uri: Uri,
    peerInfo: PeerInfo,
    payload: auto,
    bearer: BearerToken = BearerToken(),
): Future[(BearerToken, PeerIDAuthResponse)] {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  if bearer.token == "":
    await self.sendWithoutBearer(uri, peerInfo, payload)
  else:
    await self.sendWithBearer(uri, peerInfo, payload, bearer)

proc close*(
    self: PeerIDAuthClient
): Future[void] {.async: (raises: [CancelledError]).} =
  await self.session.closeWait()
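A hypothetical end-to-end sketch of the client above: the first `send` runs the full handshake and returns a bearer token, which later calls reuse. The endpoint URL, proc name, and JSON payloads are placeholders:

proc authRoundTrip(rng: ref HmacDrbgContext, peerInfo: PeerInfo) {.
    async: (raises: [PeerIDAuthError, CancelledError])
.} =
  let client = PeerIDAuthClient.new(rng)
  let uri = parseUri("https://registration.example.com/register")
  # first call: three-way handshake, yields a bearer token
  let (bearer, response) = await client.send(uri, peerInfo, %*{"name": "my-node"})
  # later calls: skip the handshake by presenting the bearer token
  let (_, response2) = await client.send(uri, peerInfo, %*{"ping": true}, bearer)
  await client.close()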
libp2p/peeridauth/mockclient.nim (new file, 41 lines)
@@ -0,0 +1,41 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

{.push raises: [].}

import chronos, chronos/apps/http/httpclient
import ../crypto/crypto

import ./client

export client

type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
  mockedStatus*: int
  mockedHeaders*: HttpTable
  mockedBody*: seq[byte]

proc new*(
    T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
  MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)

method post*(
    self: MockPeerIDAuthClient, uri: string, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
  PeerIDAuthResponse(
    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
  )

method get*(
    self: MockPeerIDAuthClient, uri: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
  PeerIDAuthResponse(
    status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
  )
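A test sketch using the mock, assuming chronos' `HttpTable.init` accepting key/value pairs and nim-libp2p's usual `newRng()` helper; the canned header value is a placeholder and only exercises the header-parsing path without a live server:

let mock = MockPeerIDAuthClient.new(newRng())
mock.mockedStatus = 401
mock.mockedHeaders = HttpTable.init(
  [("WWW-Authenticate", "libp2p-PeerID challenge-client=\"...\", opaque=\"...\"")]
)
mock.mockedBody = @[]
# requestAuthentication now consumes the canned response instead of the network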
@@ -11,7 +11,7 @@
{.push public.}

import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import pkg/[chronos, chronicles, results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility

export peerid, multiaddress, crypto, routing_record, errors, results
@@ -22,7 +22,7 @@ type
  PeerInfoError* = object of LPError

  AddressMapper* = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.
    gcsafe, raises: []
    gcsafe, async: (raises: [CancelledError])
  .} ## A proc that is expected to resolve the listen addresses into dialable addresses

  PeerInfo* {.public.} = ref object
@@ -52,7 +52,7 @@ func shortLog*(p: PeerInfo): auto =
chronicles.formatIt(PeerInfo):
  shortLog(it)

proc update*(p: PeerInfo) {.async.} =
proc update*(p: PeerInfo) {.async: (raises: [CancelledError]).} =
  # p.addrs.len == 0 overrides addrs only if it is the first time update is being executed or if the field is empty.
  # p.addressMappers.len == 0 is for when all addressMappers have been removed,
  # and we wish to have addrs in its initial state, i.e., a copy of listenAddrs.
@@ -101,8 +101,10 @@ proc new*(
  let pubkey =
    try:
      key.getPublicKey().tryGet()
    except CatchableError:
      raise newException(PeerInfoError, "invalid private key")
    except CatchableError as e:
      raise newException(
        PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
      )

  let peerId = PeerId.init(key).tryGet()
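Under the new definition an address mapper is itself an async proc restricted to `CancelledError`. A minimal conforming mapper, assuming an existing `peerInfo` and an async context:

let identityMapper: AddressMapper = proc(
    listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  # a real mapper would e.g. substitute an externally observed address here
  return listenAddrs

peerInfo.addressMappers.add(identityMapper)
await peerInfo.update() # re-derives peerInfo.addrs through all mappers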
@@ -63,6 +63,7 @@ type
  KeyBook* {.public.} = ref object of PeerBook[PublicKey]

  AgentBook* {.public.} = ref object of PeerBook[string]
  LastSeenBook* {.public.} = ref object of PeerBook[Opt[MultiAddress]]
  ProtoVersionBook* {.public.} = ref object of PeerBook[string]
  SPRBook* {.public.} = ref object of PeerBook[Envelope]

@@ -145,18 +146,24 @@ proc del*(peerStore: PeerStore, peerId: PeerId) {.public.} =
  for _, book in peerStore.books:
    book.deletor(peerId)

proc updatePeerInfo*(peerStore: PeerStore, info: IdentifyInfo) =
  if info.addrs.len > 0:
proc updatePeerInfo*(
    peerStore: PeerStore,
    info: IdentifyInfo,
    observedAddr: Opt[MultiAddress] = Opt.none(MultiAddress),
) =
  if len(info.addrs) > 0:
    peerStore[AddressBook][info.peerId] = info.addrs

  peerStore[LastSeenBook][info.peerId] = observedAddr

  info.pubkey.withValue(pubkey):
    peerStore[KeyBook][info.peerId] = pubkey

  info.agentVersion.withValue(agentVersion):
    peerStore[AgentBook][info.peerId] = agentVersion.string
    peerStore[AgentBook][info.peerId] = agentVersion

  info.protoVersion.withValue(protoVersion):
    peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
    peerStore[ProtoVersionBook][info.peerId] = protoVersion

  if info.protos.len > 0:
    peerStore[ProtoBook][info.peerId] = info.protos
@@ -181,7 +188,16 @@ proc cleanup*(peerStore: PeerStore, peerId: PeerId) =
    peerStore.del(peerStore.toClean[0])
    peerStore.toClean.delete(0)

proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
proc identify*(
    peerStore: PeerStore, muxer: Muxer
) {.
    async: (
      raises: [
        CancelledError, IdentityNoMatchError, IdentityInvalidMsgError, MultiStreamError,
        LPStreamError, MuxerError,
      ]
    )
.} =
  # new stream for identify
  var stream = await muxer.newStream()
  if stream == nil:
@@ -200,7 +216,7 @@ proc identify*(peerStore: PeerStore, muxer: Muxer) {.async.} =
      knownAgent = shortAgent
    muxer.connection.setShortAgent(knownAgent)

    peerStore.updatePeerInfo(info)
    peerStore.updatePeerInfo(info, stream.observedAddr)
  finally:
    await stream.closeWithEOF()
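Since `identify` now forwards the stream's observed address, the new `LastSeenBook` records the address each peer saw us from. Reading it back is a one-liner, with `somePeerId` standing in for a connected peer:

let lastSeen = peerStore[LastSeenBook][somePeerId] # Opt[MultiAddress]
lastSeen.withValue(observed):
  debug "address observed by peer", observed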
@@ -11,13 +11,11 @@

{.push raises: [].}

import ../varint, ../utility, stew/[endians2, results]
import ../varint, ../utility, stew/endians2, results
export results, utility

{.push public.}

const MaxMessageSize = 1'u shl 22

type
  ProtoFieldKind* = enum
    ## Protobuf's field types enum
@@ -39,7 +37,6 @@ type
    buffer*: seq[byte]
    offset*: int
    length*: int
    maxSize*: uint

  ProtoHeader* = object
    wire*: ProtoFieldKind
@@ -63,7 +60,6 @@ type
    VarintDecode
    MessageIncomplete
    BufferOverflow
    MessageTooBig
    BadWireType
    IncorrectBlob
    RequiredFieldMissing
@@ -99,11 +95,14 @@ template getProtoHeader*(field: ProtoField): uint64 =
template toOpenArray*(pb: ProtoBuffer): untyped =
  toOpenArray(pb.buffer, pb.offset, len(pb.buffer) - 1)

template lenu64*(x: untyped): untyped =
  uint64(len(x))

template isEmpty*(pb: ProtoBuffer): bool =
  len(pb.buffer) - pb.offset <= 0

template isEnough*(pb: ProtoBuffer, length: int): bool =
  len(pb.buffer) - pb.offset - length >= 0
template isEnough*(pb: ProtoBuffer, length: uint64): bool =
  pb.offset <= len(pb.buffer) and length <= uint64(len(pb.buffer) - pb.offset)

template getPtr*(pb: ProtoBuffer): pointer =
  cast[pointer](unsafeAddr pb.buffer[pb.offset])
@@ -127,33 +126,25 @@ proc vsizeof*(field: ProtoField): int {.inline.} =
    0

proc initProtoBuffer*(
    data: seq[byte], offset = 0, options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
    data: seq[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
  ## Initialize ProtoBuffer with shallow copy of ``data``.
  result.buffer = data
  result.offset = offset
  result.options = options
  result.maxSize = maxSize

proc initProtoBuffer*(
    data: openArray[byte],
    offset = 0,
    options: set[ProtoFlags] = {},
    maxSize = MaxMessageSize,
    data: openArray[byte], offset = 0, options: set[ProtoFlags] = {}
): ProtoBuffer =
  ## Initialize ProtoBuffer with copy of ``data``.
  result.buffer = @data
  result.offset = offset
  result.options = options
  result.maxSize = maxSize

proc initProtoBuffer*(
    options: set[ProtoFlags] = {}, maxSize = MaxMessageSize
): ProtoBuffer =
proc initProtoBuffer*(options: set[ProtoFlags] = {}): ProtoBuffer =
  ## Initialize ProtoBuffer with new sequence of capacity ``cap``.
  result.buffer = newSeq[byte]()
  result.options = options
  result.maxSize = maxSize
  if WithVarintLength in options:
    # Our buffer will start from position 10, so we can store length of buffer
    # in [0, 9].
@@ -194,12 +185,12 @@ proc write*[T: ProtoScalar](pb: var ProtoBuffer, field: int, value: T) =
    doAssert(vres.isOk())
    pb.offset += length
  elif T is float32:
    doAssert(pb.isEnough(sizeof(T)))
    doAssert(pb.isEnough(uint64(sizeof(T))))
    let u32 = cast[uint32](value)
    pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
    pb.offset += sizeof(T)
  elif T is float64:
    doAssert(pb.isEnough(sizeof(T)))
    doAssert(pb.isEnough(uint64(sizeof(T))))
    let u64 = cast[uint64](value)
    pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
    pb.offset += sizeof(T)
@@ -242,12 +233,12 @@ proc writePacked*[T: ProtoScalar](
    doAssert(vres.isOk())
    pb.offset += length
  elif T is float32:
    doAssert(pb.isEnough(sizeof(T)))
    doAssert(pb.isEnough(uint64(sizeof(T))))
    let u32 = cast[uint32](item)
    pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u32.toBytesLE()
    pb.offset += sizeof(T)
  elif T is float64:
    doAssert(pb.isEnough(sizeof(T)))
    doAssert(pb.isEnough(uint64(sizeof(T))))
    let u64 = cast[uint64](item)
    pb.buffer[pb.offset ..< pb.offset + sizeof(T)] = u64.toBytesLE()
    pb.offset += sizeof(T)
@@ -268,7 +259,7 @@ proc write*[T: byte | char](pb: var ProtoBuffer, field: int, value: openArray[T]
    doAssert(lres.isOk())
    pb.offset += length
  if len(value) > 0:
    doAssert(pb.isEnough(len(value)))
    doAssert(pb.isEnough(value.lenu64))
    copyMem(addr pb.buffer[pb.offset], unsafeAddr value[0], len(value))
    pb.offset += len(value)

@@ -327,13 +318,13 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
    else:
      err(ProtoError.VarintDecode)
  of ProtoFieldKind.Fixed32:
    if data.isEnough(sizeof(uint32)):
    if data.isEnough(uint64(sizeof(uint32))):
      data.offset += sizeof(uint32)
      ok()
    else:
      err(ProtoError.VarintDecode)
  of ProtoFieldKind.Fixed64:
    if data.isEnough(sizeof(uint64)):
    if data.isEnough(uint64(sizeof(uint64))):
      data.offset += sizeof(uint64)
      ok()
    else:
@@ -343,14 +334,11 @@ proc skipValue(data: var ProtoBuffer, header: ProtoHeader): ProtoResult[void] =
    var bsize = 0'u64
    if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
      data.offset += length
      if bsize <= uint64(data.maxSize):
        if data.isEnough(int(bsize)):
          data.offset += int(bsize)
          ok()
        else:
          err(ProtoError.MessageIncomplete)
      if data.isEnough(bsize):
        data.offset += int(bsize)
        ok()
      else:
        err(ProtoError.MessageTooBig)
        err(ProtoError.MessageIncomplete)
    else:
      err(ProtoError.VarintDecode)
  of ProtoFieldKind.StartGroup, ProtoFieldKind.EndGroup:
@@ -382,7 +370,7 @@ proc getValue[T: ProtoScalar](
      err(ProtoError.VarintDecode)
  elif T is float32:
    doAssert(header.wire == ProtoFieldKind.Fixed32)
    if data.isEnough(sizeof(float32)):
    if data.isEnough(uint64(sizeof(float32))):
      outval = cast[float32](fromBytesLE(uint32, data.toOpenArray()))
      data.offset += sizeof(float32)
      ok()
@@ -390,7 +378,7 @@ proc getValue[T: ProtoScalar](
      err(ProtoError.MessageIncomplete)
  elif T is float64:
    doAssert(header.wire == ProtoFieldKind.Fixed64)
    if data.isEnough(sizeof(float64)):
    if data.isEnough(uint64(sizeof(float64))):
      outval = cast[float64](fromBytesLE(uint64, data.toOpenArray()))
      data.offset += sizeof(float64)
      ok()
@@ -410,22 +398,19 @@ proc getValue[T: byte | char](
    outLength = 0
  if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
    data.offset += length
    if bsize <= uint64(data.maxSize):
      if data.isEnough(int(bsize)):
        outLength = int(bsize)
        if len(outBytes) >= int(bsize):
          if bsize > 0'u64:
            copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
          data.offset += int(bsize)
          ok()
        else:
          # Buffer overflow should not be critical failure
          data.offset += int(bsize)
          err(ProtoError.BufferOverflow)
    if data.isEnough(bsize):
      outLength = int(bsize)
      if len(outBytes) >= int(bsize):
        if bsize > 0'u64:
          copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
        data.offset += int(bsize)
        ok()
      else:
        err(ProtoError.MessageIncomplete)
        # Buffer overflow should not be critical failure
        data.offset += int(bsize)
        err(ProtoError.BufferOverflow)
    else:
      err(ProtoError.MessageTooBig)
      err(ProtoError.MessageIncomplete)
  else:
    err(ProtoError.VarintDecode)

@@ -439,17 +424,14 @@ proc getValue[T: seq[byte] | string](

  if PB.getUVarint(data.toOpenArray(), length, bsize).isOk():
    data.offset += length
    if bsize <= uint64(data.maxSize):
      if data.isEnough(int(bsize)):
        outBytes.setLen(bsize)
        if bsize > 0'u64:
          copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
        data.offset += int(bsize)
        ok()
      else:
        err(ProtoError.MessageIncomplete)
    if data.isEnough(bsize):
      outBytes.setLen(bsize)
      if bsize > 0'u64:
        copyMem(addr outBytes[0], addr data.buffer[data.offset], int(bsize))
      data.offset += int(bsize)
      ok()
    else:
      err(ProtoError.MessageTooBig)
      err(ProtoError.MessageIncomplete)
  else:
    err(ProtoError.VarintDecode)
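The switch to a `uint64` `isEnough` lets length fields decoded from the wire be bounds-checked without first narrowing them to `int`, which is also why the separate `maxSize` / `MessageTooBig` guard could be dropped: an oversized length now simply fails the bounds check. An illustrative sketch of the new semantics:

var pb = initProtoBuffer(@[byte 0x0A, 0x02, 0x61, 0x62]) # field 1, blob "ab"
doAssert pb.isEnough(4'u64)           # the whole buffer is available
doAssert not pb.isEnough(uint64.high) # an absurd wire length is rejected safely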
@@ -9,7 +9,7 @@

{.push raises: [].}

import stew/results
import results
import chronos, chronicles
import ../../../switch, ../../../multiaddress, ../../../peerid
import core
@@ -19,7 +19,9 @@ logScope:

type AutonatClient* = ref object of RootObj

proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
    conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let pb = AutonatDial(
    peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
  ).encode()
@@ -30,7 +32,9 @@ method dialMe*(
    switch: Switch,
    pid: PeerId,
    addrs: seq[MultiAddress] = newSeq[MultiAddress](),
): Future[MultiAddress] {.base, async.} =
): Future[MultiAddress] {.
    base, async: (raises: [AutonatError, AutonatUnreachableError, CancelledError])
.} =
  proc getResponseOrRaise(
      autonatMsg: Opt[AutonatMsg]
  ): AutonatDialResponse {.raises: [AutonatError].} =
@@ -47,7 +51,9 @@ method dialMe*(
      await switch.dial(pid, @[AutonatCodec])
    else:
      await switch.dial(pid, addrs, AutonatCodec)
  except CatchableError as err:
  except CancelledError as err:
    raise err
  except DialFailedError as err:
    raise
      newException(AutonatError, "Unexpected error when dialling: " & err.msg, err)

@@ -61,14 +67,37 @@ method dialMe*(
      incomingConnection.cancel()
        # Safer to always try to cancel because we aren't sure if the peer dialled us or not
    if incomingConnection.completed():
      await (await incomingConnection).connection.close()
  trace "sending Dial", addrs = switch.peerInfo.addrs
  await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
  let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
      try:
        await (await incomingConnection).connection.close()
      except AlreadyExpectingConnectionError as e:
        # this error is already handled above and cannot happen at this point
        error "Unexpected error", description = e.msg

  try:
    trace "sending Dial", addrs = switch.peerInfo.addrs
    await conn.sendDial(switch.peerInfo.peerId, switch.peerInfo.addrs)
  except CancelledError as e:
    raise e
  except CatchableError as e:
    raise newException(AutonatError, "Sending dial failed", e)

  var respBytes: seq[byte]
  try:
    respBytes = await conn.readLp(1024)
  except CancelledError as e:
    raise e
  except CatchableError as e:
    raise newException(AutonatError, "read Dial response failed: " & e.msg, e)

  let response = getResponseOrRaise(AutonatMsg.decode(respBytes))

  return
    case response.status
    of ResponseStatus.Ok:
      response.ma.tryGet()
      try:
        response.ma.tryGet()
      except ResultError[void]:
        raiseAssert("checked with if")
    of ResponseStatus.DialError:
      raise newException(
        AutonatUnreachableError, "Peer could not dial us back: " & response.text.get("")
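Callers of `dialMe` can now match on the declared error set instead of a blanket `CatchableError`. A sketch, with `client`, `switch` and `peer` as stand-ins for values in scope:

try:
  let observed = await client.dialMe(switch, peer)
  info "peer dialed us back", observed
except AutonatUnreachableError:
  info "not reachable from the outside"
except AutonatError as exc:
  debug "autonat probe failed", description = exc.msg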
@@ -9,9 +9,10 @@

{.push raises: [].}

import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles
import ../../../multiaddress, ../../../peerid, ../../../errors
import ../../../protobuf/minprotobuf

logScope:
  topics = "libp2p autonat"
@@ -10,7 +10,7 @@
{.push raises: [].}

import std/[sets, sequtils]
import stew/results
import results
import chronos, chronicles
import
  ../../protocol,
@@ -32,7 +32,9 @@ type Autonat* = ref object of LPProtocol
  switch*: Switch
  dialTimeout: Duration

proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc sendDial(
    conn: Connection, pid: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [LPStreamError, CancelledError]).} =
  let pb = AutonatDial(
    peerInfo: Opt.some(AutonatPeerInfo(id: Opt.some(pid), addrs: addrs))
  ).encode()
@@ -40,28 +42,37 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}

proc sendResponseError(
    conn: Connection, status: ResponseStatus, text: string = ""
) {.async.} =
) {.async: (raises: [CancelledError]).} =
  let pb = AutonatDialResponse(
    status: status,
    text:
      if text == "":
        Opt.none(string)
      else:
        Opt.some(text)
      ,
        Opt.some(text),
    ma: Opt.none(MultiAddress),
  ).encode()
  await conn.writeLp(pb.buffer)
  try:
    await conn.writeLp(pb.buffer)
  except LPStreamError as exc:
    trace "autonat failed to send response error", description = exc.msg, conn

proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
proc sendResponseOk(
    conn: Connection, ma: MultiAddress
) {.async: (raises: [CancelledError]).} =
  let pb = AutonatDialResponse(
    status: ResponseStatus.Ok, text: Opt.some("Ok"), ma: Opt.some(ma)
  ).encode()
  await conn.writeLp(pb.buffer)
  try:
    await conn.writeLp(pb.buffer)
  except LPStreamError as exc:
    trace "autonat failed to send response ok", description = exc.msg, conn

proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.async.} =
proc tryDial(
    autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]
) {.async: (raises: [DialFailedError, CancelledError]).} =
  await autonat.sem.acquire()
  var futs: seq[Future[Opt[MultiAddress]]]
  var futs: seq[Future[Opt[MultiAddress]].Raising([DialFailedError, CancelledError])]
  try:
    # This is to bypass the per peer max connections limit
    let outgoingConnection =
@@ -72,7 +83,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
      return
    # Safer to always try to cancel because we aren't sure if the connection was established
    defer:
      outgoingConnection.cancel()
      outgoingConnection.cancelSoon()

    # tryDial is to bypass the global max connections limit
    futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
    let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
@@ -89,9 +101,6 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
  except AsyncTimeoutError as exc:
    debug "Dial timeout", addrs, description = exc.msg
    await conn.sendResponseError(DialError, "Dial timeout")
  except CatchableError as exc:
    debug "Unexpected error", addrs, description = exc.msg
    await conn.sendResponseError(DialError, "Unexpected error")
  finally:
    autonat.sem.release()
    for f in futs:
@@ -155,7 +164,9 @@ proc new*(
): T =
  let autonat =
    T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
  proc handleStream(conn: Connection, proto: string) {.async.} =
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
        raise newException(AutonatError, "Received malformed message")
@@ -163,6 +174,7 @@ proc new*(
        raise newException(AutonatError, "Message type should be dial")
      await autonat.handleDial(conn, msg)
    except CancelledError as exc:
      trace "cancelled autonat handler"
      raise exc
    except CatchableError as exc:
      debug "exception in autonat handler", description = exc.msg, conn
@@ -50,7 +50,7 @@ type

  StatusAndConfidenceHandler* = proc(
    networkReachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.gcsafe, raises: [].}
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).}

proc new*(
    T: typedesc[AutonatService],
@@ -79,7 +79,7 @@ proc new*(
    enableAddressMapper: enableAddressMapper,
  )

proc callHandler(self: AutonatService) {.async.} =
proc callHandler(self: AutonatService) {.async: (raises: [CancelledError]).} =
  if not isNil(self.statusAndConfidenceHandler):
    await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)

@@ -92,7 +92,7 @@ proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =

proc handleAnswer(
    self: AutonatService, ans: NetworkReachability
): Future[bool] {.async.} =
): Future[bool] {.async: (raises: [CancelledError]).} =
  if ans == Unknown:
    return

@@ -127,7 +127,7 @@ proc handleAnswer(

proc askPeer(
    self: AutonatService, switch: Switch, peerId: PeerId
): Future[NetworkReachability] {.async.} =
): Future[NetworkReachability] {.async: (raises: [CancelledError]).} =
  logScope:
    peerId = $peerId

@@ -160,7 +160,9 @@ proc askPeer(
    await switch.peerInfo.update()
  return ans

proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
proc askConnectedPeers(
    self: AutonatService, switch: Switch
) {.async: (raises: [CancelledError]).} =
  trace "Asking peers for reachability"
  var peers = switch.connectedPeers(Direction.Out)
  self.rng.shuffle(peers)
@@ -175,13 +177,15 @@ proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
    if (await askPeer(self, switch, peer)) != Unknown:
      answersFromPeers.inc()

proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.async.} =
proc schedule(
    service: AutonatService, switch: Switch, interval: Duration
) {.async: (raises: [CancelledError]).} =
  heartbeat "Scheduling AutonatService run", interval:
    await service.run(switch)

proc addressMapper(
    self: AutonatService, peerStore: PeerStore, listenAddrs: seq[MultiAddress]
): Future[seq[MultiAddress]] {.async.} =
): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
  if self.networkReachability != NetworkReachability.Reachable:
    return listenAddrs

@@ -198,10 +202,12 @@ proc addressMapper(
      addrs.add(processedMA)
  return addrs

method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method setup*(
    self: AutonatService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
  self.addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.async.} =
  ): Future[seq[MultiAddress]] {.async: (raises: [CancelledError]).} =
    return await addressMapper(self, switch.peerStore, listenAddrs)

  info "Setting up AutonatService"
@@ -210,22 +216,30 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
  if self.askNewConnectedPeers:
    self.newConnectedPeerHandler = proc(
        peerId: PeerId, event: PeerEvent
    ): Future[void] {.async.} =
    ): Future[void] {.async: (raises: [CancelledError]).} =
      discard askPeer(self, switch, peerId)

    switch.connManager.addPeerEventHandler(
      self.newConnectedPeerHandler, PeerEventKind.Joined
    )

  self.scheduleInterval.withValue(interval):
    self.scheduleHandle = schedule(self, switch, interval)

  if self.enableAddressMapper:
    switch.peerInfo.addressMappers.add(self.addressMapper)

  return hasBeenSetup

method run*(self: AutonatService, switch: Switch) {.async, public.} =
method run*(
    self: AutonatService, switch: Switch
) {.public, async: (raises: [CancelledError]).} =
  trace "Running AutonatService"
  await askConnectedPeers(self, switch)

method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
method stop*(
    self: AutonatService, switch: Switch
): Future[bool] {.public, async: (raises: [CancelledError]).} =
  info "Stopping AutonatService"
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
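A handler conforming to the new `StatusAndConfidenceHandler` shape; registration through the service's handler setter is assumed to work as before, and `autonatService` is a stand-in for an existing instance:

let onStatus: StatusAndConfidenceHandler = proc(
    networkReachability: NetworkReachability, confidence: Opt[float]
) {.gcsafe, async: (raises: [CancelledError]).} =
  info "reachability updated", networkReachability, confidence

autonatService.statusAndConfidenceHandler(onStatus)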
@@ -11,7 +11,7 @@

import std/sequtils

import stew/results
import results
import chronos, chronicles

import core
@@ -34,7 +34,7 @@ proc new*(

proc startSync*(
    self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]
) {.async.} =
) {.async: (raises: [DcutrError, CancelledError]).} =
  logScope:
    peerId = switch.peerInfo.peerId

@@ -107,7 +107,9 @@ proc startSync*(
      description = err.msg
    raise newException(
      DcutrError,
      "Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
      "Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
        err.msg,
      err,
    )
  finally:
    if stream != nil:
@@ -15,6 +15,7 @@ import chronos
import stew/objects

import ../../../multiaddress, ../../../errors, ../../../stream/connection
import ../../../protobuf/minprotobuf

export multiaddress

@@ -49,7 +50,9 @@ proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrEr
    raise newException(DcutrError, "Received malformed message")
  return dcutrMsg

proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
proc send*(
    conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
  await conn.writeLp(pb.buffer)
@@ -10,8 +10,8 @@
{.push raises: [].}

import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import stew/objects
import results, chronos, chronicles

import core
import
@@ -30,7 +30,9 @@ proc new*(
    connectTimeout = 15.seconds,
    maxDialableAddrs = 8,
): T =
  proc handleStream(stream: Connection, proto: string) {.async.} =
  proc handleStream(
      stream: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    var peerDialableAddrs: seq[MultiAddress]
    try:
      let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -77,6 +79,7 @@ proc new*(
        for fut in futs:
          fut.cancel()
    except CancelledError as err:
      trace "cancelled Dcutr receiver"
      raise err
    except AllFuturesFailedError as err:
      debug "Dcutr receiver could not connect to the remote peer, " &
@@ -29,11 +29,12 @@ const RelayClientMsgSize = 4096
type
  RelayClientError* = object of LPError
  ReservationError* = object of RelayClientError
  RelayV1DialError* = object of RelayClientError
  RelayV2DialError* = object of RelayClientError
  RelayDialError* = object of DialFailedError
  RelayV1DialError* = object of RelayDialError
  RelayV2DialError* = object of RelayDialError
  RelayClientAddConn* = proc(
    conn: Connection, duration: uint32, data: uint64
  ): Future[void] {.gcsafe, raises: [].}
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).}
  RelayClient* = ref object of Relay
    onNewConnection*: RelayClientAddConn
    canHop: bool
@@ -45,14 +46,21 @@ type
    limitDuration*: uint32 # seconds
    limitData*: uint64 # bytes

proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
proc sendStopError(
    conn: Connection, code: StatusV2
) {.async: (raises: [CancelledError]).} =
  trace "send stop status", status = $code & " (" & $ord(code) & ")"
  let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
  await conn.writeLp(encode(msg).buffer)
  try:
    let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
    await conn.writeLp(encode(msg).buffer)
  except CancelledError as e:
    raise e
  except LPStreamError as e:
    trace "failed to send stop status", description = e.msg

proc handleRelayedConnect(
    cl: RelayClient, conn: Connection, msg: StopMessage
) {.async.} =
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let
    # TODO: check the go version to see in which way this could fail
    # it's unclear in the spec
@@ -80,7 +88,7 @@ proc handleRelayedConnect(

proc reserve*(
    cl: RelayClient, peerId: PeerId, addrs: seq[MultiAddress] = @[]
): Future[Rsvp] {.async.} =
): Future[Rsvp] {.async: (raises: [ReservationError, DialFailedError, CancelledError]).} =
  let conn = await cl.switch.dial(peerId, addrs, RelayV2HopCodec)
  defer:
    await conn.close()
@@ -121,7 +129,7 @@ proc reserve*(

proc dialPeerV1*(
    cl: RelayClient, conn: Connection, dstPeerId: PeerId, dstAddrs: seq[MultiAddress]
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [CancelledError, RelayV1DialError]).} =
  var
    msg = RelayMessage(
      msgType: Opt.some(RelayType.Hop),
@@ -138,19 +146,20 @@ proc dialPeerV1*(
    await conn.writeLp(pb.buffer)
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
  except LPStreamError as exc:
    trace "error writing hop request", description = exc.msg
    raise exc
    raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)

  let msgRcvFromRelayOpt =
    try:
      RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
    except LPStreamError as exc:
      trace "error reading stop response", description = exc.msg
      await sendStatus(conn, StatusV1.HopCantOpenDstStream)
      raise exc
      raise
        newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)

  try:
    let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -165,10 +174,16 @@ proc dialPeerV1*(
    )
  except RelayV1DialError as exc:
    await sendStatus(conn, StatusV1.HopCantOpenDstStream)
    raise exc
    raise newException(
      RelayV1DialError,
      "Hop can't open destination stream after sendStatus: " & exc.msg,
      exc,
    )
  except ValueError as exc:
    await sendStatus(conn, StatusV1.HopCantOpenDstStream)
    raise newException(RelayV1DialError, exc.msg)
    raise newException(
      RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
    )
  result = conn

proc dialPeerV2*(
@@ -176,7 +191,7 @@ proc dialPeerV2*(
    conn: RelayConnection,
    dstPeerId: PeerId,
    dstAddrs: seq[MultiAddress],
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [RelayV2DialError, CancelledError]).} =
  let
    p = Peer(peerId: dstPeerId, addrs: dstAddrs)
    pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
@@ -191,7 +206,8 @@ proc dialPeerV2*(
    raise exc
  except CatchableError as exc:
    trace "error reading stop response", description = exc.msg
    raise newException(RelayV2DialError, exc.msg)
    raise
      newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)

  if msgRcvFromRelay.msgType != HopMessageType.Status:
    raise newException(RelayV2DialError, "Unexpected stop response")
@@ -202,7 +218,9 @@ proc dialPeerV2*(
  conn.limitData = msgRcvFromRelay.limit.data
  return conn

proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
proc handleStopStreamV2(
    cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
    await sendHopStatus(conn, MalformedMessage)
    return
@@ -214,7 +232,9 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
    trace "Unexpected client / relayv2 handshake", msgType = msg.msgType
    await sendStopError(conn, MalformedMessage)

proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
proc handleStop(
    cl: RelayClient, conn: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
  let src = msg.srcPeer.valueOr:
    await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
    return
@@ -241,7 +261,9 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.}
  else:
    await conn.close()

proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
proc handleStreamV1(
    cl: RelayClient, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
    await sendStatus(conn, StatusV1.MalformedMessage)
    return
@@ -290,7 +312,9 @@ proc new*(
    msgSize: msgSize,
    isCircuitRelayV1: circuitRelayV1,
  )
  proc handleStream(conn: Connection, proto: string) {.async.} =
  proc handleStream(
      conn: Connection, proto: string
  ) {.async: (raises: [CancelledError]).} =
    try:
      case proto
      of RelayV1Codec:
@@ -300,6 +324,7 @@ proc new*(
      of RelayV2HopCodec:
        await cl.handleHopStreamV2(conn)
    except CancelledError as exc:
      trace "cancelled client handler"
      raise exc
    except CatchableError as exc:
      trace "exception in client handler", description = exc.msg, conn
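Because both dial error types now sit under `RelayDialError`, itself a `DialFailedError`, callers can collapse their handling. A sketch with `client`, `relayConn`, `dstPeer` and `dstAddrs` as stand-ins:

try:
  let conn = await client.dialPeerV2(relayConn, dstPeer, dstAddrs)
except RelayDialError as exc: # catches RelayV1DialError and RelayV2DialError alike
  debug "relay dial failed", description = exc.msg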
@@ -10,8 +10,10 @@
{.push raises: [].}

import macros
import stew/[objects, results]
import stew/objects
import results
import ../../../peerinfo, ../../../signed_envelope
import ../../../protobuf/minprotobuf

# Circuit Relay V1 Message

@@ -112,7 +112,9 @@ proc isRelayed*(conn: Connection): bool =
    wrappedConn = wrappedConn.getWrapped()
  return false

proc handleReserve(r: Relay, conn: Connection) {.async.} =
proc handleReserve(
    r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
  if conn.isRelayed():
    trace "reservation attempt over relay connection", pid = conn.peerId
    await sendHopStatus(conn, PermissionDenied)
@@ -133,7 +135,9 @@ proc handleReserve(r: Relay, conn: Connection) {.async.} =
  r.rsvp[pid] = expire
  await conn.writeLp(encode(msg).buffer)

proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
proc handleConnect(
    r: Relay, connSrc: Connection, msg: HopMessage
) {.async: (raises: [CancelledError, LPStreamError]).} =
  if connSrc.isRelayed():
    trace "connection attempt over relay connection"
    await sendHopStatus(connSrc, PermissionDenied)
@@ -166,14 +170,14 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
      await r.switch.dial(dst, RelayV2StopCodec)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
    except DialFailedError as exc:
      trace "error opening relay stream", dst, description = exc.msg
      await sendHopStatus(connSrc, ConnectionFailed)
      return
  defer:
    await connDst.close()

  proc sendStopMsg() {.async.} =
  proc sendStopMsg() {.async: (raises: [SendStopError, CancelledError, LPStreamError]).} =
    let stopMsg = StopMessage(
      msgType: StopMessageType.Connect,
      peer: Opt.some(Peer(peerId: src, addrs: @[])),
@@ -209,7 +213,9 @@ proc handleConnect(r: Relay, connSrc: Connection, msg: HopMessage) {.async.} =
      await rconnDst.close()
  await bridge(rconnSrc, rconnDst)

proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
proc handleHopStreamV2*(
    r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
    await sendHopStatus(conn, MalformedMessage)
    return
@@ -225,7 +231,9 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =

# Relay V1

proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
proc handleHop*(
    r: Relay, connSrc: Connection, msg: RelayMessage
) {.async: (raises: [CancelledError]).} =
  r.streamCount.inc()
  defer:
    r.streamCount.dec()
@@ -271,7 +279,7 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
      await r.switch.dial(dst.peerId, RelayV1Codec)
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
    except DialFailedError as exc:
      trace "error opening relay stream", dst, description = exc.msg
      await sendStatus(connSrc, StatusV1.HopCantDialDst)
      return
@@ -309,7 +317,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
  trace "relaying connection", src, dst
  await bridge(connSrc, connDst)

proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc handleStreamV1(
    r: Relay, conn: Connection
) {.async: (raises: [CancelledError, LPStreamError]).} =
  let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
    await sendStatus(conn, StatusV1.MalformedMessage)
    return
@@ -333,9 +343,8 @@ proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
proc setup*(r: Relay, switch: Switch) =
  r.switch = switch
  r.switch.addPeerEventHandler(
    proc(peerId: PeerId, event: PeerEvent) {.async.} =
      r.rsvp.del(peerId)
    ,
    proc(peerId: PeerId, event: PeerEvent) {.async: (raises: [CancelledError]).} =
      r.rsvp.del(peerId),
    Left,
  )

@@ -360,7 +369,9 @@ proc new*(
    isCircuitRelayV1: circuitRelayV1,
  )

  proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
proc handleStream(
|
||||
conn: Connection, proto: string
|
||||
) {.async: (raises: [CancelledError]).} =
|
||||
try:
|
||||
case proto
|
||||
of RelayV2HopCodec:
|
||||
@@ -368,6 +379,7 @@ proc new*(
|
||||
of RelayV1Codec:
|
||||
await r.handleStreamV1(conn)
|
||||
except CancelledError as exc:
|
||||
trace "cancelled relayv2 handler"
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "exception in relayv2 handler", description = exc.msg, conn
|
||||
@@ -383,12 +395,15 @@ proc new*(
|
||||
r.handler = handleStream
|
||||
r
|
||||
|
||||
proc deletesReservation(r: Relay) {.async.} =
|
||||
proc deletesReservation(r: Relay) {.async: (raises: [CancelledError]).} =
|
||||
heartbeat "Reservation timeout", r.heartbeatSleepTime.seconds():
|
||||
let n = now().utc
|
||||
for k in toSeq(r.rsvp.keys):
|
||||
if n > r.rsvp[k]:
|
||||
r.rsvp.del(k)
|
||||
try:
|
||||
let n = now().utc
|
||||
for k in toSeq(r.rsvp.keys):
|
||||
if n > r.rsvp[k]:
|
||||
r.rsvp.del(k)
|
||||
except KeyError:
|
||||
raiseAssert "checked with in"
|
||||
|
||||
method start*(r: Relay): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
|
||||
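Note on the deletesReservation rewrite above: the loop walks a snapshot of the table keys taken with toSeq, so every rsvp[k] lookup targets a key that was just listed, and the new try/except KeyError branch exists only to satisfy the compiler's exception tracking. A minimal standalone sketch of the same snapshot-then-delete pattern (hypothetical peer names and expiry values, standard library only):

import std/[tables, sequtils]

var rsvp = {"peerA": 1, "peerB": 9, "peerC": 2}.toTable
let cutoff = 5

# Snapshot the keys first: mutating a Table while iterating its live view
# is not allowed, but iterating a copied seq of keys is safe.
for k in toSeq(rsvp.keys):
  if rsvp[k] < cutoff: # k was just listed, so the lookup cannot fail here
    rsvp.del(k)

assert toSeq(rsvp.keys) == @["peerB"]
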
@@ -29,71 +29,100 @@ type RelayTransport* = ref object of Transport
  queue: AsyncQueue[Connection]
  selfRunning: bool

method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
method start*(
    self: RelayTransport, ma: seq[MultiAddress]
) {.async: (raises: [LPError, transport.TransportError]).} =
  if self.selfRunning:
    trace "Relay transport already running"
    return

  self.client.onNewConnection = proc(
      conn: Connection, duration: uint32 = 0, data: uint64 = 0
  ) {.async.} =
  ) {.async: (raises: [CancelledError]).} =
    await self.queue.addLast(RelayConnection.new(conn, duration, data))
    await conn.join()
  self.selfRunning = true
  await procCall Transport(self).start(ma)
  trace "Starting Relay transport"

method stop*(self: RelayTransport) {.async.} =
method stop*(self: RelayTransport) {.async: (raises: []).} =
  self.running = false
  self.selfRunning = false
  self.client.onNewConnection = nil
  while not self.queue.empty():
    await self.queue.popFirstNoWait().close()
    try:
      await self.queue.popFirstNoWait().close()
    except AsyncQueueEmptyError:
      continue # checked with self.queue.empty()

method accept*(self: RelayTransport): Future[Connection] {.async.} =
method accept*(
    self: RelayTransport
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  result = await self.queue.popFirst()

proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
  let
    sma = toSeq(ma.items())
    relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
proc dial*(
    self: RelayTransport, ma: MultiAddress
): Future[Connection] {.async: (raises: [RelayDialError, CancelledError]).} =
  var
    relayAddrs: MultiAddress
    relayPeerId: PeerId
    dstPeerId: PeerId
  if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
    raise newException(RelayV2DialError, "Relay doesn't exist")
  if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
    raise newException(RelayV2DialError, "Destination doesn't exist")

  try:
    let sma = toSeq(ma.items())
    relayAddrs = sma[0 .. sma.len - 4].mapIt(it.tryGet()).foldl(a & b)
    if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
      raise newException(RelayDialError, "Relay doesn't exist")
    if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
      raise newException(RelayDialError, "Destination doesn't exist")
  except RelayDialError as e:
    raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
  except CatchableError:
    raise newException(RelayDialError, "dial address not valid")

  trace "Dial", relayPeerId, dstPeerId

  let conn = await self.client.switch.dial(
    relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
  )
  conn.dir = Direction.Out
  var rc: RelayConnection
  try:
    let conn = await self.client.switch.dial(
      relayPeerId, @[relayAddrs], @[RelayV2HopCodec, RelayV1Codec]
    )
    conn.dir = Direction.Out

    case conn.protocol
    of RelayV1Codec:
      return await self.client.dialPeerV1(conn, dstPeerId, @[])
    of RelayV2HopCodec:
      rc = RelayConnection.new(conn, 0, 0)
      return await self.client.dialPeerV2(rc, dstPeerId, @[])
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
    if not rc.isNil:
      await rc.close()
    raise exc
  except CancelledError as e:
    safeClose(rc)
    raise e
  except DialFailedError as e:
    safeClose(rc)
    raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
  except RelayV1DialError as e:
    safeClose(rc)
    raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
  except RelayV2DialError as e:
    safeClose(rc)
    raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)

method dial*(
    self: RelayTransport,
    hostname: string,
    ma: MultiAddress,
    peerId: Opt[PeerId] = Opt.none(PeerId),
): Future[Connection] {.async.} =
): Future[Connection] {.async: (raises: [transport.TransportError, CancelledError]).} =
  peerId.withValue(pid):
    let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
    result = await self.dial(address)
    try:
      let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
      result = await self.dial(address)
    except CancelledError as e:
      raise e
    except CatchableError as e:
      raise
        newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)

method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
  try:

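The dial paths above repeatedly re-raise a lower-level failure as a narrower error type while keeping the original exception reachable through newException's optional third argument. A self-contained sketch of that chaining pattern (hypothetical error names, standard library only):

type
  LowLevelError = object of CatchableError
  DialError = object of CatchableError

proc lowLevel() {.raises: [LowLevelError].} =
  raise newException(LowLevelError, "connection refused")

proc dialWrapped() {.raises: [DialError].} =
  try:
    lowLevel()
  except LowLevelError as e:
    # The third argument stores `e` as `parent`, so the root cause stays
    # inspectable while callers see only the narrower DialError contract.
    raise newException(DialError, "dial failed: " & e.msg, e)

try:
  dialWrapped()
except DialError as e:
  assert e.parent of LowLevelError
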
@@ -22,12 +22,17 @@ const

proc sendStatus*(
    conn: Connection, code: StatusV1
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
) {.async: (raises: [CancelledError]).} =
  trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
  let
    msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
    pb = encode(msg)
  conn.writeLp(pb.buffer)
  try:
    let
      msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
      pb = encode(msg)
    await conn.writeLp(pb.buffer)
  except CancelledError as e:
    raise e
  except LPStreamError as e:
    trace "error sending relay status", description = e.msg

proc sendHopStatus*(
    conn: Connection, code: StatusV2
@@ -64,8 +69,8 @@ proc bridge*(
  while not connSrc.closed() and not connDst.closed():
    try: # https://github.com/status-im/nim-chronos/issues/516
      discard await race(futSrc, futDst)
    except ValueError:
      raiseAssert("Futures list is not empty")
    except ValueError as e:
      raiseAssert("Futures list is not empty: " & e.msg)
    if futSrc.finished():
      bufRead = await futSrc
      if bufRead > 0:

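The bridge loop above races the two read futures and treats ValueError as impossible, because the list handed to race is never empty. A minimal chronos sketch of that behavior (hypothetical timeouts):

import chronos

proc raceDemo() {.async.} =
  let
    fast = sleepAsync(10.milliseconds)
    slow = sleepAsync(1.seconds)
  # race() completes as soon as the first future finishes; it raises
  # ValueError only when given an empty list, which is why the caller
  # above converts that case into an assertion.
  discard await race(fast, slow)
  assert fast.finished()
  slow.cancelSoon()

waitFor raceDemo()
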
@@ -13,8 +13,7 @@
{.push raises: [].}

import std/[sequtils, options, strutils, sugar]
import stew/results
import chronos, chronicles
import results, chronos, chronicles
import
  ../protobuf/minprotobuf,
  ../peerinfo,
@@ -148,12 +147,13 @@ proc new*(
  identify

method init*(p: Identify) =
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      trace "handling identify request", conn
      var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
      await conn.writeLp(pb.buffer)
    except CancelledError as exc:
      trace "cancelled identify handler"
      raise exc
    except CatchableError as exc:
      trace "exception in identify handler", description = exc.msg, conn
@@ -166,7 +166,12 @@ method init*(p: Identify) =

proc identify*(
    self: Identify, conn: Connection, remotePeerId: PeerId
): Future[IdentifyInfo] {.async.} =
): Future[IdentifyInfo] {.
    async: (
      raises:
        [IdentityInvalidMsgError, IdentityNoMatchError, LPStreamError, CancelledError]
    )
.} =
  trace "initiating identify", conn
  var message = await conn.readLp(64 * 1024)
  if len(message) == 0:
@@ -205,7 +210,7 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
  identifypush

proc init*(p: IdentifyPush) =
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    trace "handling identify push", conn
    try:
      var message = await conn.readLp(64 * 1024)
@@ -224,6 +229,7 @@ proc init*(p: IdentifyPush) =
      if not isNil(p.identifyHandler):
        await p.identifyHandler(conn.peerId, identInfo)
    except CancelledError as exc:
      trace "cancelled identify push handler"
      raise exc
    except CatchableError as exc:
      info "exception in identify push handler", description = exc.msg, conn
@@ -234,7 +240,9 @@ proc init*(p: IdentifyPush) =
  p.handler = handle
  p.codec = IdentifyPushCodec

proc push*(p: IdentifyPush, peerInfo: PeerInfo, conn: Connection) {.async, public.} =
proc push*(
    p: IdentifyPush, peerInfo: PeerInfo, conn: Connection
) {.public, async: (raises: [CancelledError, LPStreamError]).} =
  ## Send new `peerInfo`s to a connection
  var pb = encodeMsg(peerInfo, conn.observedAddr, true)
  await conn.writeLp(pb.buffer)

libp2p/protocols/kademlia/protobuf.nim (new file, 159 lines)
@@ -0,0 +1,159 @@
import ../../protobuf/minprotobuf
import ../../varint
import ../../utility
import results
import ../../multiaddress
import stew/objects
import stew/assign2
import options

type
  Record* {.public.} = object
    key*: Option[seq[byte]]
    value*: Option[seq[byte]]
    timeReceived*: Option[string]

  MessageType* = enum
    putValue = 0
    getValue = 1
    addProvider = 2
    getProviders = 3
    findNode = 4
    ping = 5 # Deprecated

  ConnectionType* = enum
    notConnected = 0
    connected = 1
    canConnect = 2 # Unused
    cannotConnect = 3 # Unused

  Peer* {.public.} = object
    id*: seq[byte]
    addrs*: seq[MultiAddress]
    connection*: ConnectionType

  Message* {.public.} = object
    msgType*: MessageType
    key*: Option[seq[byte]]
    record*: Option[Record]
    closerPeers*: seq[Peer]
    providerPeers*: seq[Peer]

proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}

proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}

proc encode*(record: Record): ProtoBuffer {.raises: [].} =
  var pb = initProtoBuffer()
  pb.writeOpt(1, record.key)
  pb.writeOpt(2, record.value)
  pb.writeOpt(5, record.timeReceived)
  pb.finish()
  return pb

proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
  var pb = initProtoBuffer()
  pb.write(1, peer.id)
  for address in peer.addrs:
    pb.write(2, address.data.buffer)
  pb.write(3, uint32(ord(peer.connection)))
  pb.finish()
  return pb

proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
  var pb = initProtoBuffer()

  pb.write(1, uint32(ord(msg.msgType)))

  pb.writeOpt(2, msg.key)

  msg.record.withValue(record):
    pb.writeOpt(3, msg.record)

  for peer in msg.closerPeers:
    pb.write(8, peer.encode())

  for peer in msg.providerPeers:
    pb.write(9, peer.encode())

  pb.finish()

  return pb

proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
  opt.withValue(v):
    pb.write(field, v)

proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
  pb.write(field, value.encode())

proc getOptionField[T: ProtoScalar | string | seq[byte]](
    pb: ProtoBuffer, field: int, output: var Option[T]
): ProtoResult[void] =
  var f: T
  if ?pb.getField(field, f):
    assign(output, some(f))
  ok()

proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
  var r: Record
  ?pb.getOptionField(1, r.key)
  ?pb.getOptionField(2, r.value)
  ?pb.getOptionField(5, r.timeReceived)
  return ok(some(r))

proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
  var
    p: Peer
    id: seq[byte]

  ?pb.getRequiredField(1, p.id)

  discard ?pb.getRepeatedField(2, p.addrs)

  var connVal: uint32
  if ?pb.getField(3, connVal):
    var connType: ConnectionType
    if not checkedEnumAssign(connType, connVal):
      return err(ProtoError.BadWireType)
    p.connection = connType

  return ok(some(p))

proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
  var
    m: Message
    key: seq[byte]
    recPb: seq[byte]
    closerPbs: seq[seq[byte]]
    providerPbs: seq[seq[byte]]

  var pb = initProtoBuffer(buf)

  var msgTypeVal: uint32
  ?pb.getRequiredField(1, msgTypeVal)

  var msgType: MessageType
  if not checkedEnumAssign(msgType, msgTypeVal):
    return err(ProtoError.BadWireType)

  m.msgType = msgType

  ?pb.getOptionField(2, m.key)

  if ?pb.getField(3, recPb):
    assign(m.record, ?Record.decode(initProtoBuffer(recPb)))

  discard ?pb.getRepeatedField(8, closerPbs)
  for ppb in closerPbs:
    let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
    peerOpt.withValue(peer):
      m.closerPeers.add(peer)

  discard ?pb.getRepeatedField(9, providerPbs)
  for ppb in providerPbs:
    let peer = ?Peer.decode(initProtoBuffer(ppb))
    peer.withValue(peer):
      m.providerPeers.add(peer)

  return ok(some(m))
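The decoders in this new file rely on stew's checkedEnumAssign so that an out-of-range wire value is rejected instead of silently producing an invalid enum. A self-contained sketch (hypothetical enum mirroring MessageType):

import stew/objects

type MsgKind = enum
  putValue = 0
  getValue = 1

var kind: MsgKind
# checkedEnumAssign only assigns when the raw integer maps to a declared
# member, guarding against malformed values coming off the wire.
assert checkedEnumAssign(kind, 1'u32)
assert kind == getValue
assert not checkedEnumAssign(kind, 42'u32) # unknown value is rejected
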
@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
logScope:
  topics = "libp2p perf"

type PerfClient* = ref object of RootObj
type Stats* = object
  isFinal*: bool
  uploadBytes*: uint
  downloadBytes*: uint
  duration*: Duration

type PerfClient* = ref object
  stats: Stats

proc new*(T: typedesc[PerfClient]): T =
  return T()

proc currentStats*(p: PerfClient): Stats =
  return p.stats

proc perf*(
    _: typedesc[PerfClient],
    conn: Connection,
    sizeToWrite: uint64 = 0,
    sizeToRead: uint64 = 0,
): Future[Duration] {.async, public.} =
  var
    size = sizeToWrite
    buf: array[PerfSize, byte]
  let start = Moment.now()
    p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
  trace "starting performance benchmark", conn, sizeToWrite, sizeToRead

  await conn.write(toSeq(toBytesBE(sizeToRead)))
  while size > 0:
    let toWrite = min(size, PerfSize)
    await conn.write(buf[0 ..< toWrite])
    size -= toWrite
  p.stats = Stats()

  await conn.close()
  try:
    var
      size = sizeToWrite
      buf: array[PerfSize, byte]

  size = sizeToRead
    let start = Moment.now()

  while size > 0:
    let toRead = min(size, PerfSize)
    await conn.readExactly(addr buf[0], toRead.int)
    size = size - toRead
    await conn.write(toSeq(toBytesBE(sizeToRead)))
    while size > 0:
      let toWrite = min(size, PerfSize)
      await conn.write(buf[0 ..< toWrite])
      size -= toWrite.uint

  let duration = Moment.now() - start
  trace "finishing performance benchmark", duration
  return duration
      # set stats using copy value to avoid race condition
      var statsCopy = p.stats
      statsCopy.duration = Moment.now() - start
      statsCopy.uploadBytes += toWrite.uint
      p.stats = statsCopy

    await conn.close()

    size = sizeToRead

    while size > 0:
      let toRead = min(size, PerfSize)
      await conn.readExactly(addr buf[0], toRead.int)
      size = size - toRead.uint

      # set stats using copy value to avoid race condition
      var statsCopy = p.stats
      statsCopy.duration = Moment.now() - start
      statsCopy.downloadBytes += toRead.uint
      p.stats = statsCopy
  except CancelledError as e:
    raise e
  except LPStreamError as e:
    raise e
  finally:
    p.stats.isFinal = true

  trace "finishing performance benchmark", duration = p.stats.duration

  return p.stats.duration

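The perf client above never mutates p.stats field-by-field mid-transfer; it builds a full copy and assigns it in one statement so a concurrent currentStats caller always observes a consistent snapshot. A reduced sketch of the copy-then-assign idea (hypothetical counters, no libp2p types):

type Stats = object
  uploadBytes: uint
  downloadBytes: uint

var shared = Stats()

proc recordUpload(n: uint) =
  # Mutate a local copy, then assign the whole object at once: a reader
  # of `shared` sees either the old snapshot or the new one, never a mix.
  var copy = shared
  copy.uploadBytes += n
  shared = copy

recordUpload(42)
assert shared.uploadBytes == 42
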
@@ -24,7 +24,7 @@ type Perf* = ref object of LPProtocol

proc new*(T: typedesc[Perf]): T {.public.} =
  var p = T()
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var bytesRead = 0
    try:
      trace "Received benchmark performance check", conn
@@ -47,10 +47,12 @@ proc new*(T: typedesc[Perf]): T {.public.} =
        await conn.write(buf[0 ..< toWrite])
        size -= toWrite
    except CancelledError as exc:
      trace "cancelled perf handler"
      raise exc
    except CatchableError as exc:
      trace "exception in perf handler", description = exc.msg, conn
      await conn.close()
    finally:
      await conn.close()

  p.handler = handle
  p.codec = PerfCodec

@@ -51,7 +51,7 @@ proc new*(
  ping

method init*(p: Ping) =
  proc handle(conn: Connection, proto: string) {.async.} =
  proc handle(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    try:
      trace "handling ping", conn
      var buf: array[PingSize, byte]
@@ -61,6 +61,7 @@ method init*(p: Ping) =
      if not isNil(p.pingHandler):
        await p.pingHandler(conn.peerId)
    except CancelledError as exc:
      trace "cancelled ping handler"
      raise exc
    except CatchableError as exc:
      trace "exception in ping handler", description = exc.msg, conn
@@ -68,7 +69,11 @@ method init*(p: Ping) =
  p.handler = handle
  p.codec = PingCodec

proc ping*(p: Ping, conn: Connection): Future[Duration] {.async, public.} =
proc ping*(
    p: Ping, conn: Connection
): Future[Duration] {.
    public, async: (raises: [CancelledError, LPStreamError, WrongPingAckError])
.} =
  ## Sends ping to `conn`, returns the delay

  trace "initiating ping", conn

@@ -9,7 +9,7 @@

{.push raises: [].}

import chronos, stew/results
import chronos, results
import ../stream/connection

export results
@@ -17,7 +17,9 @@ export results
const DefaultMaxIncomingStreams* = 10

type
  LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.async.}
  LPProtoHandler* = proc(conn: Connection, proto: string): Future[void] {.
    async: (raises: [CancelledError])
  .}

  LPProtocol* = ref object of RootObj
    codecs*: seq[string]
@@ -28,13 +30,13 @@ type
method init*(p: LPProtocol) {.base, gcsafe.} =
  discard

method start*(p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
method start*(p: LPProtocol) {.base, async: (raises: [CancelledError], raw: true).} =
  let fut = newFuture[void]()
  fut.complete()
  p.started = true
  fut

method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
method stop*(p: LPProtocol) {.base, async: (raises: [], raw: true).} =
  let fut = newFuture[void]()
  fut.complete()
  p.started = false
@@ -64,21 +66,6 @@ template `handler`*(p: LPProtocol, conn: Connection, proto: string): Future[void
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
  p.handlerImpl = handler

# Callbacks that are annotated with `{.async: (raises).}` explicitly
# document the types of errors that they may raise, but are not compatible
# with `LPProtoHandler` and need to use a custom `proc` type.
# They are internally wrapped into a `LPProtoHandler`, but still allow the
# compiler to check that their `{.async: (raises).}` annotation is correct.
# https://github.com/nim-lang/Nim/issues/23432
func `handler=`*[E](
    p: LPProtocol,
    handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
) =
  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
    await handler(conn, proto)

  p.handlerImpl = wrap

proc new*(
    T: type LPProtocol,
    codecs: seq[string],
@@ -92,17 +79,5 @@ proc new*(
      when maxIncomingStreams is int:
        Opt.some(maxIncomingStreams)
      else:
        maxIncomingStreams
    ,
        maxIncomingStreams,
  )

proc new*[E](
    T: type LPProtocol,
    codecs: seq[string],
    handler: proc(conn: Connection, proto: string): InternalRaisesFuture[void, E],
    maxIncomingStreams: Opt[int] | int = Opt.none(int),
): T =
  proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
    await handler(conn, proto)

  T.new(codec, wrap, maxIncomingStreams)

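This hunk retypes LPProtoHandler as {.async: (raises: [CancelledError]).} and drops the wrapper that previously bridged typed handlers, since every handler now carries the annotation directly. A minimal self-contained sketch of what such an annotation enforces, assuming chronos v4's typed raises support (hypothetical proc names):

import chronos

proc fetch(ok: bool): Future[int] {.async: (raises: [ValueError, CancelledError]).} =
  # Only the listed error types may escape this proc; raising anything
  # else is rejected at compile time.
  if not ok:
    raise newException(ValueError, "bad input")
  return 42

proc main() {.async: (raises: []).} =
  try:
    echo await fetch(true)
  except ValueError, CancelledError:
    discard

waitFor main()
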
@@ -101,10 +101,12 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =

  procCall PubSub(f).unsubscribePeer(peer)

method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
    f: FloodSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
  var rpcMsg = decodeRpcMsg(data).valueOr:
    debug "failed to decode msg from peer", peer, err = error
    raise newException(CatchableError, "Peer msg couldn't be decoded")
    raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")

  trace "decoded msg from peer", peer, payload = rpcMsg.shortLog
  # trigger hooks
@@ -175,24 +177,23 @@ method rpcHandler*(f: FloodSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
  f.updateMetrics(rpcMsg)

method init*(f: FloodSub) =
  proc handler(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    ## main protocol handler that gets triggered on every
    ## connection for a protocol string
    ## e.g. ``/floodsub/1.0.0``, etc...
    ##
    try:
      await f.handleConn(conn, proto)
    except CancelledError:
      # This is top-level procedure which will work as separate task, so it
      # do not need to propagate CancelledError.
      trace "Unexpected cancellation in floodsub handler", conn
    except CatchableError as exc:
      trace "FloodSub handler leaks an error", description = exc.msg, conn
    except CancelledError as exc:
      trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
      raise exc

  f.handler = handler
  f.codec = FloodSubCodec

method publish*(f: FloodSub, topic: string, data: seq[byte]): Future[int] {.async.} =
method publish*(
    f: FloodSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
  # base returns always 0
  discard await procCall PubSub(f).publish(topic, data)

@@ -29,7 +29,7 @@ import
  ../../utility,
  ../../switch

import stew/results
import results
export results

import ./gossipsub/[types, scoring, behavior], ../../utils/heartbeat
@@ -102,6 +102,7 @@ proc init*(
    overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
    disconnectPeerAboveRateLimit = false,
    maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue,
    sendIDontWantOnPublish = false,
): GossipSubParams =
  GossipSubParams(
    explicit: true,
@@ -139,6 +140,7 @@ proc init*(
    overheadRateLimit: overheadRateLimit,
    disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
    maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
    sendIDontWantOnPublish: sendIDontWantOnPublish,
  )

proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -208,19 +210,16 @@ proc validateParameters*(parameters: TopicParams): Result[void, cstring] =
  ok()

method init*(g: GossipSub) =
  proc handler(conn: Connection, proto: string) {.async.} =
  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    ## main protocol handler that gets triggered on every
    ## connection for a protocol string
    ## e.g. ``/floodsub/1.0.0``, etc...
    ##
    try:
      await g.handleConn(conn, proto)
    except CancelledError:
      # This is top-level procedure which will work as separate task, so it
      # do not need to propogate CancelledError.
      trace "Unexpected cancellation in gossipsub handler", conn
    except CatchableError as exc:
      trace "GossipSub handler leaks an error", description = exc.msg, conn
    except CancelledError as exc:
      trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
      raise exc

  g.handler = handler
  g.codecs &= GossipSubCodec_12
@@ -383,9 +382,43 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
    trace "sending iwant reply messages", peer
    g.send(peer, RPCMsg(messages: messages), isHighPriority = false)

proc sendIDontWant(
    g: GossipSub,
    msg: Message,
    msgId: MessageId,
    peersToSendIDontWant: HashSet[PubSubPeer],
) =
  # If the message is "large enough", let the mesh know that we do not want
  # any more copies of it, regardless if it is valid or not.
  #
  # In the case that it is not valid, this leads to some redundancy
  # (since the other peer should not send us an invalid message regardless),
  # but the expectation is that this is rare (due to such peers getting
  # descored) and that the savings from honest peers are greater than the
  # cost a dishonest peer can incur in short time (since the IDONTWANT is
  # small).

  # IDONTWANT is only supported by >= GossipSubCodec_12
  let peers = peersToSendIDontWant.filterIt(
    it.codec != GossipSubCodec_10 and it.codec != GossipSubCodec_11
  )

  g.broadcast(
    peers,
    RPCMsg(
      control: some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
    ),
    isHighPriority = true,
  )

const iDontWantMessageSizeThreshold* = 512

proc isLargeMessage(msg: Message, msgId: MessageId): bool =
  msg.data.len > max(iDontWantMessageSizeThreshold, msgId.len * 10)

proc validateAndRelay(
    g: GossipSub, msg: Message, msgId: MessageId, saltedId: SaltedId, peer: PubSubPeer
) {.async.} =
) {.async: (raises: []).} =
  try:
    template topic(): string =
      msg.topic
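The threshold logic above only pays for an IDONTWANT broadcast when the payload dwarfs the control message that suppresses further copies. A standalone check with the same arithmetic (hypothetical sizes):

const iDontWantMessageSizeThreshold = 512

proc isLarge(dataLen, msgIdLen: int): bool =
  # Worth an IDONTWANT only if the payload exceeds both the fixed threshold
  # and ten times the message-id length, keeping the control message cheap
  # relative to the data it saves.
  dataLen > max(iDontWantMessageSizeThreshold, msgIdLen * 10)

assert isLarge(1024, 32)    # 1024 > max(512, 320)
assert not isLarge(400, 32) # below the fixed threshold
assert not isLarge(600, 64) # 600 <= max(512, 640)
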
@@ -399,29 +432,10 @@ proc validateAndRelay(
    toSendPeers.incl(peers[])
    toSendPeers.excl(peer)

    if msg.data.len > max(512, msgId.len * 10):
      # If the message is "large enough", let the mesh know that we do not want
      # any more copies of it, regardless if it is valid or not.
      #
      # In the case that it is not valid, this leads to some redundancy
      # (since the other peer should not send us an invalid message regardless),
      # but the expectation is that this is rare (due to such peers getting
      # descored) and that the savings from honest peers are greater than the
      # cost a dishonest peer can incur in short time (since the IDONTWANT is
      # small).
    if isLargeMessage(msg, msgId):
      var peersToSendIDontWant = HashSet[PubSubPeer]()
      addToSendPeers(peersToSendIDontWant)
      peersToSendIDontWant.exclIfIt(
        it.codec == GossipSubCodec_10 or it.codec == GossipSubCodec_11
      )
      g.broadcast(
        peersToSendIDontWant,
        RPCMsg(
          control:
            some(ControlMessage(idontwant: @[ControlIWant(messageIDs: @[msgId])]))
        ),
        isHighPriority = true,
      )
      g.sendIDontWant(msg, msgId, peersToSendIDontWant)

    let validation = await g.validate(msg)

@@ -490,7 +504,9 @@ proc validateAndRelay(
      )

    await handleData(g, topic, msg.data)
  except CatchableError as exc:
  except CancelledError as exc:
    info "validateAndRelay failed", description = exc.msg
  except PeerRateLimitError as exc:
    info "validateAndRelay failed", description = exc.msg

proc dataAndTopicsIdSize(msgs: seq[Message]): int =
@@ -511,7 +527,9 @@ proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =

  msgSize - payloadSize - controlSize

proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
proc rateLimit*(
    g: GossipSub, peer: PubSubPeer, overhead: int
) {.async: (raises: [PeerRateLimitError]).} =
  peer.overheadRateLimitOpt.withValue(overheadRateLimit):
    if not overheadRateLimit.tryConsume(overhead):
      libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()])
@@ -524,7 +542,9 @@ proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
        PeerRateLimitError, "Peer disconnected because it's above rate limit."
      )

method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
method rpcHandler*(
    g: GossipSub, peer: PubSubPeer, data: seq[byte]
) {.async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError]).} =
  let msgSize = data.len
  var rpcMsg = decodeRpcMsg(data).valueOr:
    debug "failed to decode msg from peer", peer, err = error
@@ -534,7 +554,7 @@ method rpcHandler*(g: GossipSub, peer: PubSubPeer, data: seq[byte]) {.async.} =
    # TODO evaluate behaviour penalty values
    peer.behaviourPenalty += 0.1

    raise newException(CatchableError, "Peer msg couldn't be decoded")
    raise newException(PeerMessageDecodeError, "Peer msg couldn't be decoded")

  when defined(libp2p_expensive_metrics):
    for m in rpcMsg.messages:
@@ -682,22 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
  # Send unsubscribe (in reverse order to sub/graft)
  procCall PubSub(g).onTopicSubscription(topic, subscribed)

method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.async.} =
  logScope:
    topic
proc makePeersForPublishUsingCustomConn(
    g: GossipSub, topic: string
): HashSet[PubSubPeer] =
  assert g.customConnCallbacks.isSome,
    "GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"

  if topic.len <= 0: # data could be 0/empty
    debug "Empty topic, skipping publish"
    return 0
  trace "Selecting peers via custom connection callback"

  # base returns always 0
  discard await procCall PubSub(g).publish(topic, data)

  trace "Publishing message on topic", data = data.shortLog
  return g.customConnCallbacks.get().customPeerSelectionCB(
    g.gossipsub.getOrDefault(topic),
    g.subscribedDirectPeers.getOrDefault(topic),
    g.mesh.getOrDefault(topic),
    g.fanout.getOrDefault(topic),
  )

proc makePeersForPublishDefault(
    g: GossipSub, topic: string, data: seq[byte]
): HashSet[PubSubPeer] =
  var peers: HashSet[PubSubPeer]

  # add always direct peers
  # Always include direct peers
  peers.incl(g.subscribedDirectPeers.getOrDefault(topic))

  if topic in g.topics: # if we're subscribed use the mesh
@@ -747,6 +772,29 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
    # ultimately is not sent)
    g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)

  return peers

method publish*(
    g: GossipSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.async: (raises: []).} =
  logScope:
    topic

  if topic.len <= 0: # data could be 0/empty
    debug "Empty topic, skipping publish"
    return 0

  # base returns always 0
  discard await procCall PubSub(g).publish(topic, data)

  trace "Publishing message on topic", data = data.shortLog

  let peers =
    if useCustomConn:
      g.makePeersForPublishUsingCustomConn(topic)
    else:
      g.makePeersForPublishDefault(topic, data)

  if peers.len == 0:
    let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
    debug "No peers for topic, skipping publish",
@@ -782,7 +830,15 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy

  g.mcache.put(msgId, msg)

  g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
  if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
    g.sendIDontWant(msg, msgId, peers)

  g.broadcast(
    peers,
    RPCMsg(messages: @[msg]),
    isHighPriority = true,
    useCustomConn = useCustomConn,
  )

  if g.knownTopics.contains(topic):
    libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
@@ -792,7 +848,9 @@ method publish*(g: GossipSub, topic: string, data: seq[byte]): Future[int] {.asy
  trace "Published message to peers", peers = peers.len
  return peers.len

proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc maintainDirectPeer(
    g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
  if id notin g.peers:
    trace "Attempting to dial a direct peer", peer = id
    if g.switch.isConnected(id):
@@ -805,14 +863,16 @@ proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.as
      except CancelledError as exc:
        trace "Direct peer dial canceled"
        raise exc
      except CatchableError as exc:
      except DialFailedError as exc:
        debug "Direct peer error dialing", description = exc.msg

proc addDirectPeer*(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
proc addDirectPeer*(
    g: GossipSub, id: PeerId, addrs: seq[MultiAddress]
) {.async: (raises: [CancelledError]).} =
  g.parameters.directPeers[id] = addrs
  await g.maintainDirectPeer(id, addrs)

proc maintainDirectPeers(g: GossipSub) {.async.} =
proc maintainDirectPeers(g: GossipSub) {.async: (raises: [CancelledError]).} =
  heartbeat "GossipSub DirectPeers", 1.minutes:
    for id, addrs in g.parameters.directPeers:
      await g.addDirectPeer(id, addrs)
@@ -846,9 +906,9 @@ method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
    return fut

  # stop heartbeat interval
  g.directPeersLoop.cancel()
  g.scoringHeartbeatFut.cancel()
  g.heartbeatFut.cancel()
  g.directPeersLoop.cancelSoon()
  g.scoringHeartbeatFut.cancelSoon()
  g.heartbeatFut.cancelSoon()
  g.heartbeatFut = nil
  fut

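The stop path above switches from cancel() to cancelSoon(), which only schedules cancellation instead of processing it on the spot, so a raises: [] teardown cannot itself be interrupted. A minimal chronos sketch (hypothetical worker task):

import chronos

proc worker() {.async.} =
  await sleepAsync(1.hours)

proc stopDemo() {.async.} =
  let fut = worker()
  # cancelSoon() requests cancellation and returns right away, without
  # awaiting completion and without raising, which suits teardown code.
  fut.cancelSoon()
  await sleepAsync(10.milliseconds)
  assert fut.cancelled()

waitFor stopDemo()
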
@@ -126,8 +126,7 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
        if x.peerId in sprBook:
          sprBook[x.peerId].encode().get(default(seq[byte]))
        else:
          default(seq[byte])
      ,
          default(seq[byte]),
    )

proc handleGraft*(
@@ -306,9 +305,9 @@ proc handleIHave*(
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
  for dontWant in iDontWants:
    for messageId in dontWant.messageIDs:
      if peer.iDontWants[^1].len > 1000:
      if peer.iDontWants[0].len >= IDontWantMaxCount:
        break
      peer.iDontWants[^1].incl(g.salt(messageId))
      peer.iDontWants[0].incl(g.salt(messageId))

proc handleIWant*(
    g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
@@ -458,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
      prunes = toSeq(
        try:
          g.mesh[topic]
        except KeyError:
          raiseAssert "have peers"
        except KeyError as e:
          raiseAssert "have peers: " & e.msg
      )
    # avoid pruning peers we are currently grafting in this heartbeat
    prunes.keepIf do(x: PubSubPeer) -> bool:
@@ -514,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
    var peers = toSeq(
      try:
        g.mesh[topic]
      except KeyError:
        raiseAssert "have peers"
      except KeyError as e:
        raiseAssert "have peers: " & e.msg
    )
    # grafting so high score has priority
    peers.sort(byScore, SortOrder.Descending)
@@ -539,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
          it.peerId notin backingOff:
        avail.add(it)

        # by spec, grab only 2
        if avail.len > 1:
        # by spec, grab only up to MaxOpportunisticGraftPeers
        if avail.len >= MaxOpportunisticGraftPeers:
          break

    for peer in avail:
@@ -691,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
    for peer in allPeers:
      control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
      for msgId in ihave.messageIDs:
        peer.sentIHaves[^1].incl(msgId)
        peer.sentIHaves[0].incl(msgId)

  libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

@@ -770,7 +769,7 @@ proc onHeartbeat(g: GossipSub) =

  g.mcache.shift() # shift the cache

proc heartbeat*(g: GossipSub) {.async.} =
proc heartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
  heartbeat "GossipSub", g.parameters.heartbeatInterval:
    trace "running heartbeat", instance = cast[int](g)
    g.onHeartbeat()

@@ -131,7 +131,7 @@ proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
  else:
    0.0

proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async: (raises: []).} =
  try:
    await g.switch.disconnect(peer.peerId)
  except CatchableError as exc: # Never cancelled
@@ -313,12 +313,14 @@ proc updateScores*(g: GossipSub) = # avoid async

  trace "updated scores", peers = g.peers.len

proc scoringHeartbeat*(g: GossipSub) {.async.} =
proc scoringHeartbeat*(g: GossipSub) {.async: (raises: [CancelledError]).} =
  heartbeat "Gossipsub scoring", g.parameters.decayInterval:
    trace "running scoring heartbeat", instance = cast[int](g)
    g.updateScores()

proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
proc punishInvalidMessage*(
    g: GossipSub, peer: PubSubPeer, msg: Message
) {.async: (raises: [PeerRateLimitError]).} =
  let uselessAppBytesNum = msg.data.len
  peer.overheadRateLimitOpt.withValue(overheadRateLimit):
    if not overheadRateLimit.tryConsume(uselessAppBytesNum):

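Both rateLimit and punishInvalidMessage above charge the peer's shared token bucket via tryConsume. A minimal sketch with the TokenBucket shipped in chronos (hypothetical budget of 1000 tokens refilled each second):

import chronos, chronos/ratelimit

let bucket = TokenBucket.new(1000, 1.seconds)

# tryConsume succeeds while the budget lasts and fails once it is
# exhausted, until the bucket refills over the configured interval.
assert bucket.tryConsume(600)
assert bucket.tryConsume(400)
assert not bucket.tryConsume(1)
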
@@ -50,6 +50,9 @@ const
  # rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
  # go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
  IHaveMaxLength* = 5000
  IDontWantMaxCount* = 1000
    # maximum number of IDontWant messages in one slot of the history
  MaxOpportunisticGraftPeers* = 2

type
  TopicInfo* = object # gossip 1.1 related
@@ -154,6 +157,9 @@ type
    # Max number of elements allowed in the non-priority queue. When this limit has been reached, the peer will be disconnected.
    maxNumElementsInNonPriorityQueue*: int

    # Broadcast an IDONTWANT message automatically when the message exceeds the IDONTWANT message size threshold
    sendIDontWantOnPublish*: bool

  BackoffTable* = Table[string, Table[PeerId, Moment]]
  ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]

@@ -31,7 +31,7 @@ import
  ../../errors,
  ../../utility

import stew/results
import results
export results

export tables, sets
@@ -125,6 +125,8 @@ declarePublicCounter(
type
  InitializationError* = object of LPError

  PeerMessageDecodeError* = object of CatchableError

  TopicHandler* {.public.} =
    proc(topic: string, data: seq[byte]): Future[void] {.gcsafe, raises: [].}

@@ -174,6 +176,7 @@ type
    rng*: ref HmacDrbgContext

    knownTopics*: HashSet[string]
    customConnCallbacks*: Option[CustomConnectionCallbacks]

method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
  ## handle peer disconnects
@@ -185,7 +188,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
  libp2p_pubsub_peers.set(p.peers.len.int64)

proc send*(
    p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
    p: PubSub,
    peer: PubSubPeer,
    msg: RPCMsg,
    isHighPriority: bool,
    useCustomConn: bool = false,
) {.raises: [].} =
  ## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
  ##
@@ -198,13 +205,14 @@ proc send*(
  ## priority messages have been sent.

  trace "sending pubsub message to peer", peer, payload = shortLog(msg)
  peer.send(msg, p.anonymize, isHighPriority)
  peer.send(msg, p.anonymize, isHighPriority, useCustomConn)

proc broadcast*(
    p: PubSub,
    sendPeers: auto, # Iteratble[PubSubPeer]
    msg: RPCMsg,
    isHighPriority: bool,
    useCustomConn: bool = false,
) {.raises: [].} =
  ## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
  ##
@@ -259,12 +267,12 @@ proc broadcast*(

  if anyIt(sendPeers, it.hasObservers):
    for peer in sendPeers:
      p.send(peer, msg, isHighPriority)
      p.send(peer, msg, isHighPriority, useCustomConn)
  else:
    # Fast path that only encodes message once
    let encoded = encodeRpcMsg(msg, p.anonymize)
    for peer in sendPeers:
      asyncSpawn peer.sendEncoded(encoded, isHighPriority)
      asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)

proc sendSubs*(
    p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
@@ -327,7 +335,9 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =

method rpcHandler*(
    p: PubSub, peer: PubSubPeer, data: seq[byte]
): Future[void] {.base, async.} =
): Future[void] {.
    base, async: (raises: [CancelledError, PeerMessageDecodeError, PeerRateLimitError])
.} =
  ## Handler that must be overridden by concrete implementation
  raiseAssert "Unimplemented"

@@ -355,15 +365,28 @@ method getOrCreatePeer*(
    peer[].codec = protoNegotiated
    return peer[]

  proc getConn(): Future[Connection] {.async.} =
    return await p.switch.dial(peerId, protosToDial)
  proc getConn(): Future[Connection] {.
      async: (raises: [CancelledError, GetConnDialError])
  .} =
    try:
      return await p.switch.dial(peerId, protosToDial)
    except CancelledError as exc:
      raise exc
    except DialFailedError as e:
      raise (ref GetConnDialError)(parent: e)

  proc onEvent(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
    p.onPubSubPeerEvent(peer, event)

  # create new pubsub peer
  let pubSubPeer =
    PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
  let pubSubPeer = PubSubPeer.new(
    peerId,
    getConn,
    onEvent,
    protoNegotiated,
    p.maxMessageSize,
    customConnCallbacks = p.customConnCallbacks,
  )
  debug "created new pubsub peer", peerId

  p.peers[peerId] = pubSubPeer
@@ -376,7 +399,9 @@ method getOrCreatePeer*(

  return pubSubPeer

proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
proc handleData*(
    p: PubSub, topic: string, data: seq[byte]
): Future[void] {.async: (raises: [], raw: true).} =
  # Start work on all data handlers without copying data into closure like
  # happens on {.async.} transformation
  p.topics.withValue(topic, handlers):
@@ -389,7 +414,7 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
      futs.add(fut)

  if futs.len() > 0:
    proc waiter(): Future[void] {.async.} =
    proc waiter(): Future[void] {.async: (raises: []).} =
      # slow path - we have to wait for the handlers to complete
      try:
        futs = await allFinished(futs)
@@ -397,12 +422,12 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
        # propagate cancellation
        for fut in futs:
          if not (fut.finished):
            fut.cancel()
            fut.cancelSoon()

      # check for errors in futures
      for fut in futs:
        if fut.failed:
          let err = fut.readError()
          let err = fut.error()
          warn "Error in topic handler", description = err.msg

    return waiter()
@@ -412,7 +437,9 @@ proc handleData*(p: PubSub, topic: string, data: seq[byte]): Future[void] =
  res.complete()
  return res

method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
method handleConn*(
    p: PubSub, conn: Connection, proto: string
) {.base, async: (raises: [CancelledError]).} =
  ## handle incoming connections
  ##
  ## this proc will:
@@ -424,7 +451,9 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
  ## that we're interested in
  ##

  proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
  proc handler(
      peer: PubSubPeer, data: seq[byte]
  ): Future[void] {.async: (raises: []).} =
    # call pubsub rpc handler
    p.rpcHandler(peer, data)

@@ -436,7 +465,7 @@ method handleConn*(p: PubSub, conn: Connection, proto: string) {.base, async.} =
    trace "pubsub peer handler ended", conn
  except CancelledError as exc:
    raise exc
  except CatchableError as exc:
  except PeerMessageDecodeError as exc:
    trace "exception ocurred in pubsub handle", description = exc.msg, conn
  finally:
    await conn.closeWithEOF()
@@ -541,8 +570,8 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
  p.updateTopicMetrics(topic)

method publish*(
    p: PubSub, topic: string, data: seq[byte]
): Future[int] {.base, async, public.} =
    p: PubSub, topic: string, data: seq[byte], useCustomConn: bool = false
): Future[int] {.base, async: (raises: []), public.} =
  ## publish to a ``topic``
  ##
  ## The return value is the number of neighbours that we attempted to send the
@@ -572,7 +601,7 @@ method addValidator*(

method removeValidator*(
    p: PubSub, topic: varargs[string], hook: ValidatorHandler
) {.base, public.} =
) {.base, public, gcsafe.} =
  for t in topic:
    p.validators.withValue(t, validators):
      validators[].excl(hook)
@@ -581,7 +610,7 @@ method removeValidator*(

method validate*(
    p: PubSub, message: Message
): Future[ValidationResult] {.async, base.} =
): Future[ValidationResult] {.async: (raises: [CancelledError]), base.} =
  var pending: seq[Future[ValidationResult]]
  trace "about to validate message"
  let topic = message.topic
@@ -589,22 +618,27 @@ method validate*(
    topic = topic, registered = toSeq(p.validators.keys)
  if topic in p.validators:
    trace "running validators for topic", topic = topic
    for validator in p.validators[topic]:
      pending.add(validator(topic, message))

  result = ValidationResult.Accept
  p.validators.withValue(topic, validators):
    for validator in validators[]:
      pending.add(validator(topic, message))
  var valResult = ValidationResult.Accept
  let futs = await allFinished(pending)
  for fut in futs:
    if fut.failed:
      result = ValidationResult.Reject
      valResult = ValidationResult.Reject
      break
    let res = fut.read()
    if res != ValidationResult.Accept:
      result = res
      if res == ValidationResult.Reject:
        break
    try:
      let res = fut.read()
      if res != ValidationResult.Accept:
        valResult = res
        if res == ValidationResult.Reject:
          break
    except CatchableError as e:
      trace "validator for message could not be executed, ignoring",
        topic = topic, err = e.msg
      valResult = ValidationResult.Ignore

  case result
  case valResult
  of ValidationResult.Accept:
    libp2p_pubsub_validation_success.inc()
  of ValidationResult.Reject:
@@ -612,6 +646,8 @@ method validate*(
  of ValidationResult.Ignore:
    libp2p_pubsub_validation_ignore.inc()

  valResult

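The rewritten validate above collects every validator future with allFinished and folds the outcomes, so one crashing validator no longer aborts the whole loop with an exception. A reduced chronos sketch of that folding (hypothetical validators returning bool):

import chronos

proc check(ok: bool): Future[bool] {.async.} =
  if not ok:
    raise newException(ValueError, "validator crashed")
  return true

proc runAll(flags: seq[bool]): Future[bool] {.async.} =
  var pending: seq[Future[bool]]
  for f in flags:
    pending.add(check(f))
  # allFinished waits for every future to complete without re-raising,
  # so each result (or failure) can be inspected individually.
  let futs = await allFinished(pending)
  for fut in futs:
    if fut.failed or not fut.read():
      return false
  return true

assert waitFor runAll(@[true, true])
assert not waitFor(runAll(@[true, false]))
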
proc init*[PubParams: object | bool](
    P: typedesc[PubSub],
    switch: Switch,
@@ -624,6 +660,8 @@ proc init*[PubParams: object | bool](
    maxMessageSize: int = 1024 * 1024,
    rng: ref HmacDrbgContext = newRng(),
    parameters: PubParams = false,
    customConnCallbacks: Option[CustomConnectionCallbacks] =
      none(CustomConnectionCallbacks),
): P {.raises: [InitializationError], public.} =
  let pubsub =
    when PubParams is bool:
@@ -639,6 +677,7 @@ proc init*[PubParams: object | bool](
        maxMessageSize: maxMessageSize,
        rng: rng,
        topicsHigh: int.high,
        customConnCallbacks: customConnCallbacks,
      )
    else:
      P(
@@ -654,9 +693,12 @@ proc init*[PubParams: object | bool](
        maxMessageSize: maxMessageSize,
        rng: rng,
        topicsHigh: int.high,
        customConnCallbacks: customConnCallbacks,
      )

  proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} =
  proc peerEventHandler(
      peerId: PeerId, event: PeerEvent
  ) {.async: (raises: [CancelledError]).} =
    if event.kind == PeerEventKind.Joined:
      pubsub.subscribePeer(peerId)
    else:

Some files were not shown because too many files have changed in this diff.