Mirror of https://github.com/vacp2p/nim-libp2p.git (synced 2026-01-10 12:07:54 -05:00)

Compare commits: v1.1.0 ... penalty-no (137 commits)
Commits in this comparison (SHA1 only):

a16293bdca, 77d40c34f4, 2fa2c4425f, 47990be775, 0911cb20f4, 3ca49a2f40, 1b91b97499,
21cbe3a91a, 88e233db81, 84659af45b, aef44ed1ce, 02c96fc003, c4da9be32c, 8014a848a0,
2b5319622c, 5cbb473d1b, 87110e551a, b30b2656d5, 917dbf83cd, 0c61f12b85, a6237bd1c1,
d9a60f339e, 89cad5a3ba, 09b3e11956, 03f67d3db5, bb97a9de79, 1a707e1264, 458b0885dd,
a2027003cd, c5db35d9b0, d1e51beb7f, 275d649287, 467b5b4f0c, fdf53d18cd, 48a3ac06ff,
49a92e5641, 08a48faf41, 61b299e411, ca01ee06a8, 6c43ab3fce, ae13a0d583, 28609597d1,
8294d5b9df, 78e83889ee, 7603b8de5e, 8cccd54125, 18e00a741b, ee264fdf11, 9059a8aced,
0b753e7cf2, d43c5feab0, 1609fd7197, 42cd78e95b, 44cada9c55, 6c873481ac, d08ce17144,
bd6ead95ef, 53e3825e07, e9b456162a, 250024f6cc, fec632d28d, 349496e40f, 7faa0fac23,
c5e4f8e12d, fe4ff79885, aa4ebb0b3c, e0f70b7177, c1dfd58772, 04af0c4323, eb0890cd6f,
9bc5ec1566, 5594bcb33e, d46bcdb6ac, 9468bb6b4d, 2725be64ba, e3c967ad19, d2c98bd87d,
3011ba4326, c6566707fa, 3be681ec4d, 2ede0fa40c, 7c195ab927, 3230407ffe, deb72c8580,
ce0685c272, 1f4b090227, fb05f5ae22, e12f65f193, 4b3bc4f819, 6791f5e7bb, 08d9c84aca,
4e7eaba67a, 5f7a3ab829, ebef85c9d7, 3fc1236659, fc4e9a8bb8, 60f953629d, 18b0f726df,
459f6851e7, 575344e2e9, 75871817ee, 61929aed6c, 56599f5b9d, b2eac7ecbd, 20b0e40f7d,
ff77d52851, 545a31d4f0, b76bac752f, c6aa085e98, e03547ea3e, f80ce3133c, d6263bf751,
56c23a286a, 7a369dd1bf, b784167805, 440461b24b, fab1340020, 1721f078c7, 74c402ed9d,
c45f9705ab, 81b861b34e, 43359dd9d1, f85d0f75ea, 66f9dc9167, 1c4d0832ce, 224f92e172,
5efa089196, 9d4c4307de, 49dfa84c6f, a65b7b028f, 67711478ce, c28d8bb353, eb78292d9c,
3725f6a95b, 3640b4dd89, 32085ca88a, c76d1e18ef
.github/workflows/bumper.yml (vendored): 10 changed lines
@@ -2,8 +2,7 @@ name: Bumper
on:
push:
branches:
- unstable
- bumper
- master
workflow_dispatch:

jobs:
@@ -24,13 +23,14 @@ jobs:
repository: ${{ matrix.target.repo }}
ref: ${{ matrix.target.branch }}
path: nbc
submodules: true
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

- name: Checkout this ref
run: |
cd nbc/vendor/nim-libp2p
cd nbc
git submodule update --init vendor/nim-libp2p
cd vendor/nim-libp2p
git checkout $GITHUB_SHA

- name: Commit this bump
@@ -38,7 +38,7 @@ jobs:
cd nbc
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit -a -m "auto-bump nim-libp2p"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
.github/workflows/ci.yml (vendored): 2 changed lines
@@ -28,7 +28,7 @@ jobs:
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6]
branch: [version-1-6]
include:
- target:
os: linux
.github/workflows/daily.yml (vendored, new file): 12 lines
@@ -0,0 +1,12 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:

jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0']"
cpu: "['amd64']"
.github/workflows/daily_common.yml (vendored, new file): 84 lines
@@ -0,0 +1,84 @@
name: daily-common

on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
cpu:
description: 'CPU'
required: true
type: string
exclude:
description: 'Exclude matrix configurations'
required: false
type: string
default: "[]"

jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1

build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
platform:
- os: linux
builder: ubuntu-20
shell: bash
- os: macos
builder: macos-12
shell: bash
- os: windows
builder: windows-2019
shell: msys2 {0}
branch: ${{ fromJSON(inputs.nim-branch) }}
cpu: ${{ fromJSON(inputs.cpu) }}
exclude: ${{ fromJSON(inputs.exclude) }}

defaults:
run:
shell: ${{ matrix.platform.shell }}

name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.cpu }}

- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false

- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi
.github/workflows/daily_i386.yml (vendored, new file): 13 lines
@@ -0,0 +1,13 @@
name: Daily i386
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:

jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0', 'devel']"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_nim_devel.yml (vendored, new file): 12 lines
@@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:

jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['devel']"
cpu: "['amd64']"
.github/workflows/doc.yml (vendored): 2 changed lines
@@ -19,7 +19,7 @@ jobs:

- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
nim-version: '1.6.x'

- name: Generate doc
run: |
.github/workflows/interop.yml (vendored): 58 changed lines
@@ -11,44 +11,30 @@ concurrency:
cancel-in-progress: true

jobs:
run-multidim-interop:
name: Run multidimensional interoperability tests
run-transport-interop:
name: Run transport interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
with:
repository: libp2p/test-plans
submodules: true
fetch-depth: 0

- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: >
cd multidim-interop/impl/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

- name: Create ping-version.json
run: >
(cat << EOF
{
"id": "nim-libp2p-head",
"containerImageID": "nim-libp2p-head",
"transports": [
"tcp",
"ws"
],
"secureChannels": [
"noise"
],
"muxers": [
"mplex",
"yamux"
]
}
EOF

) > ${{ github.workspace }}/test_head.json

- uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json

run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
.github/workflows/multi_nim.yml (vendored): 82 changed lines
@@ -1,82 +0,0 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:

jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1

build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6, devel]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}

defaults:
run:
shell: ${{ matrix.shell }}

name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}

- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '~1.15.5'

- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3

- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi
.gitignore (vendored): 1 changed line
@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/
.pinned: 31 changed lines
@@ -1,16 +1,17 @@
bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
chronicles;https://github.com/status-im/nim-chronicles@#1e6350870855541b381d77d4659688bc0d2c4227
chronos;https://github.com/status-im/nim-chronos@#ab5a8c2e0f6941fe3debd61dff0293790079d1b0
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
stew;https://github.com/status-im/nim-stew@#003fe9f0c83c2b0b2ccbd37087e6d1ccd30a3234
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#672db137b7cad9b384b8f4fb551fb6bbeaabfe1b
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#883c7a50ad3b82158e64d074c5578fe33ab3c452
websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
README.md: 15 changed lines
@@ -20,6 +20,7 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
@@ -40,6 +41,8 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.

```
nimble install libp2p
```
@@ -47,11 +50,11 @@ nimble install libp2p
## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).

**Go Daemon:**
Please find the installation and usage intructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).

## Modules

List of packages modules implemented in nim-libp2p:

| Name | Description |
@@ -68,7 +71,6 @@ List of packages modules implemented in nim-libp2p:
| [libp2p-ws](libp2p/transports/wstransport.nim) | WebSocket & WebSocket Secure transport |
| [libp2p-tor](libp2p/transports/tortransport.nim) | Tor Transport |
| **Secure Channels** | |
| [libp2p-secio](libp2p/protocols/secure/secio.nim) | Secio secure channel |
| [libp2p-noise](libp2p/protocols/secure/noise.nim) | [Noise](https://docs.libp2p.io/concepts/secure-comm/noise/) secure channel |
| [libp2p-plaintext](libp2p/protocols/secure/plaintext.nim) | Plain Text for development purposes |
| **Stream Multiplexers** | |
@@ -105,7 +107,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
- Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)

We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`

## Development
Clone and Install dependencies:
@@ -132,7 +134,8 @@ The libp2p implementation in Nim is a work in progress. We welcome contributors
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).

- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph).
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).

### Contributors
@@ -7,13 +7,9 @@ if dirExists("nimbledeps/pkgs2"):
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--define:chronosStrictException
--styleCheck:usages
if (NimMajor, NimMinor) < (1, 6):
--styleCheck:hint
else:
switch("warningAsError", "UseBase:on")
--styleCheck:error
switch("warningAsError", "UseBase:on")
--styleCheck:error

# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
@@ -1,6 +1,8 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
@@ -8,26 +10,29 @@
# Introduction
This is a libp2p-backed daemon wrapping the functionalities of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.

# Prerequisites
Go with version `1.15.15`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.

# Installation
Follow one of the methods below:

## Script
Run the build script while having the `go` command pointing to the correct Go version.
We recommend using `1.15.15`, as previously stated.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install

# perform unit tests
nimble test

# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our asistance.

After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.

# Usage
@@ -13,14 +13,14 @@ type
proc new(T: typedesc[TestProto]): T =

# every incoming connections will be in handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")

# We must close the connections ourselves when we're done with it
await conn.close()

return T(codecs: @[TestCodec], handler: handle)
return T.new(codecs = @[TestCodec], handler = handle)

##
# Helper to create a switch/node
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port

@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
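The recurring change in these example hunks is that protocol handlers and `main` procedures drop the explicit `gcsafe` pragma and keep only `async`, and protocols are constructed through `T.new(...)` instead of the object constructor. A consolidated sketch of the new form, assembled from the fragments above (the imports and `TestCodec` constant are assumed from the surrounding tutorial):

```nim
import chronos, stew/byteutils
import libp2p

const TestCodec = "/test/proto/1.0.0"

type TestProto = ref object of LPProtocol

proc new(T: typedesc[TestProto]): T =
  # every incoming connection is handled in this closure;
  # `async` alone is enough now, no explicit `gcsafe` needed
  proc handle(conn: Connection, proto: string) {.async.} =
    echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
    await conn.writeLp("Roger p2p!")
    # we must close the connection ourselves when we're done with it
    await conn.close()
  return T.new(codecs = @[TestCodec], handler = handle)
```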
@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol

proc new(T: typedesc[TestProto]): T =
# every incoming connections will in be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()

@@ -108,7 +108,7 @@ type

proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()

## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16

@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()

@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that he is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()
@@ -7,24 +7,34 @@ description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

requires "nim >= 1.2.0",
requires "nim >= 1.6.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
"chronicles >= 0.10.2",
"chronos >= 3.0.6",
"chronos >= 4.0.0",
"metrics",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
"unittest2"

let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
let verbose = getEnv("V", "") notin ["", "0"]

let cfg =
" --styleCheck:usages --styleCheck:error" &
(if verbose: "" else: " --verbosity:0 --hints:off") &
" --skipParentCfg --skipUserCfg -f" &
" --threads:on --opt:speed"

import hashes, strutils

import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
excstr.add(" " & getEnv("NIMFLAGS") & " ")
excstr.add(" --verbosity:0 --hints:off ")
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
@@ -34,7 +44,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
rmFile "tests/" & filename.toExe

proc buildSample(filename: string, run = false, extraFlags = "") =
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
var excstr = nimc & " " & lang & " " & cfg & " " & flags & " -p:. " & extraFlags
excstr.add(" examples/" & filename)
exec excstr
if run:
@@ -42,7 +52,7 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
rmFile "examples/" & filename.toExe

proc tutorialToMd(filename: string) =
let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)

task testnative, "Runs libp2p native tests":
@@ -104,15 +114,12 @@ task examples_build, "Build the samples":
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
if (NimMajor, NimMinor) > (1, 2):
# These tutorials relies on post 1.4 exception tracking
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
# Nico doesn't work in 1.2
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")

# pin system
# while nimble lockfile
@@ -123,7 +130,7 @@ task pin, "Create a lockfile":
# pinner.nim was originally here
# but you can't read output from
# a command in a nimscript
exec "nim c -r tools/pinner.nim"
exec nimc & " c -r tools/pinner.nim"

import sequtils
import os
@@ -16,10 +16,7 @@ runnableExamples:
# etc
.build()

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import
options, tables, chronos, chronicles, sequtils,
@@ -28,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility

@@ -36,11 +33,10 @@ export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors

type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [Defect].}
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}

SecureProtocol* {.pure.} = enum
Noise,
Secio {.deprecated.}
Noise

SwitchBuilder* = ref object
privKey: Option[PrivateKey]
@@ -57,11 +53,12 @@ type
protoVersion: string
agentVersion: string
nameResolver: NameResolver
peerStoreCapacity: Option[int]
peerStoreCapacity: Opt[int]
autonat: bool
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager

proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@@ -124,8 +121,12 @@ proc withMplex*(
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b

proc withYamux*(b: SwitchBuilder): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(b: SwitchBuilder,
windowSize: int = YamuxDefaultWindowSize,
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer =
Yamux.new(conn, windowSize, inTimeout = inTimeout, outTimeout = outTimeout)

assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
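The `withYamux` builder step now exposes the stream window size and the in/out inactivity timeouts. A hedged usage sketch (the builder chain and the values are illustrative, not taken from this diff):

```nim
import chronos
import libp2p

# build a switch with a larger Yamux receive window and shorter idle timeouts
let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withYamux(windowSize = 512 * 1024, inTimeout = 2.minutes, outTimeout = 2.minutes)
  .withNoise()
  .build()
```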
@@ -173,7 +174,7 @@ proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder
b

proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
b.peerStoreCapacity = some(capacity)
b.peerStoreCapacity = Opt.some(capacity)
b

proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
@@ -204,8 +205,12 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b

proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b

proc build*(b: SwitchBuilder): Switch
{.raises: [Defect, LPError], public.} =
{.raises: [LPError], public.} =

if b.rng == nil: # newRng could fail
raise newException(Defect, "Cannot initialize RNG")
@@ -226,11 +231,16 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)

let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)

let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

let
transports = block:

@@ -245,9 +255,9 @@ proc build*(b: SwitchBuilder): Switch
if isNil(b.rng):
b.rng = newRng()

let peerStore =
if isSome(b.peerStoreCapacity):
PeerStore.new(identify, b.peerStoreCapacity.get())
let peerStore = block:
b.peerStoreCapacity.withValue(capacity):
PeerStore.new(identify, capacity)
else:
PeerStore.new(identify)

@@ -280,29 +290,25 @@ proc build*(b: SwitchBuilder): Switch
return switch

proc newStandardSwitch*(
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
secureManagers: openArray[SecureProtocol] = [
privKey = none(PrivateKey),
addrs: MultiAddress | seq[MultiAddress] =
MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
secureManagers: openArray[SecureProtocol] = [
SecureProtocol.Noise,
],
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000): Switch
{.raises: [Defect, LPError], public.} =
transportFlags: set[ServerFlags] = {},
rng = newRng(),
inTimeout: Duration = 5.minutes,
outTimeout: Duration = 5.minutes,
maxConnections = MaxConnections,
maxIn = -1,
maxOut = -1,
maxConnsPerPeer = MaxConnectionsPerPeer,
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000
): Switch {.raises: [LPError], public.} =
## Helper for common switch configurations.
{.push warning[Deprecated]:off.}
if SecureProtocol.Secio in secureManagers:
quit("Secio is deprecated!") # use of secio is unsafe
{.pop.}

let addrs = when addrs is MultiAddress: @[addrs] else: addrs
var b = SwitchBuilder
.new()
@@ -319,7 +325,7 @@ proc newStandardSwitch*(
.withNameResolver(nameResolver)
.withNoise()

if privKey.isSome():
b = b.withPrivateKey(privKey.get())
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)

b.build()
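For callers, `newStandardSwitch` keeps the same shape; only `SecureProtocol.Noise` remains as a secure-channel choice and the proc no longer lists `Defect` in its `raises`. A brief usage sketch (the peer-store capacity and address values are illustrative):

```nim
import chronos
import libp2p

proc main() {.async.} =
  # a bounded peer store and signed peer records, otherwise defaults
  let switch = newStandardSwitch(
    addrs = MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("valid address"),
    secureManagers = [SecureProtocol.Noise],
    sendSignedPeerRecord = true,
    peerStoreCapacity = 500)
  await switch.start()
  echo "listening on ", switch.peerInfo.addrs
  await switch.stop()

waitFor main()
```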
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -9,10 +9,7 @@

## This module implementes CID (Content IDentifier).

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
@@ -264,12 +261,6 @@ proc write*(vb: var VBuffer, cid: Cid) {.inline.} =
## Write CID value ``cid`` to buffer ``vb``.
vb.writeArray(cid.data.buffer)

proc encode*(mbtype: typedesc[MultiBase], encoding: string,
cid: Cid): string {.inline.} =
## Get MultiBase encoded representation of ``cid`` using encoding
## ``encoding``.
result = MultiBase.encode(encoding, cid.data.buffer).tryGet()

proc hash*(cid: Cid): Hash {.inline.} =
hash(cid.data.buffer)

@@ -279,9 +270,6 @@ proc `$`*(cid: Cid): string =
BTCBase58.encode(cid.data.buffer)
elif cid.cidver == CIDv1:
let res = MultiBase.encode("base58btc", cid.data.buffer)
if res.isOk():
res.get()
else:
""
res.get("")
else:
""
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[options, tables, sequtils, sets]
import std/[tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo,
peerstore,
@@ -51,7 +48,7 @@

ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void]
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}

PeerEventKind* {.pure.} = enum
Left,
@@ -65,7 +62,7 @@ type
discard

PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}

ConnManager* = ref object of RootObj
maxConnsPerPeer: int
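The event-handler types now declare `raises: []` instead of `[Defect]`, so an ordinary `async` proc still satisfies them. A hypothetical registration sketch; `addConnEventHandler` and `ConnEventKind.Connected` are assumed to be the existing ConnManager API:

```nim
import chronos
import libp2p
import libp2p/connmanager

proc onConnEvent(peerId: PeerId, event: ConnEvent) {.async.} =
  # an async proc compiles to a closure matching Future[void], gcsafe, raises: []
  echo "connection event from ", peerId, ": ", event.kind

let connManager = ConnManager.new()
connManager.addConnEventHandler(onConnEvent, ConnEventKind.Connected)
```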
@@ -131,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,

proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -163,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,

proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =

trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
@@ -285,7 +282,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =

proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [Defect, CatchableError].} =
{.raises: [CatchableError].} =
## store the connection and muxer
##

@@ -314,12 +311,14 @@ proc storeMuxer*(c: ConnManager,

raise newTooManyConnectionsError()

assert muxer notin c.muxed.getOrDefault(peerId)

let
newPeer = peerId notin c.muxed
assert newPeer or c.muxed[peerId].len > 0
c.muxed.mgetOrPut(peerId, newSeq[Muxer]()).add(muxer)
var newPeer = false
c.muxed.withValue(peerId, muxers):
doAssert muxers[].len > 0
doAssert muxer notin muxers[]
muxers[].add(muxer)
do:
c.muxed[peerId] = @[muxer]
newPeer = true
libp2p_peers.set(c.muxed.len.int64)

asyncSpawn c.triggerConnEvent(
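`storeMuxer` now relies on `std/tables.withValue`, which runs one branch with a pointer to the stored value when the key exists and the `do:` branch when it does not. A small standalone sketch of that pattern (names are illustrative):

```nim
import std/tables

var muxersPerPeer = initTable[string, seq[int]]()

proc store(peer: string, muxerId: int) =
  var newPeer = false
  muxersPerPeer.withValue(peer, muxers):
    # key present: `muxers` is a ptr to the stored seq, mutate it in place
    muxers[].add(muxerId)
  do:
    # key absent: insert the first entry for this peer
    muxersPerPeer[peer] = @[muxerId]
    newPeer = true
  echo peer, " seen before: ", not newPeer

store("peerA", 1)
store("peerA", 2)
```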
@@ -338,7 +337,7 @@ proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)

proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
@@ -382,7 +381,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)

proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##

@@ -390,7 +389,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()

proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##

@@ -398,7 +397,7 @@ proc getStream*(c: ConnManager,

proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##

@@ -15,10 +15,7 @@

# RFC @ https://tools.ietf.org/html/rfc7539

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/blockx
from stew/assign2 import assign

@@ -8,10 +8,7 @@
# those terms.

## This module implements Public Key and Private Key interface for libp2p.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

from strutils import split, strip, cmpIgnoreCase

@@ -68,11 +65,13 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
when supported(PKScheme.ECDSA):
import ecnist

# We are still importing `ecnist` because, it is used for SECIO handshake,
# but it will be impossible to create ECNIST keys or import ECNIST keys.
# These used to be declared in `crypto` itself
export ecnist.ephemeral, ecnist.ECDHEScheme

import ecnist, bearssl/rand, bearssl/hash as bhash
import bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -89,8 +88,6 @@ type
Sha256,
Sha512

ECDHEScheme* = EcCurveKind

PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -458,7 +455,8 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data

proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
template initImpl[T: PrivateKey|PublicKey](
key: var T, data: openArray[byte]): bool =
## Initialize private key ``key`` from libp2p's protobuf serialized raw
## binary form.
##
@@ -471,7 +469,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
if not(r1.isOk() and r1.get() and r2.isOk() and r2.get()):
if not(r1.get(false) and r2.get(false)):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -520,6 +518,14 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
else:
false

{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
proc init*(key: var PrivateKey, data: openArray[byte]): bool =
initImpl(key, data)

proc init*(key: var PublicKey, data: openArray[byte]): bool =
initImpl(key, data)
{.pop.}

proc init*(sig: var Signature, data: openArray[byte]): bool =
## Initialize signature ``sig`` from raw binary form.
##
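The single generic `init` proc is split into per-type procs (wrapped in a `ProveField` warning push), with the shared body moved into the `initImpl` template. The caller-facing behaviour is unchanged; a hedged round-trip sketch (the key scheme and `expect` messages are illustrative):

```nim
import libp2p/crypto/crypto

let rng = newRng()
let priv = PrivateKey.random(PKScheme.Ed25519, rng[]).expect("random key")
let encoded = priv.getBytes().expect("protobuf-encoded key")

var decoded: PrivateKey
doAssert decoded.init(encoded)   # proc init*(key: var PrivateKey, data: openArray[byte]): bool
doAssert decoded == priv
```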
@@ -873,34 +879,6 @@ proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)

proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
ok(keypair)

proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)

proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.
@@ -946,84 +924,30 @@ proc selectBest*(order: int, p1, p2: string): string =
if felement == selement:
return felement

proc createProposal*(nonce, pubkey: openArray[byte],
exchanges, ciphers, hashes: string): seq[byte] =
## Create SecIO proposal message using random ``nonce``, local public key
## ``pubkey``, comma-delimieted list of supported exchange schemes
## ``exchanges``, comma-delimeted list of supported ciphers ``ciphers`` and
## comma-delimeted list of supported hashes ``hashes``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, nonce)
msg.write(2, pubkey)
msg.write(3, exchanges)
msg.write(4, ciphers)
msg.write(5, hashes)
msg.finish()
msg.buffer

proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
exchanges, ciphers, hashes: var string): bool =
## Parse incoming proposal message and decode remote random nonce ``nonce``,
## remote public key ``pubkey``, comma-delimieted list of supported exchange
## schemes ``exchanges``, comma-delimeted list of supported ciphers
## ``ciphers`` and comma-delimeted list of supported hashes ``hashes``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, nonce)
let r2 = pb.getField(2, pubkey)
let r3 = pb.getField(3, exchanges)
let r4 = pb.getField(4, ciphers)
let r5 = pb.getField(5, hashes)

r1.isOk() and r1.get() and r2.isOk() and r2.get() and
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
r5.isOk() and r5.get()

proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
## signature of proposal blocks ``signature``.
var msg = initProtoBuffer({WithUint32BeLength})
msg.write(1, epubkey)
msg.write(2, signature)
msg.finish()
msg.buffer

proc decodeExchange*(message: seq[byte],
pubkey, signature: var seq[byte]): bool =
## Parse incoming exchange message and decode remote ephemeral public key
## ``pubkey`` and signature ``signature``.
##
## Procedure returns ``true`` on success and ``false`` on error.
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getField(2, signature)
r1.isOk() and r1.get() and r2.isOk() and r2.get()

## Serialization/Deserialization helpers

proc write*(vb: var VBuffer, pubkey: PublicKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())

proc write*(vb: var VBuffer, seckey: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())

proc write*(vb: var VBuffer, sig: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())

proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())

proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
inline, raises: [Defect].} =
inline, raises: [].} =
write(pb, field, sig.getBytes())

proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,

@@ -15,10 +15,7 @@

# RFC @ https://tools.ietf.org/html/rfc7748

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/[ec, rand]
import stew/results

@@ -14,10 +14,7 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/[ec, rand, hash]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -997,3 +994,33 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)

type ECDHEScheme* = EcCurveKind

proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng)
ok(keypair)

proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384, P-521, if encoding
## string is not supported P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
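With the `ephemeral` helpers relocated next to the EC code (and re-exported from the crypto module, as the hunks above show), generating an ephemeral ECDHE keypair looks roughly like this hedged sketch; the curve string is one of the encodings listed above:

```nim
import bearssl/rand
import libp2p/crypto/ecnist

let rng = HmacDrbgContext.new()
# ephemeral returns an EcResult[EcKeyPair]; unwrap or handle the error
let keypair = ephemeral("P-256", rng[]).expect("ephemeral keypair")
# keypair.seckey / keypair.pubkey can now be fed into the ECDHE key agreement
doAssert not isNil(keypair.seckey)
```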
@@ -11,10 +11,7 @@
## This code is a port of the public domain, "ref10" implementation of ed25519
## from SUPERCOP.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/rand
import constants

@@ -9,10 +9,7 @@

# https://tools.ietf.org/html/rfc5869

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import nimcrypto
import bearssl/[kdf, hash]

@@ -9,10 +9,7 @@

## This module implements minimal ASN.1 encoding/decoding primitives.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import stew/[endians2, results, ctops]
export results

@@ -13,10 +13,7 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/[rsa, rand, hash]
import minasn1

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import bearssl/rand
import

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

## This module implementes API for `go-libp2p-daemon`.
import std/[os, osproc, strutils, tables, strtabs, sequtils]
@@ -153,10 +150,10 @@ type
key*: PublicKey

P2PStreamCallback* = proc(api: DaemonAPI,
stream: P2PStream): Future[void] {.gcsafe, raises: [Defect, CatchableError].}
stream: P2PStream): Future[void] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback* = proc(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.gcsafe, raises: [Defect, CatchableError].}
message: PubSubMessage): Future[bool] {.gcsafe, raises: [CatchableError].}

DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
@@ -474,7 +471,7 @@ proc checkResponse(pb: ProtoBuffer): ResponseKind {.inline.} =
else:
result = ResponseKind.Error

proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalError].} =
var error: seq[byte]
if pb.getRequiredField(ResponseType.ERROR.int, error).isOk():
if initProtoBuffer(error).getRequiredField(1, result).isErr():
@@ -504,7 +501,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer

proc newConnection*(api: DaemonAPI): Future[StreamTransport]
{.raises: [Defect, LPError].} =
{.raises: [LPError].} =
result = connect(api.address)

proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
@@ -515,7 +512,7 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
var transp = await connect(address)
await transp.closeWait()
result = true
except CatchableError, Defect:
except CatchableError:
result = false

when defined(windows):
@@ -556,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)

# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}

proc copyEnv(): StringTableRef =
## This procedure copy all environment variables into StringTable.
@@ -758,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},

# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
# Waiting until daemon will not be bound to control socket.
@@ -837,7 +834,7 @@ proc transactMessage(transp: StreamTransport,
result = initProtoBuffer(message)

proc getPeerInfo(pb: ProtoBuffer): PeerInfo
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
## Get PeerInfo object from ``pb``.
result.addresses = newSeq[MultiAddress]()
if pb.getRequiredField(1, result.peer).isErr():
@@ -868,7 +865,7 @@ proc connect*(api: DaemonAPI, peer: PeerId,
timeout))
pb.withMessage() do:
discard
except CatchableError, Defect:
except CatchableError:
await api.closeConnection(transp)

proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
@@ -928,7 +925,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
asyncSpawn handler(api, stream)

proc addHandler*(api: DaemonAPI, protocols: seq[string],
handler: P2PStreamCallback) {.async, raises: [LPError].} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
|
||||
@@ -998,7 +995,7 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
|
||||
await api.closeConnection(transp)
|
||||
|
||||
proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
|
||||
{.raises: [Defect, DaemonLocalError].} =
|
||||
{.raises: [DaemonLocalError].} =
|
||||
var res: seq[byte]
|
||||
if pb.getRequiredField(2, res).isOk():
|
||||
result = initProtoBuffer(res).getPeerInfo()
|
||||
@@ -1006,23 +1003,23 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
|
||||
raise newException(DaemonLocalError, "Missing required field `peer`!")
|
||||
|
||||
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
|
||||
{.raises: [Defect, DaemonLocalError].} =
|
||||
{.raises: [DaemonLocalError].} =
|
||||
result = newSeq[byte]()
|
||||
if pb.getRequiredField(3, result).isErr():
|
||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||
|
||||
proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
|
||||
{.raises: [Defect, DaemonLocalError].} =
|
||||
{.raises: [DaemonLocalError].} =
|
||||
if pb.getRequiredField(3, result).isErr():
|
||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||
|
||||
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
|
||||
{.raises: [Defect, DaemonLocalError].} =
|
||||
{.raises: [DaemonLocalError].} =
|
||||
if pb.getRequiredField(3, result).isErr():
|
||||
raise newException(DaemonLocalError, "Missing field `value`!")
|
||||
|
||||
proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
|
||||
{.inline, raises: [Defect, DaemonLocalError].} =
|
||||
{.inline, raises: [DaemonLocalError].} =
|
||||
var dhtResponse: seq[byte]
|
||||
if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
|
||||
var pbDhtResponse = initProtoBuffer(dhtResponse)
|
||||
@@ -1035,13 +1032,13 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
|
||||
var value: seq[byte]
|
||||
if pbDhtResponse.getRequiredField(3, value).isErr():
|
||||
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
|
||||
|
||||
|
||||
return initProtoBuffer(value)
|
||||
else:
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
|
||||
proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
|
||||
{.inline, raises: [Defect, DaemonLocalError].} =
|
||||
{.inline, raises: [DaemonLocalError].} =
|
||||
var res: seq[byte]
|
||||
if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
|
||||
raise newException(DaemonLocalError, "Wrong message type!")
|
||||
@@ -1049,7 +1046,7 @@ proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
|
||||
initProtoBuffer(res)
|
||||
|
||||
proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
|
||||
{.inline, raises: [Defect, DaemonLocalError].} =
|
||||
{.inline, raises: [DaemonLocalError].} =
|
||||
var dtype: uint
|
||||
if pb.getRequiredField(1, dtype).isErr():
|
||||
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
## This module implements Pool of StreamTransport.
|
||||
import chronos
|
||||
|
||||
@@ -25,7 +25,7 @@
|
||||
## 5. LocalAddress: optional bytes
|
||||
## 6. RemoteAddress: optional bytes
|
||||
## 7. Message: required bytes
|
||||
import os, options
|
||||
import os
|
||||
import nimcrypto/utils, stew/endians2
|
||||
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
|
||||
multiaddress, peerid, varint, muxers/mplex/coder
|
||||
@@ -33,7 +33,7 @@ import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
|
||||
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
|
||||
NanosecondRange, initTime
|
||||
from strutils import toHex, repeat
|
||||
export peerid, options, multiaddress
|
||||
export peerid, multiaddress
|
||||
|
||||
type
|
||||
FlowDirection* = enum
|
||||
@@ -43,10 +43,10 @@ type
|
||||
timestamp*: uint64
|
||||
direction*: FlowDirection
|
||||
message*: seq[byte]
|
||||
seqID*: Option[uint64]
|
||||
mtype*: Option[uint64]
|
||||
local*: Option[MultiAddress]
|
||||
remote*: Option[MultiAddress]
|
||||
seqID*: Opt[uint64]
|
||||
mtype*: Opt[uint64]
|
||||
local*: Opt[MultiAddress]
|
||||
remote*: Opt[MultiAddress]
|
||||
|
||||
const
|
||||
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
|
||||
@@ -72,7 +72,8 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
var pb = initProtoBuffer(options = {WithVarintLength})
|
||||
pb.write(2, getTimestamp())
|
||||
pb.write(4, uint64(direction))
|
||||
pb.write(6, conn.observedAddr)
|
||||
conn.observedAddr.withValue(oaddr):
|
||||
pb.write(6, oaddr)
|
||||
pb.write(7, data)
|
||||
pb.finish()
|
||||
|
||||
@@ -100,7 +101,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
finally:
|
||||
close(handle)
|
||||
|
||||
proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
|
||||
## Decode protobuf's message ProtoMessage from array of bytes ``data``.
|
||||
var
|
||||
pb = initProtoBuffer(data)
|
||||
@@ -108,13 +109,12 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
ma1, ma2: MultiAddress
|
||||
pmsg: ProtoMessage
|
||||
|
||||
let res2 = pb.getField(2, pmsg.timestamp)
|
||||
if res2.isErr() or not(res2.get()):
|
||||
return none[ProtoMessage]()
|
||||
|
||||
let res4 = pb.getField(4, value)
|
||||
if res4.isErr() or not(res4.get()):
|
||||
return none[ProtoMessage]()
|
||||
let
|
||||
r2 = pb.getField(2, pmsg.timestamp)
|
||||
r4 = pb.getField(4, value)
|
||||
r7 = pb.getField(7, pmsg.message)
|
||||
if not r2.get(false) or not r4.get(false) or not r7.get(false):
|
||||
return Opt.none(ProtoMessage)
|
||||
|
||||
# `case` statement could not work here with an error "selector must be of an
|
||||
# ordinal type, float or string"
|
||||
@@ -124,30 +124,27 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
elif value == uint64(Incoming):
|
||||
Incoming
|
||||
else:
|
||||
return none[ProtoMessage]()
|
||||
return Opt.none(ProtoMessage)
|
||||
|
||||
let res7 = pb.getField(7, pmsg.message)
|
||||
if res7.isErr() or not(res7.get()):
|
||||
return none[ProtoMessage]()
|
||||
let r1 = pb.getField(1, value)
|
||||
if r1.get(false):
|
||||
pmsg.seqID = Opt.some(value)
|
||||
|
||||
value = 0'u64
|
||||
let res1 = pb.getField(1, value)
|
||||
if res1.isOk() and res1.get():
|
||||
pmsg.seqID = some(value)
|
||||
value = 0'u64
|
||||
let res3 = pb.getField(3, value)
|
||||
if res3.isOk() and res3.get():
|
||||
pmsg.mtype = some(value)
|
||||
let res5 = pb.getField(5, ma1)
|
||||
if res5.isOk() and res5.get():
|
||||
pmsg.local = some(ma1)
|
||||
let res6 = pb.getField(6, ma2)
|
||||
if res6.isOk() and res6.get():
|
||||
pmsg.remote = some(ma2)
|
||||
let r3 = pb.getField(3, value)
|
||||
if r3.get(false):
|
||||
pmsg.mtype = Opt.some(value)
|
||||
|
||||
some(pmsg)
|
||||
let
|
||||
r5 = pb.getField(5, ma1)
|
||||
r6 = pb.getField(6, ma2)
|
||||
if r5.get(false):
|
||||
pmsg.local = Opt.some(ma1)
|
||||
if r6.get(false):
|
||||
pmsg.remote = Opt.some(ma2)
|
||||
|
||||
iterator messages*(data: seq[byte]): Option[ProtoMessage] =
|
||||
Opt.some(pmsg)
|
||||
|
||||
iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
|
||||
## Iterate over sequence of bytes and decode all the ``ProtoMessage``
|
||||
## messages we found.
|
||||
var value: uint64
|
||||
@@ -242,27 +239,19 @@ proc toString*(msg: ProtoMessage, dump = true): string =
|
||||
" >> "
|
||||
let address =
|
||||
block:
|
||||
let local =
|
||||
if msg.local.isSome():
|
||||
"[" & $(msg.local.get()) & "]"
|
||||
else:
|
||||
"[LOCAL]"
|
||||
let remote =
|
||||
if msg.remote.isSome():
|
||||
"[" & $(msg.remote.get()) & "]"
|
||||
else:
|
||||
"[REMOTE]"
|
||||
let local = block:
|
||||
msg.local.withValue(loc): "[" & $loc & "]"
|
||||
else: "[LOCAL]"
|
||||
let remote = block:
|
||||
msg.remote.withValue(rem): "[" & $rem & "]"
|
||||
else: "[REMOTE]"
|
||||
local & direction & remote
|
||||
let seqid =
|
||||
if msg.seqID.isSome():
|
||||
"seqID = " & $(msg.seqID.get()) & " "
|
||||
else:
|
||||
""
|
||||
let mtype =
|
||||
if msg.mtype.isSome():
|
||||
"type = " & $(msg.mtype.get()) & " "
|
||||
else:
|
||||
""
|
||||
let seqid = block:
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
else: ""
let mtype = block:
msg.mtype.withValue(typ): "type = " & $typ & " "
else: ""
res.add(" ")
res.add(address)
res.add(" ")

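# Editor's note: not part of the diff. A self-contained illustration of the
# std/options -> stew/results `Opt` migration applied in the hunks above,
# mirroring the `withValue`/`else` pattern used by `toString`.
import stew/results

proc describeType(mtype: Opt[uint64]): string =
  # old style: if mtype.isSome(): "type = " & $mtype.get() & " " else: ""
  result = block:
    mtype.withValue(typ): "type = " & $typ & " "
    else: ""

doAssert describeType(Opt.some(42'u64)) == "type = 42 "
doAssert describeType(Opt.none(uint64)) == ""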
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import stew/results
|
||||
@@ -29,7 +26,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async, base.} =
|
||||
dir = Direction.Out) {.async, base.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
|
||||
@@ -53,21 +53,21 @@ proc dialAndUpgrade(
|
||||
peerId: Opt[PeerId],
|
||||
hostname: string,
|
||||
address: MultiAddress,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
for transport in self.transports: # for each transport
|
||||
if transport.handles(address): # check if it can dial it
|
||||
trace "Dialing address", address, peerId, hostname
|
||||
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
|
||||
let dialed =
|
||||
try:
|
||||
libp2p_total_dial_attempts.inc()
|
||||
await transport.dial(hostname, address, peerId)
|
||||
except CancelledError as exc:
|
||||
debug "Dialing canceled", msg = exc.msg, peerId
|
||||
debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Dialing failed", msg = exc.msg, peerId
|
||||
debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
libp2p_failed_dials.inc()
|
||||
return nil # Try the next address
|
||||
|
||||
@@ -75,23 +75,29 @@ proc dialAndUpgrade(
|
||||
|
||||
let mux =
|
||||
try:
|
||||
dialed.transportDir = upgradeDir
|
||||
await transport.upgrade(dialed, upgradeDir, peerId)
|
||||
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
|
||||
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
|
||||
# The if below is more general and might handle other use cases in the future.
|
||||
if dialed.dir != dir:
|
||||
dialed.dir = dir
|
||||
await transport.upgrade(dialed, peerId)
|
||||
except CancelledError as exc:
|
||||
await dialed.close()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
# If we failed to establish the connection through one transport,
|
||||
# we won't succeed through another - no use in trying again
|
||||
await dialed.close()
|
||||
debug "Upgrade failed", msg = exc.msg, peerId
|
||||
if exc isnot CancelledError:
|
||||
if upgradeDir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
debug "Connection upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
|
||||
if dialed.dir == Direction.Out:
|
||||
libp2p_failed_upgrades_outgoing.inc()
|
||||
else:
|
||||
libp2p_failed_upgrades_incoming.inc()
|
||||
|
||||
# Try other address
|
||||
return nil
|
||||
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
|
||||
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
|
||||
debug "Dial successful", peerId = mux.connection.peerId
|
||||
return mux
|
||||
return nil
|
||||
@@ -128,10 +134,10 @@ proc dialAndUpgrade(
|
||||
self: Dialer,
|
||||
peerId: Opt[PeerId],
|
||||
addrs: seq[MultiAddress],
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
|
||||
debug "Dialing peer", peerId
|
||||
debug "Dialing peer", peerId = peerId.get(default(PeerId))
|
||||
|
||||
for rawAddress in addrs:
|
||||
# resolve potential dnsaddr
|
||||
@@ -146,11 +152,11 @@ proc dialAndUpgrade(
|
||||
else: await self.nameResolver.resolveMAddress(expandedAddress)
|
||||
|
||||
for resolvedAddress in resolvedAddresses:
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
|
||||
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
|
||||
if not isNil(result):
|
||||
return result
|
||||
|
||||
proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
|
||||
proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
|
||||
let muxer = self.connManager.selectMuxer(peerId)
|
||||
if muxer == nil:
|
||||
return Opt.none(Muxer)
|
||||
@@ -164,7 +170,7 @@ proc internalConnect(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial: bool,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out):
|
||||
dir = Direction.Out):
|
||||
Future[Muxer] {.async.} =
|
||||
if Opt.some(self.localPeerId) == peerId:
|
||||
raise newException(CatchableError, "can't dial self!")
|
||||
@@ -174,15 +180,15 @@ proc internalConnect(
|
||||
try:
|
||||
await lock.acquire()
|
||||
|
||||
if peerId.isSome and reuseConnection:
|
||||
let muxOpt = await self.tryReusingConnection(peerId.get())
|
||||
if muxOpt.isSome:
|
||||
return muxOpt.get()
|
||||
if reuseConnection:
|
||||
peerId.withValue(peerId):
|
||||
self.tryReusingConnection(peerId).withValue(mux):
|
||||
return mux
|
||||
|
||||
let slot = self.connManager.getOutgoingSlot(forceDial)
|
||||
let muxed =
|
||||
try:
|
||||
await self.dialAndUpgrade(peerId, addrs, upgradeDir)
|
||||
await self.dialAndUpgrade(peerId, addrs, dir)
|
||||
except CatchableError as exc:
|
||||
slot.release()
|
||||
raise exc
|
||||
@@ -209,7 +215,7 @@ method connect*(
|
||||
addrs: seq[MultiAddress],
|
||||
forceDial = false,
|
||||
reuseConnection = true,
|
||||
upgradeDir = Direction.Out) {.async.} =
|
||||
dir = Direction.Out) {.async.} =
|
||||
## connect remote peer without negotiating
|
||||
## a protocol
|
||||
##
|
||||
@@ -217,7 +223,7 @@ method connect*(
|
||||
if self.connManager.connCount(peerId) > 0 and reuseConnection:
|
||||
return
|
||||
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
|
||||
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
|
||||
|
||||
method connect*(
|
||||
self: Dialer,
|
||||
@@ -225,20 +231,20 @@ method connect*(
|
||||
allowUnknownPeerId = false): Future[PeerId] {.async.} =
|
||||
## Connects to a peer and retrieve its PeerId
|
||||
|
||||
let fullAddress = parseFullAddress(address)
|
||||
if fullAddress.isOk:
|
||||
parseFullAddress(address).toOpt().withValue(fullAddress):
|
||||
return (await self.internalConnect(
|
||||
Opt.some(fullAddress.get()[0]),
|
||||
@[fullAddress.get()[1]],
|
||||
false)).connection.peerId
|
||||
else:
|
||||
if allowUnknownPeerId == false:
|
||||
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
|
||||
return (await self.internalConnect(
|
||||
Opt.none(PeerId),
|
||||
@[address],
|
||||
Opt.some(fullAddress[0]),
|
||||
@[fullAddress[1]],
|
||||
false)).connection.peerId
|
||||
|
||||
if allowUnknownPeerId == false:
|
||||
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
|
||||
|
||||
return (await self.internalConnect(
|
||||
Opt.none(PeerId),
|
||||
@[address],
|
||||
false)).connection.peerId
|
||||
|
||||
proc negotiateStream(
|
||||
self: Dialer,
|
||||
conn: Connection,
|
||||
@@ -324,7 +330,7 @@ method dial*(
|
||||
await cleanup()
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "Error dialing", conn, msg = exc.msg
|
||||
debug "Error dialing", conn, err = exc.msg
|
||||
await cleanup()
|
||||
raise exc
|
||||
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/sequtils
|
||||
import chronos, chronicles, stew/results
|
||||
@@ -18,7 +15,7 @@ import ../errors
|
||||
|
||||
type
|
||||
BaseAttr = ref object of RootObj
|
||||
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
|
||||
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [].}
|
||||
|
||||
Attribute[T] = ref object of BaseAttr
|
||||
value: T
|
||||
@@ -60,7 +57,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
|
||||
return Opt.some(f.to(T))
|
||||
Opt.none(T)
|
||||
|
||||
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
|
||||
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
|
||||
pa{T}.valueOr: raise newException(KeyError, "Attribute not found")
|
||||
|
||||
proc match*(pa, candidate: PeerAttributes): bool =
|
||||
@@ -73,7 +70,7 @@ proc match*(pa, candidate: PeerAttributes): bool =
|
||||
return true
|
||||
|
||||
type
|
||||
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
|
||||
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [], gcsafe.}
|
||||
|
||||
DiscoveryInterface* = ref object of RootObj
|
||||
onPeerFound*: PeerFoundCallback
|
||||
@@ -125,20 +122,15 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
pa.add(value)
return dm.request(pa)

proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
proc advertise*[T](dm: DiscoveryManager, value: T) =
for i in dm.interfaces:
i.toAdvertise = pa
i.toAdvertise.add(value)
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()

proc advertise*[T](dm: DiscoveryManager, value: T) =
var pa: PeerAttributes
pa.add(value)
dm.advertise(pa)

template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
## peer attributes are available through the variable

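# Editor's note: not part of the diff. A hedged usage sketch for the
# DiscoveryManager API reworked above; the `RdvNamespace` value comes from the
# rendezvous interface in the following hunks, and the `peer` variable name
# inside `forEach` is an assumption (the doc comment above is cut off here).
proc discoveryExample(dm: DiscoveryManager) =
  # Advertise this node under a namespace; the per-interface advertise loop
  # shown above is started (or poked via advertisementUpdated) as needed.
  dm.advertise(RdvNamespace("my-app"))

  # Request peers for the same namespace and handle each discovery result.
  let query = dm.request(RdvNamespace("my-app"))
  query.forEach:
    echo "discovered peer: ", peer[PeerId]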
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import ./discoverymngr,
|
||||
@@ -22,6 +19,7 @@ type
|
||||
rdv*: RendezVous
|
||||
timeToRequest: Duration
|
||||
timeToAdvertise: Duration
|
||||
ttl: Duration
|
||||
|
||||
RdvNamespace* = distinct string
|
||||
|
||||
@@ -65,12 +63,16 @@ method advertise*(self: RendezVousInterface) {.async.} =
|
||||
|
||||
self.advertisementUpdated.clear()
|
||||
for toAdv in toAdvertise:
|
||||
await self.rdv.advertise(toAdv, self.timeToAdvertise)
|
||||
try:
|
||||
await self.rdv.advertise(toAdv, self.ttl)
|
||||
except CatchableError as error:
|
||||
debug "RendezVous advertise error: ", msg = error.msg
|
||||
|
||||
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
|
||||
|
||||
proc new*(T: typedesc[RendezVousInterface],
|
||||
rdv: RendezVous,
|
||||
ttr: Duration = 1.minutes,
|
||||
tta: Duration = MinimumDuration): RendezVousInterface =
|
||||
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
|
||||
tta: Duration = 1.minutes,
|
||||
ttl: Duration = MinimumDuration): RendezVousInterface =
|
||||
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
|
||||
|
||||
@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
|
||||
# sadly nim needs more love for hygienic templates
|
||||
# so here goes the macro, its based on the proc/template version
|
||||
# and uses quote do so it's quite readable
|
||||
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
# TODO https://github.com/nim-lang/Nim/issues/22936
|
||||
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
|
||||
let nexclude = exclude.len
|
||||
case nexclude
|
||||
of 0:
|
||||
@@ -43,34 +44,3 @@ macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
|
||||
# We still don't abort but warn
|
||||
debug "A future has failed, enable trace logging for details", error=exc.name
|
||||
trace "Exception details", msg=exc.msg
|
||||
|
||||
proc allFuturesThrowing*[T](args: varargs[Future[T]]): Future[void] =
|
||||
var futs: seq[Future[T]]
|
||||
for fut in args:
|
||||
futs &= fut
|
||||
proc call() {.async.} =
|
||||
var first: ref CatchableError = nil
|
||||
futs = await allFinished(futs)
|
||||
for fut in futs:
|
||||
if fut.failed:
|
||||
let err = fut.readError()
|
||||
if err of Defect:
|
||||
raise err
|
||||
else:
|
||||
if err of CancelledError:
|
||||
raise err
|
||||
if isNil(first):
|
||||
first = err
|
||||
if not isNil(first):
|
||||
raise first
|
||||
|
||||
return call()
|
||||
|
||||
template tryAndWarn*(message: static[string]; body: untyped): untyped =
|
||||
try:
|
||||
body
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
debug "An exception has ocurred, enable trace logging for details", name = exc.name, msg = message
|
||||
trace "Exception details", exc = exc.msg
|
||||
|
||||
@@ -9,15 +9,12 @@
|
||||
|
||||
## This module implements MultiAddress.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import pkg/chronos, chronicles
|
||||
import std/[nativesockets, hashes]
|
||||
import tables, strutils, sets, stew/shims/net
|
||||
import std/[nativesockets, net, hashes]
|
||||
import tables, strutils, sets
|
||||
import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
|
||||
protobuf/minprotobuf, errors, utility
|
||||
import stew/[base58, base32, endians2, results]
|
||||
@@ -122,23 +119,46 @@ proc ip6VB(vb: var VBuffer): bool =
|
||||
if vb.readArray(a.address_v6) == 16:
|
||||
result = true
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
template pathStringToBuffer(s: string, vb: var VBuffer): bool =
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToString(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathBufferToStringNoSlash(vb: var VBuffer, s: var string): bool =
|
||||
s = ""
|
||||
if (vb.readSeq(s) > 0) and (len(s) > 0) and (s.find('/') == -1):
|
||||
true
|
||||
else:
|
||||
false
|
||||
|
||||
template pathValidateBuffer(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
template pathValidateBufferNoSlash(vb: var VBuffer): bool =
|
||||
var s = ""
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneStB(s: string, vb: var VBuffer): bool =
|
||||
## IPv6 stringToBuffer() implementation.
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc ip6zoneBtS(vb: var VBuffer, s: var string): bool =
|
||||
## IPv6 bufferToString() implementation.
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc ip6zoneVB(vb: var VBuffer): bool =
|
||||
## IPv6 validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc portStB(s: string, vb: var VBuffer): bool =
|
||||
## Port number stringToBuffer() implementation.
|
||||
@@ -157,7 +177,8 @@ proc portBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Port number bufferToString() implementation.
|
||||
var port: array[2, byte]
|
||||
if vb.readArray(port) == 2:
|
||||
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
let nport =
|
||||
(safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
|
||||
s = $nport
|
||||
result = true
|
||||
|
||||
@@ -217,7 +238,8 @@ proc onionBtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[12, byte]
|
||||
if vb.readArray(buf) == 12:
|
||||
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
let nport =
|
||||
(safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 9))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -251,7 +273,8 @@ proc onion3BtS(vb: var VBuffer, s: var string): bool =
|
||||
## ONION address bufferToString() implementation.
|
||||
var buf: array[37, byte]
|
||||
if vb.readArray(buf) == 37:
|
||||
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
var nport =
|
||||
(safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
|
||||
s = Base32Lower.encode(buf.toOpenArray(0, 34))
|
||||
s.add(":")
|
||||
s.add($nport)
|
||||
@@ -265,40 +288,27 @@ proc onion3VB(vb: var VBuffer): bool =
|
||||
|
||||
proc unixStB(s: string, vb: var VBuffer): bool =
|
||||
## Unix socket name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc unixBtS(vb: var VBuffer, s: var string): bool =
|
||||
## Unix socket name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToString(vb, s)
|
||||
|
||||
proc unixVB(vb: var VBuffer): bool =
|
||||
## Unix socket name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathValidateBuffer(vb)
|
||||
|
||||
proc dnsStB(s: string, vb: var VBuffer): bool =
|
||||
## DNS name stringToBuffer() implementation.
|
||||
if len(s) > 0:
|
||||
vb.writeSeq(s)
|
||||
result = true
|
||||
pathStringToBuffer(s, vb)
|
||||
|
||||
proc dnsBtS(vb: var VBuffer, s: var string): bool =
|
||||
## DNS name bufferToString() implementation.
|
||||
s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
result = true
|
||||
pathBufferToStringNoSlash(vb, s)
|
||||
|
||||
proc dnsVB(vb: var VBuffer): bool =
|
||||
## DNS name validateBuffer() implementation.
|
||||
var s = ""
|
||||
if vb.readSeq(s) > 0:
|
||||
if s.find('/') == -1:
|
||||
result = true
|
||||
pathValidateBufferNoSlash(vb)
|
||||
|
||||
proc mapEq*(codec: string): MaPattern =
|
||||
## ``Equal`` operator for pattern
|
||||
@@ -401,6 +411,9 @@ const
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
|
||||
coder: TranscoderIP6Zone
|
||||
@@ -419,6 +432,9 @@ const
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("wss"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("tls"), kind: Marker, size: 0
|
||||
),
|
||||
MAProtocol(
|
||||
mcodec: multiCodec("ipfs"), kind: Length, size: 0,
|
||||
coder: TranscoderP2P
|
||||
@@ -471,7 +487,7 @@ const
|
||||
IP* = mapOr(IP4, IP6)
|
||||
DNS_OR_IP* = mapOr(DNS, IP)
|
||||
TCP_DNS* = mapAnd(DNS, mapEq("tcp"))
|
||||
TCP_IP* =mapAnd(IP, mapEq("tcp"))
|
||||
TCP_IP* = mapAnd(IP, mapEq("tcp"))
|
||||
TCP* = mapOr(TCP_DNS, TCP_IP)
|
||||
UDP_DNS* = mapAnd(DNS, mapEq("udp"))
|
||||
UDP_IP* = mapAnd(IP, mapEq("udp"))
|
||||
@@ -482,9 +498,10 @@ const
WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
WS* = mapAnd(TCP, mapEq("ws"))
WSS_DNS* = mapAnd(TCP_DNS, mapEq("wss"))
WSS_IP* = mapAnd(TCP_IP, mapEq("wss"))
WSS* = mapAnd(TCP, mapEq("wss"))
TLS_WS* = mapOr(mapEq("wss"), mapAnd(mapEq("tls"), mapEq("ws")))
WSS_DNS* = mapAnd(TCP_DNS, TLS_WS)
WSS_IP* = mapAnd(TCP_IP, TLS_WS)
WSS* = mapAnd(TCP, TLS_WS)
WebSockets_DNS* = mapOr(WS_DNS, WSS_DNS)
WebSockets_IP* = mapOr(WS_IP, WSS_IP)
WebSockets* = mapOr(WS, WSS)
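# Editor's note: not part of the diff. A short sketch of what the reworked
# TLS_WS / WSS patterns above accept; string-based `MultiAddress.init` with
# `tryGet()` is assumed from the rest of this module.
let
  legacyWss = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").tryGet()
  tlsWs = MultiAddress.init("/ip4/127.0.0.1/tcp/443/tls/ws").tryGet()
doAssert WSS.match(legacyWss)   # the old /wss spelling still matches
doAssert WSS.match(tlsWs)       # /tls/ws is now accepted as well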
@@ -656,7 +673,8 @@ proc getPart(ma: MultiAddress, index: int): MaResult[MultiAddress] =
|
||||
inc(offset)
|
||||
ok(res)
|
||||
|
||||
proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
proc getParts[U, V](ma: MultiAddress,
|
||||
slice: HSlice[U, V]): MaResult[MultiAddress] =
|
||||
when slice.a is BackwardsIndex or slice.b is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
template normalizeIndex(index): int =
|
||||
@@ -670,7 +688,8 @@ proc getParts[U, V](ma: MultiAddress, slice: HSlice[U, V]): MaResult[MultiAddres
|
||||
? res.append(? ma[i])
|
||||
ok(res)
|
||||
|
||||
proc `[]`*(ma: MultiAddress, i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
proc `[]`*(ma: MultiAddress,
|
||||
i: int | BackwardsIndex): MaResult[MultiAddress] {.inline.} =
|
||||
## Returns part with index ``i`` of MultiAddress ``ma``.
|
||||
when i is BackwardsIndex:
|
||||
let maLength = ? len(ma)
|
||||
@@ -765,7 +784,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
|
||||
if not proto.coder.bufferToString(vb.data, part):
|
||||
return err("multiaddress: Decoding protocol error")
|
||||
parts.add($(proto.mcodec))
|
||||
if proto.kind == Path and part[0] == '/':
|
||||
if len(part) > 0 and (proto.kind == Path) and (part[0] == '/'):
|
||||
parts.add(part[1..^1])
|
||||
else:
|
||||
parts.add(part)
|
||||
@@ -775,7 +794,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
|
||||
res = "/" & parts.join("/")
|
||||
ok(res)
|
||||
|
||||
proc `$`*(value: MultiAddress): string {.raises: [Defect].} =
|
||||
proc `$`*(value: MultiAddress): string =
|
||||
## Return string representation of MultiAddress ``value``.
|
||||
let s = value.toString()
|
||||
if s.isErr: s.error
|
||||
@@ -954,7 +973,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
|
||||
## Initialize empty MultiAddress.
|
||||
result.data = initVBuffer()
|
||||
|
||||
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
|
||||
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
|
||||
protocol: IpTransportProtocol, port: Port): MultiAddress =
|
||||
var res: MultiAddress
|
||||
res.data = initVBuffer()
|
||||
@@ -1025,7 +1044,7 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
|
||||
ok()
|
||||
|
||||
proc `&`*(m1, m2: MultiAddress): MultiAddress {.
|
||||
raises: [Defect, LPError].} =
|
||||
raises: [LPError].} =
|
||||
## Concatenates two addresses ``m1`` and ``m2``, and returns result.
|
||||
##
|
||||
## This procedure performs validation of concatenated result and can raise
|
||||
@@ -1035,7 +1054,7 @@ proc `&`*(m1, m2: MultiAddress): MultiAddress {.
|
||||
concat(m1, m2).tryGet()
|
||||
|
||||
proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
|
||||
raises: [Defect, LPError].} =
|
||||
raises: [LPError].} =
|
||||
## Concatenates two addresses ``m1`` and ``m2``.
|
||||
##
|
||||
## This procedure performs validation of concatenated result and can raise
|
||||
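# Editor's note: not part of the diff. A usage sketch for the concatenation
# operators documented above; string-based `MultiAddress.init` is assumed.
var base = MultiAddress.init("/ip4/127.0.0.1").tryGet()
let transport = MultiAddress.init("/tcp/4001").tryGet()
let full = base & transport   # raises LPError if the combined address is invalid
base &= transport             # in-place variant with the same validation
doAssert $full == "/ip4/127.0.0.1/tcp/4001"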
@@ -1079,19 +1098,15 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
|
||||
proc match*(pat: MaPattern, address: MultiAddress): bool =
|
||||
## Match full ``address`` using pattern ``pat`` and return ``true`` if
|
||||
## ``address`` satisfies pattern.
|
||||
let protos = address.protocols()
|
||||
if protos.isErr():
|
||||
return false
|
||||
let res = matchPart(pat, protos.get())
|
||||
let protos = address.protocols().valueOr: return false
|
||||
let res = matchPart(pat, protos)
|
||||
res.flag and (len(res.rem) == 0)
|
||||
|
||||
proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
|
||||
## Match prefix part of ``address`` using pattern ``pat`` and return
|
||||
## ``true`` if ``address`` starts with pattern.
|
||||
let protos = address.protocols()
|
||||
if protos.isErr():
|
||||
return false
|
||||
let res = matchPart(pat, protos.get())
|
||||
let protos = address.protocols().valueOr: return false
|
||||
let res = matchPart(pat, protos)
|
||||
res.flag
|
||||
|
||||
proc `$`*(pat: MaPattern): string =
|
||||
@@ -1120,20 +1135,20 @@ proc getField*(pb: ProtoBuffer, field: int,
|
||||
if not(res):
|
||||
ok(false)
|
||||
else:
|
||||
let ma = MultiAddress.init(buffer)
|
||||
if ma.isOk():
|
||||
value = ma.get()
|
||||
ok(true)
|
||||
else:
|
||||
err(ProtoError.IncorrectBlob)
|
||||
value = MultiAddress.init(buffer).valueOr:
|
||||
return err(ProtoError.IncorrectBlob)
|
||||
ok(true)
|
||||
|
||||
proc getRepeatedField*(pb: ProtoBuffer, field: int,
|
||||
value: var seq[MultiAddress]): ProtoResult[bool] {.
|
||||
inline.} =
|
||||
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
|
||||
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
|
||||
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
|
||||
## Read repeated field from protobuf message. ``field`` is field number.
|
||||
## If the message is malformed, an error is returned. If field is not present
|
||||
## in message, then ``ok(false)`` is returned and value is empty. If field is
|
||||
## present, but no items could be parsed, then
|
||||
## ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
|
||||
## If field is present and some item could be parsed, then ``true`` is
|
||||
## returned and value contains the parsed values.
|
||||
var items: seq[seq[byte]]
|
||||
value.setLen(0)
|
||||
let res = ? pb.getRepeatedField(field, items)
|
||||
@@ -1141,11 +1156,11 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
|
||||
ok(false)
|
||||
else:
|
||||
for item in items:
|
||||
let ma = MultiAddress.init(item)
|
||||
if ma.isOk():
|
||||
value.add(ma.get())
|
||||
else:
|
||||
debug "Not supported MultiAddress in blob", ma = item
|
||||
let ma = MultiAddress.init(item).valueOr:
|
||||
debug "Unsupported MultiAddress in blob", ma = item
|
||||
continue
|
||||
|
||||
value.add(ma)
|
||||
if value.len == 0:
|
||||
err(ProtoError.IncorrectBlob)
|
||||
else:
|
||||
|
||||
@@ -13,10 +13,7 @@
|
||||
## 1. base32z
|
||||
##
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import tables
|
||||
import stew/[base32, base58, base64, results]
|
||||
@@ -27,17 +24,17 @@ type
|
||||
|
||||
MultiBase* = object
|
||||
|
||||
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [].}
|
||||
|
||||
MBCodec = object
|
||||
code: char
|
||||
name: string
|
||||
encr: proc(inbytes: openArray[byte],
|
||||
outbytes: var openArray[char],
|
||||
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
|
||||
decr: proc(inbytes: openArray[char],
|
||||
outbytes: var openArray[byte],
|
||||
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
|
||||
encl: MBCodeSize
|
||||
decl: MBCodeSize
|
||||
|
||||
|
||||
@@ -9,10 +9,7 @@
|
||||
|
||||
## This module implements MultiCodec.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import tables, hashes
|
||||
import varint, vbuffer
|
||||
@@ -194,9 +191,11 @@ const MultiCodecList = [
|
||||
("p2p", 0x01A5),
|
||||
("http", 0x01E0),
|
||||
("https", 0x01BB),
|
||||
("tls", 0x01C0),
|
||||
("quic", 0x01CC),
|
||||
("quic-v1", 0x01CD),
|
||||
("ws", 0x01DD),
|
||||
("wss", 0x01DE), # not in multicodec list
|
||||
("wss", 0x01DE),
|
||||
("p2p-websocket-star", 0x01DF), # not in multicodec list
|
||||
("p2p-webrtc-star", 0x0113), # not in multicodec list
|
||||
("p2p-webrtc-direct", 0x0114), # not in multicodec list
|
||||
|
||||
@@ -21,10 +21,7 @@
|
||||
## 1. SKEIN
|
||||
## 2. MURMUR
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import tables
|
||||
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
|
||||
@@ -45,7 +42,7 @@ const
|
||||
|
||||
type
|
||||
MHashCoderProc* = proc(data: openArray[byte],
|
||||
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
|
||||
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
|
||||
MHash* = object
|
||||
mcodec*: MultiCodec
|
||||
size*: int
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[strutils, sequtils, tables]
|
||||
import chronos, chronicles, stew/byteutils
|
||||
@@ -28,7 +25,7 @@ const
|
||||
Ls = "ls\n"
|
||||
|
||||
type
|
||||
Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
|
||||
Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}
|
||||
|
||||
MultiStreamError* = object of LPError
|
||||
|
||||
@@ -48,15 +45,18 @@ proc new*(T: typedesc[MultistreamSelect]): T =
|
||||
)
|
||||
|
||||
template validateSuffix(str: string): untyped =
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise newException(MultiStreamError, "MultistreamSelect failed, malformed message")
|
||||
if str.endsWith("\n"):
|
||||
str.removeSuffix("\n")
|
||||
else:
|
||||
raise (ref MultiStreamError)(msg:
|
||||
"MultistreamSelect failed, malformed message")
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]):
Future[string] {.async.} =
proc select*(
_: MultistreamSelect | type MultistreamSelect,
conn: Connection,
proto: seq[string]
): Future[string] {.async: (raises: [
CancelledError, LPStreamError, MultiStreamError]).} =
trace "initiating handshake", conn, codec = Codec
## select a remote protocol
await conn.writeLp(Codec & "\n") # write handshake
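# Editor's note: not part of the diff. The `{.async: (raises: [...]).}` pragma
# appearing throughout these hunks is chronos' typed-raises future support; a
# minimal standalone sketch:
import chronos

proc ping(): Future[string] {.async: (raises: [CancelledError, ValueError]).} =
  # Only the listed exception types may escape this proc; raising anything
  # else (or calling something that can) is rejected at compile time.
  if false:
    raise newException(ValueError, "bad input")
  return "pong"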
@@ -69,7 +69,7 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
|
||||
if s != Codec:
|
||||
notice "handshake failed", conn, codec = s
|
||||
raise newException(MultiStreamError, "MultistreamSelect handshake failed")
|
||||
raise (ref MultiStreamError)(msg: "MultistreamSelect handshake failed")
|
||||
else:
|
||||
trace "multistream handshake success", conn
|
||||
|
||||
@@ -101,19 +101,29 @@ proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
# No alternatives, fail
|
||||
return ""
|
||||
|
||||
proc select*(_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string): Future[bool] {.async.} =
|
||||
proc select*(
|
||||
_: MultistreamSelect | type MultistreamSelect,
|
||||
conn: Connection,
|
||||
proto: string
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
if proto.len > 0:
|
||||
return (await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
(await MultistreamSelect.select(conn, @[proto])) == proto
|
||||
else:
|
||||
return (await MultistreamSelect.select(conn, @[])) == Codec
|
||||
(await MultistreamSelect.select(conn, @[])) == Codec
|
||||
|
||||
proc select*(m: MultistreamSelect, conn: Connection): Future[bool] =
|
||||
proc select*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[bool] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError], raw: true).} =
|
||||
m.select(conn, "")
|
||||
|
||||
proc list*(m: MultistreamSelect,
|
||||
conn: Connection): Future[seq[string]] {.async.} =
|
||||
proc list*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection
|
||||
): Future[seq[string]] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
## list remote protos requests on connection
|
||||
if not await m.select(conn):
|
||||
return
|
||||
@@ -129,12 +139,13 @@ proc list*(m: MultistreamSelect,
|
||||
result = list
|
||||
|
||||
proc handle*(
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
|
||||
protos: seq[string],
|
||||
matchers = newSeq[Matcher](),
|
||||
active: bool = false,
|
||||
): Future[string] {.async, gcsafe.} =
|
||||
_: type MultistreamSelect,
|
||||
conn: Connection,
|
||||
protos: seq[string],
|
||||
matchers = newSeq[Matcher](),
|
||||
active: bool = false
|
||||
): Future[string] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MultiStreamError]).} =
|
||||
trace "Starting multistream negotiation", conn, handshaked = active
|
||||
var handshaked = active
|
||||
while not conn.atEof:
|
||||
@@ -143,8 +154,8 @@ proc handle*(
|
||||
|
||||
if not handshaked and ms != Codec:
|
||||
debug "expected handshake message", conn, instead=ms
|
||||
raise newException(CatchableError,
|
||||
"MultistreamSelect handling failed, invalid first message")
|
||||
raise (ref MultiStreamError)(msg:
|
||||
"MultistreamSelect handling failed, invalid first message")
|
||||
|
||||
trace "handle: got request", conn, ms
|
||||
if ms.len() <= 0:
|
||||
@@ -175,14 +186,16 @@ proc handle*(
|
||||
trace "no handlers", conn, protocol = ms
|
||||
await conn.writeLp(Na)
|
||||
|
||||
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
|
||||
proc handle*(
|
||||
m: MultistreamSelect,
|
||||
conn: Connection,
|
||||
active: bool = false) {.async: (raises: [CancelledError]).} =
|
||||
trace "Starting multistream handler", conn, handshaked = active
|
||||
var
|
||||
handshaked = active
|
||||
protos: seq[string]
|
||||
matchers: seq[Matcher]
|
||||
for h in m.handlers:
|
||||
if not isNil(h.match):
|
||||
if h.match != nil:
|
||||
matchers.add(h.match)
|
||||
for proto in h.protos:
|
||||
protos.add(proto)
|
||||
@@ -190,12 +203,13 @@ proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.asy
|
||||
try:
|
||||
let ms = await MultistreamSelect.handle(conn, protos, matchers, active)
|
||||
for h in m.handlers:
|
||||
if (not isNil(h.match) and h.match(ms)) or h.protos.contains(ms):
|
||||
if (h.match != nil and h.match(ms)) or h.protos.contains(ms):
|
||||
trace "found handler", conn, protocol = ms
|
||||
|
||||
var protocolHolder = h
|
||||
let maxIncomingStreams = protocolHolder.protocol.maxIncomingStreams
|
||||
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >= maxIncomingStreams:
|
||||
if protocolHolder.openedStreams.getOrDefault(conn.peerId) >=
|
||||
maxIncomingStreams:
|
||||
debug "Max streams for protocol reached, blocking new stream",
|
||||
conn, protocol = ms, maxIncomingStreams
|
||||
return
|
||||
@@ -232,10 +246,14 @@ proc addHandler*(m: MultistreamSelect,
|
||||
matcher: Matcher = nil) =
|
||||
addHandler(m, @[codec], protocol, matcher)
|
||||
|
||||
proc addHandler*(m: MultistreamSelect,
|
||||
codec: string,
|
||||
handler: LPProtoHandler,
|
||||
matcher: Matcher = nil) =
|
||||
proc addHandler*[E](
|
||||
m: MultistreamSelect,
|
||||
codec: string,
|
||||
handler: LPProtoHandler |
|
||||
proc (
|
||||
conn: Connection,
|
||||
proto: string): InternalRaisesFuture[void, E],
|
||||
matcher: Matcher = nil) =
|
||||
## helper to allow registering pure handlers
|
||||
trace "registering proto handler", proto = codec
|
||||
let protocol = new LPProtocol
|
||||
@@ -246,8 +264,34 @@ proc addHandler*(m: MultistreamSelect,
|
||||
protocol: protocol,
|
||||
match: matcher))
|
||||
|
||||
proc start*(m: MultistreamSelect) {.async.} =
|
||||
await allFutures(m.handlers.mapIt(it.protocol.start()))
|
||||
proc start*(m: MultistreamSelect) {.async: (raises: [CancelledError]).} =
|
||||
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([])`
|
||||
# TODO https://github.com/nim-lang/Nim/issues/23445
|
||||
var futs = newSeqOfCap[Future[void].Raising([CancelledError])](m.handlers.len)
|
||||
for it in m.handlers:
|
||||
futs.add it.protocol.start()
|
||||
try:
|
||||
await allFutures(futs)
|
||||
for fut in futs:
|
||||
await fut
|
||||
except CancelledError as exc:
|
||||
var pending: seq[Future[void].Raising([])]
|
||||
doAssert m.handlers.len == futs.len, "Handlers modified while starting"
|
||||
for i, fut in futs:
|
||||
if not fut.finished:
|
||||
pending.add fut.cancelAndWait()
|
||||
elif fut.completed:
|
||||
pending.add m.handlers[i].protocol.stop()
|
||||
else:
|
||||
static: doAssert typeof(fut).E is (CancelledError,)
|
||||
await noCancel allFutures(pending)
|
||||
raise exc
|
||||
|
||||
proc stop*(m: MultistreamSelect) {.async.} =
await allFutures(m.handlers.mapIt(it.protocol.stop()))
proc stop*(m: MultistreamSelect) {.async: (raises: []).} =
# Nim 1.6.18: Using `mapIt` results in a seq of `.Raising([CancelledError])`
var futs = newSeqOfCap[Future[void].Raising([])](m.handlers.len)
for it in m.handlers:
futs.add it.protocol.stop()
await noCancel allFutures(futs)
for fut in futs:
await fut

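# Editor's note: not part of the diff. A hedged end-to-end sketch of the
# MultistreamSelect API reshaped above; the echo protocol name and handler
# body are illustrative only.
proc echoHandler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
  # A registered protocol handler; a real handler would read/write on `conn`.
  await conn.close()

proc newMss(): MultistreamSelect =
  result = MultistreamSelect.new()
  result.addHandler("/echo/1.0.0", echoHandler)

# Listener side, on an accepted connection:   await ms.handle(conn)
# Dialer side, proposing a protocol:          discard await ms.select(conn, "/echo/1.0.0")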
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import pkg/[chronos, chronicles, stew/byteutils]
|
||||
import ../../stream/connection,
|
||||
@@ -45,7 +42,10 @@ const MaxMsgSize* = 1 shl 20 # 1mb
|
||||
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
|
||||
newException(InvalidMplexMsgType, "invalid message type")
|
||||
|
||||
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
|
||||
proc readMsg*(
|
||||
conn: Connection
|
||||
): Future[Msg] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MuxerError]).} =
|
||||
let header = await conn.readVarint()
|
||||
trace "read header varint", varint = header, conn
|
||||
|
||||
@@ -58,10 +58,13 @@ proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
|
||||
|
||||
return (header shr 3, MessageType(msgType), data)
|
||||
|
||||
proc writeMsg*(conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: seq[byte] = @[]): Future[void] =
|
||||
proc writeMsg*(
|
||||
conn: Connection,
|
||||
id: uint64,
|
||||
msgType: MessageType,
|
||||
data: seq[byte] = @[]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
var
|
||||
left = data.len
|
||||
offset = 0
|
||||
@@ -87,8 +90,11 @@ proc writeMsg*(conn: Connection,
|
||||
# message gets written before some of the chunks
|
||||
conn.write(buf.buffer)
|
||||
|
||||
proc writeMsg*(conn: Connection,
id: uint64,
msgType: MessageType,
data: string): Future[void] =
proc writeMsg*(
conn: Connection,
id: uint64,
msgType: MessageType,
data: string
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
conn.writeMsg(id, msgType, data.toBytes())

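# Editor's note: not part of the diff. The readMsg/writeMsg procs above frame
# every mplex message with a single varint header; as the `header shr 3` in
# readMsg suggests, the stream id occupies the upper bits and the message type
# the low three bits:
proc packHeader(id, msgType: uint64): uint64 =
  (id shl 3) or msgType

let header = packHeader(5, 2)     # stream id 5, MessageType(2)
doAssert header shr 3 == 5        # id, as recovered by readMsg
doAssert (header and 0x7) == 2    # message type mask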
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[oids, strformat]
|
||||
import pkg/[chronos, chronicles, metrics]
@@ -31,7 +28,8 @@ when defined(libp2p_mplex_metrics):
declareHistogram libp2p_mplex_qtime, "message queuing time"

when defined(libp2p_network_protocols_metrics):
declareCounter libp2p_protocols_bytes, "total sent or received bytes", ["protocol", "direction"]
declareCounter libp2p_protocols_bytes,
"total sent or received bytes", ["protocol", "direction"]

## Channel half-closed states
##
@@ -67,16 +65,16 @@ type

func shortLog*(s: LPChannel): auto =
try:
if s.isNil: "LPChannel(nil)"
if s == nil: "LPChannel(nil)"
elif s.name != $s.oid and s.name.len > 0:
&"{shortLog(s.conn.peerId)}:{s.oid}:{s.name}"
else: &"{shortLog(s.conn.peerId)}:{s.oid}"
except ValueError as exc:
raise newException(Defect, exc.msg)
raiseAssert(exc.msg)

chronicles.formatIt(LPChannel): shortLog(it)

proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
trace "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
@@ -85,20 +83,20 @@ proc open*(s: LPChannel) {.async, gcsafe.} =
s.isOpen = true
except CancelledError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
await s.conn.close()
raise exc

method closed*(s: LPChannel): bool =
s.closedLocal

proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
proc closeUnderlying(s: LPChannel): Future[void] {.async: (raises: []).} =
## Channels may be closed for reading and writing in any order - we'll close
## the underlying bufferstream when both directions are closed
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async: (raises: []).} =
if s.isClosed:
trace "Already closed", s
return
@@ -111,22 +109,21 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =

if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
proc resetMessage() {.async: (raises: []).} =
try:
trace "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
await noCancel s.conn.writeMsg(s.id, s.resetCode) # write reset
except LPStreamError as exc:
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
await s.conn.close()

asyncSpawn resetMessage()

await s.closeImpl() # noraises, nocancels
await s.closeImpl()

trace "Channel reset", s

method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async: (raises: []).} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
@@ -140,10 +137,9 @@ method close*(s: LPChannel) {.async, gcsafe.} =
if s.isOpen and not s.conn.isClosed:
try:
await s.conn.writeMsg(s.id, s.closeCode) # write close
except CancelledError as exc:
except CancelledError:
await s.conn.close()
raise exc
except CatchableError as exc:
except LPStreamError as exc:
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
@@ -157,16 +153,17 @@ method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName

s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
s.timeoutHandler = proc(): Future[void] {.async: (raises: [], raw: true).} =
trace "Idle timeout expired, resetting LPChannel", s
s.reset()

procCall BufferStream(s).initStream()

method readOnce*(s: LPChannel,
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
method readOnce*(
s: LPChannel,
pbytes: pointer,
nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
## Mplex relies on reading being done regularly from every channel, or all
## channels are blocked - in particular, this means that reading from one
## channel must not be done from within a callback / read handler of another
@@ -189,15 +186,19 @@ method readOnce*(s: LPChannel,
if bytes == 0:
await s.closeUnderlying()
return bytes
except CatchableError as exc:
# readOnce in BufferStream generally raises on EOF or cancellation - for
# the former, resetting is harmless, for the latter it's necessary because
# data has been lost in s.readBuf and there's no way to gracefully recover /
# use the channel any more
except CancelledError as exc:
await s.reset()
raise exc
except LPStreamError as exc:
# Resetting is necessary because data has been lost in s.readBuf and
# there's no way to gracefully recover / use the channel any more
await s.reset()
raise newLPStreamConnDownError(exc)

proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
proc prepareWrite(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
# prepareWrite is the slow path of writing a message - see conditions in
# write
if s.remoteReset:
@@ -225,7 +226,10 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
await s.conn.writeMsg(s.id, s.msgCode, msg)

proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
s: LPChannel,
fut: Future[void].Raising([CancelledError, LPStreamError]),
msgLen: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
try:
s.writes += 1

@@ -238,7 +242,10 @@ proc completeWrite(

when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
# This crashes on Nim 2.0.2 with `--mm:orc` during `nimble test`
# https://github.com/status-im/nim-metrics/issues/79
libp2p_protocols_bytes.inc(
msgLen.int64, labelValues = [s.protocol, "out"])

s.activity = true
except CancelledError as exc:
@@ -250,7 +257,7 @@ proc completeWrite(
raise exc
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
except LPStreamError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
@@ -258,7 +265,11 @@ proc completeWrite(
finally:
s.writes -= 1

method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method write*(
s: LPChannel,
msg: seq[byte]
): Future[void] {.async: (raises: [
CancelledError, LPStreamError], raw: true).} =
## Write to mplex channel - there may be up to MaxWrite concurrent writes
## pending after which the peer is disconnected

@@ -279,13 +290,12 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
method getWrapped*(s: LPChannel): Connection = s.conn

proc init*(
L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =

L: type LPChannel,
id: uint64,
conn: Connection,
initiator: bool,
name: string = "",
timeout: Duration = DefaultChanTimeout): LPChannel =
let chann = L(
id: id,
name: name,

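The lpchannel.nim changes above all follow the same pattern: the bare {.async.} and plain Future-returning pragmas are replaced with chronos' checked-exception form, so every proc states exactly which errors may escape it. The standalone sketch below illustrates that annotation style only; the DemoError type and the fetch/consume procs are invented for the example and are not part of nim-libp2p.

import chronos

type DemoError = object of CatchableError

proc fetch(): Future[int] {.async: (raises: [CancelledError, DemoError]).} =
  # Only CancelledError and DemoError may leave this proc; raising any
  # other exception type is rejected at compile time.
  await sleepAsync(1.milliseconds)
  return 42

proc consume() {.async: (raises: []).} =
  # A raises: [] proc has to handle everything itself, which is why the
  # diff wraps connection writes in try/except instead of letting errors leak.
  try:
    discard await fetch()
  except CancelledError, DemoError:
    discard

waitFor consume()
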
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
@@ -59,7 +56,7 @@ proc newTooManyChannels(): ref TooManyChannels =
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")

proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async: (raises: []), inline.} =
## remove the local channel from the internal tables
##
try:
@@ -71,19 +68,19 @@ proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
libp2p_mplex_channels.set(
m.channels[chann.initiator].len.int64,
labelValues = [$chann.initiator, $m.connection.peerId])
except CatchableError as exc:
except CancelledError as exc:
warn "Error cleaning up mplex channel", m, chann, msg = exc.msg

proc newStreamInternal*(m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [Defect, InvalidChannelIdError].} =
proc newStreamInternal*(
m: Mplex,
initiator: bool = true,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel {.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
m.currentId.inc(); m.currentId
let id =
if initiator: m.currentId.inc(); m.currentId
else: chanId

if id in m.channels[initiator]:
@@ -114,18 +111,14 @@ proc newStreamInternal*(m: Mplex,
m.channels[initiator].len.int64,
labelValues = [$initiator, $m.connection.peerId])

proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
proc handleStream(m: Mplex, chann: LPChannel) {.async: (raises: []).} =
## call the muxer stream handler for this channel
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
await m.streamHandler(chann)
trace "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")

method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async: (raises: []).} =
trace "Starting mplex handler", m
try:
while not m.connection.atEof:
@@ -153,7 +146,7 @@ method handle*(m: Mplex) {.async, gcsafe.} =
else:
if m.channels[false].len > m.maxChannCount - 1:
warn "too many channels created by remote peer",
allowedMax = MaxChannelCount, m
allowedMax = MaxChannelCount, m
raise newTooManyChannels()

let name = string.fromBytes(data)
@@ -162,59 +155,65 @@ method handle*(m: Mplex) {.async, gcsafe.} =
trace "Processing channel message", m, channel, data = data.shortLog

case msgType:
of MessageType.New:
trace "created channel", m, channel
of MessageType.New:
trace "created channel", m, channel

if not isNil(m.streamHandler):
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)
if m.streamHandler != nil:
# Launch handler task
# All the errors are handled inside `handleStream()` procedure.
asyncSpawn m.handleStream(channel)

of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
of MessageType.MsgIn, MessageType.MsgOut:
if data.len > MaxMsgSize:
warn "attempting to send a packet larger than allowed",
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()

trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
trace "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.

of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
of MessageType.CloseIn, MessageType.CloseOut:
await channel.pushEof()
of MessageType.ResetIn, MessageType.ResetOut:
channel.remoteReset = true
await channel.reset()
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
except LPStreamError as exc:
debug "Unexpected stream exception in mplex read loop", m, msg = exc.msg
except MuxerError as exc:
debug "Unexpected muxer exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m

proc new*(M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
proc new*(
M: type Mplex,
conn: Connection,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
outChannTimeout: outTimeout,
oid: genOid(),
maxChannCount: maxChannCount)

method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
method newStream*(
m: Mplex,
name: string = "",
lazy: bool = false
): Future[Connection] {.async: (raises: [
CancelledError, LPStreamError, MuxerError]).} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)

if not lazy:
@@ -222,7 +221,7 @@ method newStream*(m: Mplex,

return Connection(channel)

method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async: (raises: []).} =
if m.isClosed:
trace "Already closed", m
return

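In the reworked mplex handler, handleStream is declared with raises: [] so it can safely be launched with asyncSpawn from the read loop: a spawned task that failed with a catchable error would otherwise crash the process via a future defect. A minimal illustration of that shape follows, with made-up worker/dispatch names that are not part of the library.

import chronos

proc worker(id: int) {.async: (raises: []).} =
  # Every failure mode is handled locally, so the spawned task can never
  # propagate an error back into the loop that started it.
  try:
    await sleepAsync(10.milliseconds)
    echo "worker ", id, " finished"
  except CancelledError:
    discard

proc dispatch() {.async: (raises: []).} =
  for i in 0 ..< 3:
    # worker's raises: [] guarantees the spawned future cannot fail and
    # take the dispatcher down with it
    asyncSpawn worker(i)
  try:
    await sleepAsync(50.milliseconds)
  except CancelledError:
    discard

waitFor dispatch()
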
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos, chronicles
import ../stream/connection,
@@ -26,16 +23,17 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError

StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [Defect].}
StreamHandler* = proc(conn: Connection): Future[void] {.async: (raises: []).}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.async: (raises: []).}

Muxer* = ref object of RootObj
streamHandler*: StreamHandler
handler*: Future[void]
handler*: Future[void].Raising([])
connection*: Connection

# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
MuxerConstructor* =
proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}

# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -43,24 +41,32 @@ type
codec*: string

func shortLog*(m: Muxer): auto =
if isNil(m): "nil"
if m == nil: "nil"
else: shortLog(m.connection)

chronicles.formatIt(Muxer): shortLog(it)

# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
if not isNil(m.connection):
method newStream*(
m: Muxer,
name: string = "",
lazy: bool = false
): Future[Connection] {.base, async: (raises: [
CancelledError, LPStreamError, MuxerError], raw: true).} =
raiseAssert("Not implemented!")

method close*(m: Muxer) {.base, async: (raises: []).} =
if m.connection != nil:
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard

method handle*(m: Muxer): Future[void] {.base, async: (raises: []).} = discard

proc new*(
T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =

T: typedesc[MuxerProvider],
creator: MuxerConstructor,
codec: string): T {.gcsafe.} =
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider

method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"
method getStreams*(m: Muxer): seq[Connection] {.base.} =
raiseAssert("Not implemented!")

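The Muxer type above now stores its long-running handler as Future[void].Raising([]), a future whose (empty) set of possible errors is carried in its type, matching the raises: [] handlers that produce it. A small self-contained sketch of that typed-future style follows; the Job wrapper is purely illustrative and not part of nim-libp2p.

import chronos

type Job = object
  # Only futures that are statically known not to raise fit this field,
  # mirroring how Muxer.handler is declared in the diff above.
  done: Future[void].Raising([])

proc run() {.async: (raises: []).} =
  try:
    await sleepAsync(5.milliseconds)
  except CancelledError:
    discard

let job = Job(done: run())
waitFor job.done
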
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import sequtils, std/[tables]
|
||||
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
|
||||
@@ -25,18 +22,22 @@ logScope:
|
||||
const
|
||||
YamuxCodec* = "/yamux/1.0.0"
|
||||
YamuxVersion = 0.uint8
|
||||
DefaultWindowSize = 256000
|
||||
YamuxDefaultWindowSize* = 256000
|
||||
MaxSendQueueSize = 256000
|
||||
MaxChannelCount = 200
|
||||
|
||||
when defined(libp2p_yamux_metrics):
|
||||
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
|
||||
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
|
||||
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
|
||||
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
|
||||
declareGauge libp2p_yamux_channels,
|
||||
"yamux channels", labels = ["initiator", "peer"]
|
||||
declareHistogram libp2p_yamux_send_queue,
|
||||
"message send queue length (in byte)", buckets = [
|
||||
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
|
||||
declareHistogram libp2p_yamux_recv_queue,
|
||||
"message recv queue length (in byte)", buckets = [
|
||||
0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
|
||||
|
||||
type
|
||||
YamuxError* = object of CatchableError
|
||||
YamuxError* = object of MuxerError
|
||||
|
||||
MsgType = enum
|
||||
Data = 0x0
|
||||
@@ -62,7 +63,10 @@ type
|
||||
streamId: uint32
|
||||
length: uint32
|
||||
|
||||
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
|
||||
proc readHeader(
|
||||
conn: LPStream
|
||||
): Future[YamuxHeader] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MuxerError]).} =
|
||||
var buffer: array[12, byte]
|
||||
await conn.readExactly(addr buffer[0], 12)
|
||||
|
||||
@@ -76,10 +80,10 @@ proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
|
||||
return result
|
||||
|
||||
proc `$`(header: YamuxHeader): string =
|
||||
result = "{" & $header.msgType & ", "
|
||||
result &= "{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, "
|
||||
result &= "streamId: " & $header.streamId & ", "
|
||||
result &= "length: " & $header.length & "}"
|
||||
"{" & $header.msgType & ", " &
|
||||
"{" & header.flags.foldl(if a != "": a & ", " & $b else: $b, "") & "}, " &
|
||||
"streamId: " & $header.streamId & ", " &
|
||||
"length: " & $header.length & "}"
|
||||
|
||||
proc encode(header: YamuxHeader): array[12, byte] =
|
||||
result[0] = header.version
|
||||
@@ -88,10 +92,14 @@ proc encode(header: YamuxHeader): array[12, byte] =
|
||||
result[4..7] = toBytesBE(header.streamId)
|
||||
result[8..11] = toBytesBE(header.length)
|
||||
|
||||
proc write(conn: LPStream, header: YamuxHeader): Future[void] {.gcsafe.} =
|
||||
proc write(
|
||||
conn: LPStream,
|
||||
header: YamuxHeader
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
trace "write directly on stream", h = $header
|
||||
var buffer = header.encode()
|
||||
return conn.write(@buffer)
|
||||
conn.write(@buffer)
|
||||
|
||||
proc ping(T: type[YamuxHeader], flag: MsgFlags, pingData: uint32): T =
|
||||
T(
|
||||
@@ -109,11 +117,10 @@ proc goAway(T: type[YamuxHeader], status: GoAwayStatus): T =
|
||||
)
|
||||
|
||||
proc data(
|
||||
T: type[YamuxHeader],
|
||||
streamId: uint32,
|
||||
length: uint32 = 0,
|
||||
flags: set[MsgFlags] = {},
|
||||
): T =
|
||||
T: type[YamuxHeader],
|
||||
streamId: uint32,
|
||||
length: uint32 = 0,
|
||||
flags: set[MsgFlags] = {}): T =
|
||||
T(
|
||||
version: YamuxVersion,
|
||||
msgType: MsgType.Data,
|
||||
@@ -123,11 +130,10 @@ proc data(
|
||||
)
|
||||
|
||||
proc windowUpdate(
|
||||
T: type[YamuxHeader],
|
||||
streamId: uint32,
|
||||
delta: uint32,
|
||||
flags: set[MsgFlags] = {},
|
||||
): T =
|
||||
T: type[YamuxHeader],
|
||||
streamId: uint32,
|
||||
delta: uint32,
|
||||
flags: set[MsgFlags] = {}): T =
|
||||
T(
|
||||
version: YamuxVersion,
|
||||
msgType: MsgType.WindowUpdate,
|
||||
@@ -140,12 +146,13 @@ type
|
||||
ToSend = tuple
|
||||
data: seq[byte]
|
||||
sent: int
|
||||
fut: Future[void]
|
||||
fut: Future[void].Raising([CancelledError, LPStreamError])
|
||||
YamuxChannel* = ref object of Connection
|
||||
id: uint32
|
||||
recvWindow: int
|
||||
sendWindow: int
|
||||
maxRecvWindow: int
|
||||
maxSendQueueSize: int
|
||||
conn: Connection
|
||||
isSrc: bool
|
||||
opened: bool
|
||||
@@ -154,16 +161,15 @@ type
|
||||
recvQueue: seq[byte]
|
||||
isReset: bool
|
||||
remoteReset: bool
|
||||
closedRemotely: Future[void]
|
||||
closedRemotely: Future[void].Raising([])
|
||||
closedLocally: bool
|
||||
receivedData: AsyncEvent
|
||||
returnedEof: bool
|
||||
|
||||
proc `$`(channel: YamuxChannel): string =
|
||||
result = if channel.conn.dir == Out: "=> " else: "<= "
|
||||
result &= $channel.id
|
||||
var s: seq[string] = @[]
|
||||
if channel.closedRemotely.done():
|
||||
if channel.closedRemotely.completed():
|
||||
s.add("ClosedRemotely")
|
||||
if channel.closedLocally:
|
||||
s.add("ClosedLocally")
|
||||
@@ -172,29 +178,44 @@ proc `$`(channel: YamuxChannel): string =
|
||||
if s.len > 0:
|
||||
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
|
||||
|
||||
proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
|
||||
for (elem, sent, _) in channel.sendQueue:
|
||||
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
|
||||
proc lengthSendQueue(channel: YamuxChannel): int =
|
||||
## Returns the length of what remains to be sent
|
||||
##
|
||||
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
|
||||
|
||||
proc actuallyClose(channel: YamuxChannel) {.async.} =
|
||||
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
|
||||
## Returns the length of what remains to be sent, but limit the size of big messages.
|
||||
##
|
||||
# For leniency, limit big messages size to the third of maxSendQueueSize
|
||||
# This value is arbitrary, it's not in the specs, it permits to store up to
|
||||
# 3 big messages if the peer is stalling.
|
||||
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
|
||||
|
||||
proc actuallyClose(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if channel.closedLocally and channel.sendQueue.len == 0 and
|
||||
channel.closedRemotely.done():
|
||||
channel.closedRemotely.completed():
|
||||
await procCall Connection(channel).closeImpl()
|
||||
|
||||
proc remoteClosed(channel: YamuxChannel) {.async.} =
|
||||
if not channel.closedRemotely.done():
|
||||
proc remoteClosed(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if not channel.closedRemotely.completed():
|
||||
channel.closedRemotely.complete()
|
||||
await channel.actuallyClose()
|
||||
|
||||
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
|
||||
method closeImpl*(channel: YamuxChannel) {.async: (raises: []).} =
|
||||
if not channel.closedLocally:
|
||||
trace "Closing yamux channel locally", streamId = channel.id, conn = channel.conn
|
||||
channel.closedLocally = true
|
||||
|
||||
if channel.isReset == false and channel.sendQueue.len == 0:
|
||||
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
|
||||
if not channel.isReset and channel.sendQueue.len == 0:
|
||||
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
|
||||
except CancelledError, LPStreamError: discard
|
||||
await channel.actuallyClose()
|
||||
|
||||
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
|
||||
proc reset(
|
||||
channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).} =
|
||||
# If we reset locally, we want to flush up to a maximum of recvWindow
|
||||
# bytes. It's because the peer we're connected to can send us data before
|
||||
# it receives the reset.
|
||||
if channel.isReset:
|
||||
return
|
||||
trace "Reset channel"
|
||||
@@ -206,20 +227,24 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
|
||||
channel.recvQueue = @[]
|
||||
channel.sendWindow = 0
|
||||
if not channel.closedLocally:
|
||||
if isLocal:
|
||||
if isLocal and not channel.isSending:
|
||||
try: await channel.conn.write(YamuxHeader.data(channel.id, 0, {Rst}))
|
||||
except LPStreamEOFError as exc: discard
|
||||
except LPStreamClosedError as exc: discard
|
||||
except CancelledError, LPStreamError: discard
|
||||
await channel.close()
|
||||
if not channel.closedRemotely.done():
|
||||
if not channel.closedRemotely.completed():
|
||||
await channel.remoteClosed()
|
||||
channel.receivedData.fire()
|
||||
if not isLocal:
|
||||
# If we reset locally, we want to flush up to a maximum of recvWindow
|
||||
# bytes. We use the recvWindow in the proc cleanupChann.
|
||||
# If the reset is remote, there's no reason to flush anything.
|
||||
channel.recvWindow = 0
|
||||
|
||||
proc updateRecvWindow(channel: YamuxChannel) {.async.} =
|
||||
proc updateRecvWindow(
|
||||
channel: YamuxChannel
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Send to the peer a window update when the recvWindow is empty enough
|
||||
##
|
||||
# In order to avoid spamming a window update everytime a byte is read,
|
||||
# we send it everytime half of the maxRecvWindow is read.
|
||||
let inWindow = channel.recvWindow + channel.recvQueue.len
|
||||
if inWindow > channel.maxRecvWindow div 2:
|
||||
return
|
||||
@@ -233,31 +258,36 @@ proc updateRecvWindow(channel: YamuxChannel) {.async.} =
|
||||
trace "increasing the recvWindow", delta
|
||||
|
||||
method readOnce*(
|
||||
channel: YamuxChannel,
|
||||
pbytes: pointer,
|
||||
nbytes: int):
|
||||
Future[int] {.async.} =
|
||||
channel: YamuxChannel,
|
||||
pbytes: pointer,
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Read from a yamux channel
|
||||
|
||||
if channel.isReset:
|
||||
raise if channel.remoteReset:
|
||||
raise
|
||||
if channel.remoteReset:
|
||||
newLPStreamResetError()
|
||||
elif channel.closedLocally:
|
||||
newLPStreamClosedError()
|
||||
else:
|
||||
newLPStreamConnDownError()
|
||||
if channel.returnedEof:
|
||||
if channel.isEof:
|
||||
raise newLPStreamRemoteClosedError()
|
||||
if channel.recvQueue.len == 0:
|
||||
channel.receivedData.clear()
|
||||
await channel.closedRemotely or channel.receivedData.wait()
|
||||
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
|
||||
channel.returnedEof = true
|
||||
return 0
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(channel.closedRemotely, channel.receivedData.wait())
|
||||
except ValueError: raiseAssert("Futures list is not empty")
|
||||
if channel.closedRemotely.completed() and channel.recvQueue.len == 0:
|
||||
channel.isEof = true
|
||||
return 0 # we return 0 to indicate that the channel is closed for reading from now on
|
||||
|
||||
let toRead = min(channel.recvQueue.len, nbytes)
|
||||
|
||||
var p = cast[ptr UncheckedArray[byte]](pbytes)
|
||||
toOpenArray(p, 0, nbytes - 1)[0..<toRead] = channel.recvQueue.toOpenArray(0, toRead - 1)
|
||||
toOpenArray(p, 0, nbytes - 1)[0..<toRead] =
|
||||
channel.recvQueue.toOpenArray(0, toRead - 1)
|
||||
channel.recvQueue = channel.recvQueue[toRead..^1]
|
||||
|
||||
# We made some room in the recv buffer let the peer know
|
||||
@@ -265,7 +295,9 @@ method readOnce*(
|
||||
channel.activity = true
|
||||
return toRead
|
||||
|
||||
proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
|
||||
proc gotDataFromRemote(
|
||||
channel: YamuxChannel,
|
||||
b: seq[byte]) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
channel.recvWindow -= b.len
|
||||
channel.recvQueue = channel.recvQueue.concat(b)
|
||||
channel.receivedData.fire()
|
||||
@@ -276,26 +308,27 @@ proc gotDataFromRemote(channel: YamuxChannel, b: seq[byte]) {.async.} =
|
||||
proc setMaxRecvWindow*(channel: YamuxChannel, maxRecvWindow: int) =
|
||||
channel.maxRecvWindow = maxRecvWindow
|
||||
|
||||
proc trySend(channel: YamuxChannel) {.async.} =
|
||||
proc trySend(
|
||||
channel: YamuxChannel
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
if channel.isSending:
|
||||
return
|
||||
channel.isSending = true
|
||||
defer: channel.isSending = false
|
||||
|
||||
while channel.sendQueue.len != 0:
|
||||
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
|
||||
if channel.sendWindow == 0:
|
||||
trace "send window empty"
|
||||
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
|
||||
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
|
||||
currentQueueSize = channel.sendQueueBytes(true)
|
||||
try:
|
||||
await channel.reset(true)
|
||||
except CatchableError as exc:
|
||||
debug "failed to reset", msg=exc.msg
|
||||
trace "trying to send while the sendWindow is empty"
|
||||
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
|
||||
trace "channel send queue too big, resetting",
|
||||
maxSendQueueSize = channel.maxSendQueueSize,
|
||||
currentQueueSize = channel.lengthSendQueueWithLimit()
|
||||
await channel.reset(isLocal = true)
|
||||
break
|
||||
|
||||
let
|
||||
bytesAvailable = channel.sendQueueBytes()
|
||||
bytesAvailable = channel.lengthSendQueue()
|
||||
toSend = min(channel.sendWindow, bytesAvailable)
|
||||
var
|
||||
sendBuffer = newSeqUninitialized[byte](toSend + 12)
|
||||
@@ -308,22 +341,33 @@ proc trySend(channel: YamuxChannel) {.async.} =
|
||||
|
||||
sendBuffer[0..<12] = header.encode()
|
||||
|
||||
var futures: seq[Future[void]]
|
||||
var futures: seq[Future[void].Raising([CancelledError, LPStreamError])]
|
||||
while inBuffer < toSend:
|
||||
# concatenate the different message we try to send into one buffer
|
||||
let (data, sent, fut) = channel.sendQueue[0]
|
||||
let bufferToSend = min(data.len - sent, toSend - inBuffer)
|
||||
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
|
||||
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
|
||||
channel.sendQueue[0].sent.inc(bufferToSend)
|
||||
if channel.sendQueue[0].sent >= data.len:
|
||||
# if every byte of the message is in the buffer, add the write future to the
|
||||
# sequence of futures to be completed (or failed) when the buffer is sent
|
||||
futures.add(fut)
|
||||
channel.sendQueue.delete(0)
|
||||
inBuffer.inc(bufferToSend)
|
||||
|
||||
trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
|
||||
trace "try to send the buffer", h = $header
|
||||
channel.sendWindow.dec(toSend)
|
||||
try: await channel.conn.write(sendBuffer)
|
||||
except CatchableError as exc:
|
||||
try:
|
||||
await channel.conn.write(sendBuffer)
|
||||
except CancelledError:
|
||||
trace "cancelled sending the buffer"
|
||||
for fut in futures.items():
|
||||
fut.cancelSoon()
|
||||
await channel.reset()
|
||||
break
|
||||
except LPStreamError as exc:
|
||||
trace "failed to send the buffer"
|
||||
let connDown = newLPStreamConnDownError(exc)
|
||||
for fut in futures.items():
|
||||
fut.fail(connDown)
|
||||
@@ -333,7 +377,13 @@ proc trySend(channel: YamuxChannel) {.async.} =
|
||||
fut.complete()
|
||||
channel.activity = true
|
||||
|
||||
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
channel: YamuxChannel,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
## Write to yamux channel
|
||||
##
|
||||
result = newFuture[void]("Yamux Send")
|
||||
if channel.remoteReset:
|
||||
result.fail(newLPStreamResetError())
|
||||
@@ -346,15 +396,22 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
|
||||
return result
|
||||
channel.sendQueue.add((msg, 0, result))
|
||||
when defined(libp2p_yamux_metrics):
|
||||
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
|
||||
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
|
||||
asyncSpawn channel.trySend()
|
||||
|
||||
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
|
||||
proc open(
|
||||
channel: YamuxChannel
|
||||
) {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
## Open a yamux channel by sending a window update with Syn or Ack flag
|
||||
##
|
||||
if channel.opened:
|
||||
trace "Try to open channel twice"
|
||||
return
|
||||
channel.opened = true
|
||||
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
|
||||
await channel.conn.write(YamuxHeader.windowUpdate(
|
||||
channel.id,
|
||||
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
|
||||
{if channel.isSrc: Syn else: Ack}))
|
||||
|
||||
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
|
||||
|
||||
@@ -365,48 +422,73 @@ type
|
||||
currentId: uint32
|
||||
isClosed: bool
|
||||
maxChannCount: int
|
||||
windowSize: int
|
||||
maxSendQueueSize: int
|
||||
inTimeout: Duration
|
||||
outTimeout: Duration
|
||||
|
||||
proc lenBySrc(m: Yamux, isSrc: bool): int =
|
||||
for v in m.channels.values():
|
||||
if v.isSrc == isSrc: result += 1
|
||||
|
||||
proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
|
||||
await channel.join()
|
||||
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
|
||||
try:
|
||||
await channel.join()
|
||||
except CancelledError:
|
||||
discard
|
||||
m.channels.del(channel.id)
|
||||
when defined(libp2p_yamux_metrics):
|
||||
libp2p_yamux_channels.set(m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
|
||||
libp2p_yamux_channels.set(
|
||||
m.lenBySrc(channel.isSrc).int64, [$channel.isSrc, $channel.peerId])
|
||||
if channel.isReset and channel.recvWindow > 0:
|
||||
m.flushed[channel.id] = channel.recvWindow
|
||||
|
||||
proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
|
||||
result = YamuxChannel(
|
||||
proc createStream(
|
||||
m: Yamux, id: uint32, isSrc: bool,
|
||||
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
|
||||
# During initialization, recvWindow can be larger than maxRecvWindow.
|
||||
# This is because the peer we're connected to will always assume
|
||||
# that the initial recvWindow is 256k.
|
||||
# To solve this contradiction, no updateWindow will be sent until
|
||||
# recvWindow is less than maxRecvWindow
|
||||
proc newClosedRemotelyFut(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
newFuture[void]()
|
||||
var stream = YamuxChannel(
|
||||
id: id,
|
||||
maxRecvWindow: DefaultWindowSize,
|
||||
recvWindow: DefaultWindowSize,
|
||||
sendWindow: DefaultWindowSize,
|
||||
maxRecvWindow: recvWindow,
|
||||
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
|
||||
sendWindow: YamuxDefaultWindowSize,
|
||||
maxSendQueueSize: maxSendQueueSize,
|
||||
isSrc: isSrc,
|
||||
conn: m.connection,
|
||||
receivedData: newAsyncEvent(),
|
||||
closedRemotely: newFuture[void]()
|
||||
closedRemotely: newClosedRemotelyFut()
|
||||
)
|
||||
result.objName = "YamuxStream"
|
||||
result.dir = if isSrc: Direction.Out else: Direction.In
|
||||
result.timeoutHandler = proc(): Future[void] {.gcsafe.} =
|
||||
trace "Idle timeout expired, resetting YamuxChannel"
|
||||
result.reset()
|
||||
result.initStream()
|
||||
result.peerId = m.connection.peerId
|
||||
result.observedAddr = m.connection.observedAddr
|
||||
result.transportDir = m.connection.transportDir
|
||||
stream.objName = "YamuxStream"
|
||||
if isSrc:
|
||||
stream.dir = Direction.Out
|
||||
stream.timeout = m.outTimeout
|
||||
else:
|
||||
stream.dir = Direction.In
|
||||
stream.timeout = m.inTimeout
|
||||
stream.timeoutHandler =
|
||||
proc(): Future[void] {.async: (raises: [], raw: true).} =
|
||||
trace "Idle timeout expired, resetting YamuxChannel"
|
||||
stream.reset(isLocal = true)
|
||||
stream.initStream()
|
||||
stream.peerId = m.connection.peerId
|
||||
stream.observedAddr = m.connection.observedAddr
|
||||
stream.transportDir = m.connection.transportDir
|
||||
when defined(libp2p_agents_metrics):
|
||||
result.shortAgent = m.connection.shortAgent
|
||||
m.channels[id] = result
|
||||
asyncSpawn m.cleanupChann(result)
|
||||
stream.shortAgent = m.connection.shortAgent
|
||||
m.channels[id] = stream
|
||||
asyncSpawn m.cleanupChannel(stream)
|
||||
trace "created channel", id, pid=m.connection.peerId
|
||||
when defined(libp2p_yamux_metrics):
|
||||
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
|
||||
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $stream.peerId])
|
||||
return stream
|
||||
|
||||
method close*(m: Yamux) {.async.} =
|
||||
method close*(m: Yamux) {.async: (raises: []).} =
|
||||
if m.isClosed == true:
|
||||
trace "Already closed"
|
||||
return
|
||||
@@ -415,24 +497,21 @@ method close*(m: Yamux) {.async.} =
|
||||
trace "Closing yamux"
|
||||
let channels = toSeq(m.channels.values())
|
||||
for channel in channels:
|
||||
await channel.reset(true)
|
||||
await channel.reset(isLocal = true)
|
||||
try: await m.connection.write(YamuxHeader.goAway(NormalTermination))
|
||||
except CatchableError as exc: trace "failed to send goAway", msg=exc.msg
|
||||
except CancelledError as exc: trace "cancelled sending goAway", msg = exc.msg
|
||||
except LPStreamError as exc: trace "failed to send goAway", msg = exc.msg
|
||||
await m.connection.close()
|
||||
trace "Closed yamux"
|
||||
|
||||
proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
|
||||
## call the muxer stream handler for this channel
|
||||
proc handleStream(m: Yamux, channel: YamuxChannel) {.async: (raises: []).} =
|
||||
## Call the muxer stream handler for this channel
|
||||
##
|
||||
try:
|
||||
await m.streamHandler(channel)
|
||||
trace "finished handling stream"
|
||||
doAssert(channel.isClosed, "connection not closed by handler!")
|
||||
except CatchableError as exc:
|
||||
trace "Exception in yamux stream handler", msg = exc.msg
|
||||
await channel.reset()
|
||||
await m.streamHandler(channel)
|
||||
trace "finished handling stream"
|
||||
doAssert(channel.isClosed, "connection not closed by handler!")
|
||||
|
||||
method handle*(m: Yamux) {.async, gcsafe.} =
|
||||
method handle*(m: Yamux) {.async: (raises: []).} =
|
||||
trace "Starting yamux handler", pid=m.connection.peerId
|
||||
try:
|
||||
while not m.connection.atEof:
|
||||
@@ -456,28 +535,39 @@ method handle*(m: Yamux) {.async, gcsafe.} =
|
||||
else:
|
||||
if header.streamId in m.flushed:
|
||||
m.flushed.del(header.streamId)
|
||||
|
||||
if header.streamId mod 2 == m.currentId mod 2:
|
||||
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
|
||||
raise newException(YamuxError, "Peer used our reserved stream id")
|
||||
let newStream = m.createStream(header.streamId, false)
|
||||
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
|
||||
if m.channels.len >= m.maxChannCount:
|
||||
await newStream.reset()
|
||||
continue
|
||||
await newStream.open()
|
||||
asyncSpawn m.handleStream(newStream)
|
||||
elif header.streamId notin m.channels:
|
||||
if header.streamId notin m.flushed:
|
||||
raise newException(YamuxError, "Unknown stream ID: " & $header.streamId)
|
||||
elif header.msgType == Data:
|
||||
# Flush the data
|
||||
m.flushed[header.streamId].dec(int(header.length))
|
||||
if m.flushed[header.streamId] < 0:
|
||||
raise newException(YamuxError, "Peer exhausted the recvWindow after reset")
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninitialized[byte](header.length)
|
||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||
# Flush the data
|
||||
m.flushed.withValue(header.streamId, flushed):
|
||||
if header.msgType == Data:
|
||||
flushed[].dec(int(header.length))
|
||||
if flushed[] < 0:
|
||||
raise newException(YamuxError,
|
||||
"Peer exhausted the recvWindow after reset")
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninitialized[byte](header.length)
|
||||
await m.connection.readExactly(
|
||||
addr buffer[0], int(header.length))
|
||||
do:
|
||||
raise newException(YamuxError,
|
||||
"Unknown stream ID: " & $header.streamId)
|
||||
continue
|
||||
|
||||
let channel = m.channels[header.streamId]
|
||||
let channel =
|
||||
try:
|
||||
m.channels[header.streamId]
|
||||
except KeyError:
|
||||
raise newException(YamuxError,
|
||||
"Stream was cleaned up before handling data: " & $header.streamId)
|
||||
|
||||
if header.msgType == WindowUpdate:
|
||||
channel.sendWindow += int(header.length)
|
||||
@@ -490,7 +580,7 @@ method handle*(m: Yamux) {.async, gcsafe.} =
|
||||
if header.length > 0:
|
||||
var buffer = newSeqUninitialized[byte](header.length)
|
||||
await m.connection.readExactly(addr buffer[0], int(header.length))
|
||||
trace "Msg Rcv", msg=string.fromBytes(buffer)
|
||||
trace "Msg Rcv", msg=shortLog(buffer)
|
||||
await channel.gotDataFromRemote(buffer)
|
||||
|
||||
if MsgFlags.Fin in header.flags:
|
||||
@@ -499,11 +589,24 @@ method handle*(m: Yamux) {.async, gcsafe.} =
|
||||
if MsgFlags.Rst in header.flags:
|
||||
trace "remote reset channel"
|
||||
await channel.reset()
|
||||
except CancelledError as exc:
|
||||
debug "Unexpected cancellation in yamux handler", msg = exc.msg
|
||||
except LPStreamEOFError as exc:
|
||||
trace "Stream EOF", msg = exc.msg
|
||||
except LPStreamError as exc:
|
||||
debug "Unexpected stream exception in yamux read loop", msg = exc.msg
|
||||
except YamuxError as exc:
|
||||
trace "Closing yamux connection", error=exc.msg
|
||||
await m.connection.write(YamuxHeader.goAway(ProtocolError))
|
||||
try:
|
||||
await m.connection.write(YamuxHeader.goAway(ProtocolError))
|
||||
except CancelledError, LPStreamError:
|
||||
discard
|
||||
except MuxerError as exc:
|
||||
debug "Unexpected muxer exception in yamux read loop", msg = exc.msg
|
||||
try:
|
||||
await m.connection.write(YamuxHeader.goAway(ProtocolError))
|
||||
except CancelledError, LPStreamError:
|
||||
discard
|
||||
finally:
|
||||
await m.close()
|
||||
trace "Stopped yamux handler"
|
||||
@@ -512,21 +615,32 @@ method getStreams*(m: Yamux): seq[Connection] =
|
||||
for c in m.channels.values: result.add(c)
|
||||
|
||||
method newStream*(
|
||||
m: Yamux,
|
||||
name: string = "",
|
||||
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
|
||||
|
||||
m: Yamux,
|
||||
name: string = "",
|
||||
lazy: bool = false
|
||||
): Future[Connection] {.async: (raises: [
|
||||
CancelledError, LPStreamError, MuxerError]).} =
|
||||
if m.channels.len > m.maxChannCount - 1:
|
||||
raise newException(TooManyChannels, "max allowed channel count exceeded")
|
||||
let stream = m.createStream(m.currentId, true)
|
||||
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
|
||||
m.currentId += 2
|
||||
if not lazy:
|
||||
await stream.open()
|
||||
return stream
|
||||
|
||||
proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
|
||||
proc new*(
|
||||
T: type[Yamux], conn: Connection,
|
||||
maxChannCount: int = MaxChannelCount,
|
||||
windowSize: int = YamuxDefaultWindowSize,
|
||||
maxSendQueueSize: int = MaxSendQueueSize,
|
||||
inTimeout: Duration = 5.minutes,
|
||||
outTimeout: Duration = 5.minutes): T =
|
||||
T(
|
||||
connection: conn,
|
||||
currentId: if conn.dir == Out: 1 else: 2,
|
||||
maxChannCount: maxChannCount
|
||||
maxChannCount: maxChannCount,
|
||||
windowSize: windowSize,
|
||||
maxSendQueueSize: maxSendQueueSize,
|
||||
inTimeout: inTimeout,
|
||||
outTimeout: outTimeout
|
||||
)
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[streams, strutils, sets, sequtils],
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sugar, sets, sequtils, strutils]
|
||||
import
|
||||
@@ -55,7 +52,7 @@ proc resolveOneAddress(
|
||||
ma: MultiAddress,
|
||||
domain: Domain = Domain.AF_UNSPEC,
|
||||
prefix = ""): Future[seq[MultiAddress]]
|
||||
{.async, raises: [Defect, MaError, TransportAddressError].} =
|
||||
{.async.} =
|
||||
#Resolve a single address
|
||||
var pbuf: array[2, byte]
|
||||
|
||||
@@ -121,7 +118,7 @@ proc resolveMAddress*(
|
||||
if not DNS.matchPartial(address):
|
||||
res.incl(address)
|
||||
else:
|
||||
let code = address[0].get().protoCode().get()
|
||||
let code = address[0].tryGet().protoCode().tryGet()
|
||||
let seq = case code:
|
||||
of multiCodec("dns"):
|
||||
await self.resolveOneAddress(address)
|
||||
@@ -132,7 +129,7 @@ proc resolveMAddress*(
|
||||
of multiCodec("dnsaddr"):
|
||||
await self.resolveDnsAddr(address)
|
||||
else:
|
||||
doAssert false
|
||||
assert false
|
||||
@[address]
|
||||
for ad in seq:
|
||||
res.incl(ad)
|
||||
|
||||
@@ -7,15 +7,11 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, tables],
|
||||
chronos, chronicles,
|
||||
multiaddress, multicodec
|
||||
import std/[sequtils, tables, sugar]
|
||||
import chronos
|
||||
import multiaddress, multicodec
|
||||
|
||||
type
|
||||
## Manages observed MultiAddresses by reomte peers. It keeps track of the most observed IP and IP/Port.
|
||||
@@ -36,14 +32,16 @@ proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], mul
|
||||
countTable.sort()
|
||||
var orderedPairs = toSeq(countTable.pairs)
|
||||
for (ma, count) in orderedPairs:
|
||||
let maFirst = ma[0].get()
|
||||
if maFirst.protoCode.get() == multiCodec and count >= self.minCount:
|
||||
let protoCode = (ma[0].flatMap(protoCode)).valueOr: continue
|
||||
if protoCode == multiCodec and count >= self.minCount:
|
||||
return Opt.some(ma)
|
||||
return Opt.none(MultiAddress)
|
||||
|
||||
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
|
||||
## Returns the most observed IP address or none if the number of observations are less than minCount.
|
||||
let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get())
|
||||
let observedIPs = collect:
|
||||
for observedIp in self.observedIPsAndPorts:
|
||||
observedIp[0].valueOr: continue
|
||||
return self.getProtocol(observedIPs, multiCodec)
|
||||
|
||||
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
|
||||
@@ -54,34 +52,24 @@ proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress
|
||||
## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
|
||||
## are less than minCount.
|
||||
var res: seq[MultiAddress]
|
||||
let ip4 = self.getMostObservedProtoAndPort(multiCodec("ip4"))
|
||||
if ip4.isSome():
|
||||
res.add(ip4.get())
|
||||
let ip6 = self.getMostObservedProtoAndPort(multiCodec("ip6"))
|
||||
if ip6.isSome():
|
||||
res.add(ip6.get())
|
||||
self.getMostObservedProtoAndPort(multiCodec("ip4")).withValue(ip4):
|
||||
res.add(ip4)
|
||||
self.getMostObservedProtoAndPort(multiCodec("ip6")).withValue(ip6):
|
||||
res.add(ip6)
|
||||
return res
|
||||
|
||||
proc guessDialableAddr*(
|
||||
self: ObservedAddrManager,
|
||||
ma: MultiAddress): MultiAddress =
|
||||
## Replaces the first proto valeu of each listen address by the corresponding (matching the proto code) most observed value.
|
||||
## Replaces the first proto value of each listen address by the corresponding (matching the proto code) most observed value.
|
||||
## If the most observed value is not available, the original MultiAddress is returned.
|
||||
try:
|
||||
let maFirst = ma[0]
|
||||
let maRest = ma[1..^1]
|
||||
if maRest.isErr():
|
||||
return ma
|
||||
let
|
||||
maFirst = ma[0].valueOr: return ma
|
||||
maRest = ma[1..^1].valueOr: return ma
|
||||
maFirstProto = maFirst.protoCode().valueOr: return ma
|
||||
|
||||
let observedIP = self.getMostObservedProtocol(maFirst.get().protoCode().get())
|
||||
return
|
||||
if observedIP.isNone() or maFirst.get() == observedIP.get():
|
||||
ma
|
||||
else:
|
||||
observedIP.get() & maRest.get()
|
||||
except CatchableError as error:
|
||||
debug "Error while handling manual port forwarding", msg = error.msg
|
||||
return ma
|
||||
let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr: return ma
|
||||
return concat(observedIP, maRest).valueOr: ma
|
||||
|
||||
proc `$`*(self: ObservedAddrManager): string =
|
||||
## Returns a string representation of the ObservedAddrManager.
|
||||
|
||||
@@ -9,10 +9,7 @@
|
||||
|
||||
## This module implementes API for libp2p peer.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import
|
||||
@@ -44,10 +41,7 @@ func shortLog*(pid: PeerId): string =
|
||||
if len(spid) > 10:
|
||||
spid[3] = '*'
|
||||
|
||||
when (NimMajor, NimMinor) > (1, 4):
|
||||
spid.delete(4 .. spid.high - 6)
|
||||
else:
|
||||
spid.delete(4, spid.high - 6)
|
||||
spid.delete(4 .. spid.high - 6)
|
||||
|
||||
spid
|
||||
|
||||
@@ -191,19 +185,11 @@ proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
|
||||
|
||||
func match*(pid: PeerId, pubkey: PublicKey): bool =
|
||||
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
|
||||
let p = PeerId.init(pubkey)
|
||||
if p.isErr:
|
||||
false
|
||||
else:
|
||||
pid == p.get()
|
||||
PeerId.init(pubkey) == Result[PeerId, cstring].ok(pid)
|
||||
|
||||
func match*(pid: PeerId, seckey: PrivateKey): bool =
|
||||
## Returns ``true`` if ``pid`` matches private key ``seckey``.
|
||||
let p = PeerId.init(seckey)
|
||||
if p.isErr:
|
||||
false
|
||||
else:
|
||||
pid == p.get()
|
||||
PeerId.init(seckey) == Result[PeerId, cstring].ok(pid)
|
||||
|
||||
## Serialization/Deserialization helpers
|
||||
|
||||
|
||||
@@ -7,13 +7,10 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
{.push public.}
|
||||
|
||||
import std/[options, sequtils]
|
||||
import std/sequtils
|
||||
import pkg/[chronos, chronicles, stew/results]
|
||||
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
|
||||
|
||||
@@ -26,7 +23,7 @@ type
|
||||
|
||||
AddressMapper* =
|
||||
proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
|
||||
{.gcsafe, raises: [Defect].}
|
||||
{.gcsafe, raises: [].}
|
||||
|
||||
PeerInfo* {.public.} = ref object
|
||||
peerId*: PeerId
|
||||
@@ -56,15 +53,12 @@ proc update*(p: PeerInfo) {.async.} =
|
||||
for mapper in p.addressMappers:
|
||||
p.addrs = await mapper(p.addrs)
|
||||
|
||||
let sprRes = SignedPeerRecord.init(
|
||||
p.signedPeerRecord = SignedPeerRecord.init(
|
||||
p.privateKey,
|
||||
PeerRecord.init(p.peerId, p.addrs)
|
||||
)
|
||||
if sprRes.isOk:
|
||||
p.signedPeerRecord = sprRes.get()
|
||||
else:
|
||||
discard
|
||||
#info "Can't update the signed peer record"
|
||||
).valueOr():
|
||||
info "Can't update the signed peer record"
|
||||
return
|
||||
|
||||
proc addrs*(p: PeerInfo): seq[MultiAddress] =
|
||||
p.addrs
|
||||
@@ -99,7 +93,7 @@ proc new*(
|
||||
agentVersion: string = "",
|
||||
addressMappers = newSeq[AddressMapper](),
|
||||
): PeerInfo
|
||||
{.raises: [Defect, LPError].} =
|
||||
{.raises: [LPError].} =
|
||||
|
||||
let pubkey = try:
|
||||
key.getPublicKey().tryGet()
|
||||
|
||||
@@ -16,15 +16,12 @@ runnableExamples:
|
||||
# Create a custom book type
|
||||
type MoodBook = ref object of PeerBook[string]
|
||||
|
||||
var somePeerId = PeerId.random().get()
|
||||
var somePeerId = PeerId.random().expect("get random key")
|
||||
|
||||
peerStore[MoodBook][somePeerId] = "Happy"
|
||||
doAssert peerStore[MoodBook][somePeerId] == "Happy"
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[tables, sets, options, macros],
|
||||
@@ -45,7 +42,7 @@ type
|
||||
# Handler types #
|
||||
#################
|
||||
|
||||
PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [Defect].}
|
||||
PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [].}
|
||||
|
||||
#########
|
||||
# Books #
|
||||
@@ -161,20 +158,20 @@ proc updatePeerInfo*(
|
||||
if info.addrs.len > 0:
|
||||
peerStore[AddressBook][info.peerId] = info.addrs
|
||||
|
||||
if info.pubkey.isSome:
|
||||
peerStore[KeyBook][info.peerId] = info.pubkey.get()
|
||||
info.pubkey.withValue(pubkey):
|
||||
peerStore[KeyBook][info.peerId] = pubkey
|
||||
|
||||
if info.agentVersion.isSome:
|
||||
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
|
||||
info.agentVersion.withValue(agentVersion):
|
||||
peerStore[AgentBook][info.peerId] = agentVersion.string
|
||||
|
||||
if info.protoVersion.isSome:
|
||||
peerStore[ProtoVersionBook][info.peerId] = info.protoVersion.get().string
|
||||
info.protoVersion.withValue(protoVersion):
|
||||
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
|
||||
|
||||
if info.protos.len > 0:
|
||||
peerStore[ProtoBook][info.peerId] = info.protos
|
||||
|
||||
if info.signedPeerRecord.isSome:
|
||||
peerStore[SPRBook][info.peerId] = info.signedPeerRecord.get()
|
||||
info.signedPeerRecord.withValue(signedPeerRecord):
|
||||
peerStore[SPRBook][info.peerId] = signedPeerRecord
|
||||
|
||||
let cleanupPos = peerStore.toClean.find(info.peerId)
|
||||
if cleanupPos >= 0:
|
||||
@@ -210,11 +207,11 @@ proc identify*(
|
||||
let info = await peerStore.identify.identify(stream, stream.peerId)
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
var knownAgent = "unknown"
|
||||
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
|
||||
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
|
||||
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
|
||||
knownAgent = shortAgent.get()
|
||||
var
|
||||
knownAgent = "unknown"
|
||||
shortAgent = info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
|
||||
if KnownLibP2PAgentsSeq.contains(shortAgent):
|
||||
knownAgent = shortAgent
|
||||
muxer.connection.setShortAgent(knownAgent)
|
||||
|
||||
peerStore.updatePeerInfo(info)
|
||||
|
||||
@@ -9,10 +9,7 @@
|
||||
|
||||
## This module implements minimal Google's ProtoBuf primitives.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import ../varint, ../utility, stew/[endians2, results]
|
||||
export results, utility
|
||||
@@ -579,26 +576,18 @@ proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
|
||||
proc getField*(pb: ProtoBuffer, field: int,
|
||||
output: var ProtoBuffer): ProtoResult[bool] {.inline.} =
|
||||
var buffer: seq[byte]
|
||||
let res = pb.getField(field, buffer)
|
||||
if res.isOk():
|
||||
if res.get():
|
||||
output = initProtoBuffer(buffer)
|
||||
ok(true)
|
||||
else:
|
||||
ok(false)
|
||||
if ? pb.getField(field, buffer):
|
||||
output = initProtoBuffer(buffer)
|
||||
ok(true)
|
||||
else:
|
||||
err(res.error)
|
||||
ok(false)
|
||||
|
||||
proc getRequiredField*[T](pb: ProtoBuffer, field: int,
|
||||
output: var T): ProtoResult[void] {.inline.} =
|
||||
let res = pb.getField(field, output)
|
||||
if res.isOk():
|
||||
if res.get():
|
||||
ok()
|
||||
else:
|
||||
err(RequiredFieldMissing)
|
||||
if ? pb.getField(field, output):
|
||||
ok()
|
||||
else:
|
||||
err(res.error)
|
||||
err(RequiredFieldMissing)
|
||||
|
||||
proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
|
||||
output: var seq[T]): ProtoResult[bool] =
|
||||
@@ -678,14 +667,10 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,

proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[void] {.inline.} =
let res = pb.getRepeatedField(field, output)
if res.isOk():
if res.get():
ok()
else:
err(RequiredFieldMissing)
if ? pb.getRepeatedField(field, output):
ok()
else:
err(res.error)
err(RequiredFieldMissing)

proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
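Note: the getField/getRequiredField rewrites above lean on stew/results' `?` operator, which unwraps an ok value and returns the error from the enclosing proc otherwise. A condensed sketch of that control flow, with a string error standing in for ProtoError.

import stew/results

proc readField(present: bool, malformed: bool): Result[bool, string] =
  if malformed: err("malformed field") else: ok(present)

proc readRequired(present: bool, malformed: bool): Result[void, string] =
  # `?` propagates the decoding error; the remaining bool only says whether
  # the (well-formed) field was present at all.
  if ? readField(present, malformed):
    ok()
  else:
    err("RequiredFieldMissing")

assert readRequired(true, false).isOk()
assert readRequired(false, false).error == "RequiredFieldMissing"
assert readRequired(true, true).error == "malformed field"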
@@ -7,12 +7,8 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/options
|
||||
import stew/results
|
||||
import chronos, chronicles
|
||||
import ../../../switch,
|
||||
@@ -27,8 +23,8 @@ type
|
||||
AutonatClient* = ref object of RootObj
|
||||
|
||||
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||
id: some(pid),
|
||||
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
|
||||
id: Opt.some(pid),
|
||||
addrs: addrs
|
||||
))).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
@@ -36,15 +32,13 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
|
||||
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
|
||||
Future[MultiAddress] {.base, async.} =
|
||||
|
||||
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [Defect, AutonatError].} =
|
||||
if autonatMsg.isNone() or
|
||||
autonatMsg.get().msgType != DialResponse or
|
||||
autonatMsg.get().response.isNone() or
|
||||
(autonatMsg.get().response.get().status == Ok and
|
||||
autonatMsg.get().response.get().ma.isNone()):
|
||||
raise newException(AutonatError, "Unexpected response")
|
||||
else:
|
||||
autonatMsg.get().response.get()
|
||||
proc getResponseOrRaise(autonatMsg: Opt[AutonatMsg]): AutonatDialResponse {.raises: [AutonatError].} =
|
||||
autonatMsg.withValue(msg):
|
||||
if msg.msgType == DialResponse:
|
||||
msg.response.withValue(res):
|
||||
if not (res.status == Ok and res.ma.isNone()):
|
||||
return res
|
||||
raise newException(AutonatError, "Unexpected response")
|
||||
|
||||
let conn =
|
||||
try:
|
||||
@@ -69,7 +63,7 @@ method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[Mult
|
||||
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
|
||||
return case response.status:
|
||||
of ResponseStatus.Ok:
|
||||
response.ma.get()
|
||||
response.ma.tryGet()
|
||||
of ResponseStatus.DialError:
|
||||
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
|
||||
else:
|
||||
|
||||
@@ -7,12 +7,8 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options]
|
||||
import stew/[results, objects]
|
||||
import chronos, chronicles
|
||||
import ../../../multiaddress,
|
||||
@@ -42,29 +38,29 @@ type
|
||||
InternalError = 300
|
||||
|
||||
AutonatPeerInfo* = object
|
||||
id*: Option[PeerId]
|
||||
id*: Opt[PeerId]
|
||||
addrs*: seq[MultiAddress]
|
||||
|
||||
AutonatDial* = object
|
||||
peerInfo*: Option[AutonatPeerInfo]
|
||||
peerInfo*: Opt[AutonatPeerInfo]
|
||||
|
||||
AutonatDialResponse* = object
|
||||
status*: ResponseStatus
|
||||
text*: Option[string]
|
||||
ma*: Option[MultiAddress]
|
||||
text*: Opt[string]
|
||||
ma*: Opt[MultiAddress]
|
||||
|
||||
AutonatMsg* = object
|
||||
msgType*: MsgType
|
||||
dial*: Option[AutonatDial]
|
||||
response*: Option[AutonatDialResponse]
|
||||
dial*: Opt[AutonatDial]
|
||||
response*: Opt[AutonatDialResponse]
|
||||
|
||||
NetworkReachability* {.pure.} = enum
|
||||
Unknown, NotReachable, Reachable
|
||||
|
||||
proc encode(p: AutonatPeerInfo): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
if p.id.isSome():
|
||||
result.write(1, p.id.get())
|
||||
p.id.withValue(id):
|
||||
result.write(1, id)
|
||||
for ma in p.addrs:
|
||||
result.write(2, ma.data.buffer)
|
||||
result.finish()
|
||||
@@ -73,8 +69,8 @@ proc encode*(d: AutonatDial): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
result.write(1, MsgType.Dial.uint)
|
||||
var dial = initProtoBuffer()
|
||||
if d.peerInfo.isSome():
|
||||
dial.write(1, encode(d.peerInfo.get()))
|
||||
d.peerInfo.withValue(pinfo):
|
||||
dial.write(1, encode(pinfo))
|
||||
dial.finish()
|
||||
result.write(2, dial.buffer)
|
||||
result.finish()
|
||||
@@ -84,72 +80,60 @@ proc encode*(r: AutonatDialResponse): ProtoBuffer =
|
||||
result.write(1, MsgType.DialResponse.uint)
|
||||
var bufferResponse = initProtoBuffer()
|
||||
bufferResponse.write(1, r.status.uint)
|
||||
if r.text.isSome():
|
||||
bufferResponse.write(2, r.text.get())
|
||||
if r.ma.isSome():
|
||||
bufferResponse.write(3, r.ma.get())
|
||||
r.text.withValue(text):
|
||||
bufferResponse.write(2, text)
|
||||
r.ma.withValue(ma):
|
||||
bufferResponse.write(3, ma)
|
||||
bufferResponse.finish()
|
||||
result.write(3, bufferResponse.buffer)
|
||||
result.finish()
|
||||
|
||||
proc encode*(msg: AutonatMsg): ProtoBuffer =
|
||||
if msg.dial.isSome():
|
||||
return encode(msg.dial.get())
|
||||
if msg.response.isSome():
|
||||
return encode(msg.response.get())
|
||||
msg.dial.withValue(dial):
|
||||
return encode(dial)
|
||||
msg.response.withValue(res):
|
||||
return encode(res)
|
||||
|
||||
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
|
||||
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Opt[AutonatMsg] =
|
||||
var
|
||||
msgTypeOrd: uint32
|
||||
pbDial: ProtoBuffer
|
||||
pbResponse: ProtoBuffer
|
||||
msg: AutonatMsg
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbDial)
|
||||
r3 = pb.getField(3, pbResponse)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
|
||||
let pb = initProtoBuffer(buf)
|
||||
|
||||
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||
return none(AutonatMsg)
|
||||
if r2.get():
|
||||
if ? pb.getField(1, msgTypeOrd).toOpt() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||
return Opt.none(AutonatMsg)
|
||||
if ? pb.getField(2, pbDial).toOpt():
|
||||
var
|
||||
pbPeerInfo: ProtoBuffer
|
||||
dial: AutonatDial
|
||||
let
|
||||
r4 = pbDial.getField(1, pbPeerInfo)
|
||||
if r4.isErr(): return none(AutonatMsg)
|
||||
let r4 = ? pbDial.getField(1, pbPeerInfo).toOpt()
|
||||
|
||||
var peerInfo: AutonatPeerInfo
|
||||
if r4.get():
|
||||
if r4:
|
||||
var pid: PeerId
|
||||
let
|
||||
r5 = pbPeerInfo.getField(1, pid)
|
||||
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
|
||||
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
|
||||
if r5.get(): peerInfo.id = some(pid)
|
||||
dial.peerInfo = some(peerInfo)
|
||||
msg.dial = some(dial)
|
||||
r5 = ? pbPeerInfo.getField(1, pid).toOpt()
|
||||
r6 = ? pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
|
||||
if r5: peerInfo.id = Opt.some(pid)
|
||||
dial.peerInfo = Opt.some(peerInfo)
|
||||
msg.dial = Opt.some(dial)
|
||||
|
||||
if r3.get():
|
||||
if ? pb.getField(3, pbResponse).toOpt():
|
||||
var
|
||||
statusOrd: uint
|
||||
text: string
|
||||
ma: MultiAddress
|
||||
response: AutonatDialResponse
|
||||
|
||||
let
|
||||
r4 = pbResponse.getField(1, statusOrd)
|
||||
r5 = pbResponse.getField(2, text)
|
||||
r6 = pbResponse.getField(3, ma)
|
||||
|
||||
if r4.isErr() or r5.isErr() or r6.isErr() or
|
||||
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
|
||||
return none(AutonatMsg)
|
||||
if r5.get(): response.text = some(text)
|
||||
if r6.get(): response.ma = some(ma)
|
||||
msg.response = some(response)
|
||||
|
||||
return some(msg)
|
||||
if ? pbResponse.getField(1, statusOrd).optValue():
|
||||
if not checkedEnumAssign(response.status, statusOrd):
|
||||
return Opt.none(AutonatMsg)
|
||||
if ? pbResponse.getField(2, text).optValue():
|
||||
response.text = Opt.some(text)
|
||||
if ? pbResponse.getField(3, ma).optValue():
|
||||
response.ma = Opt.some(ma)
|
||||
msg.response = Opt.some(response)
|
||||
return Opt.some(msg)
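Note: the decoder above now returns `Opt[AutonatMsg]` and applies `?` to Opt-converted field results so any parse error bails out with `Opt.none`. A toy sketch of that shape; `demoToOpt` is a local stand-in for the `toOpt()`/`optValue()` helpers used in the diff, and the message layout is invented.

import stew/results

type DemoMsg = object
  id: Opt[int]

proc demoToOpt[T, E](r: Result[T, E]): Opt[T] =
  if r.isOk(): Opt.some(r.get()) else: Opt.none(T)

proc getField(present: bool, malformed: bool): Result[bool, string] =
  if malformed: err("bad varint") else: ok(present)

proc decode(present: bool, malformed: bool): Opt[DemoMsg] =
  var msg: DemoMsg
  # `?` on an Opt returns Opt.none(DemoMsg) from decode() on parse failure;
  # the unwrapped bool then tells us whether the optional field was present.
  if ? demoToOpt(getField(present, malformed)):
    msg.id = Opt.some(42)
  Opt.some(msg)

assert decode(true, false).get().id == Opt.some(42)
assert decode(false, true).isNone()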
@@ -7,12 +7,9 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, sets, sequtils]
|
||||
import std/[sets, sequtils]
|
||||
import stew/results
|
||||
import chronos, chronicles
|
||||
import ../../protocol,
|
||||
@@ -36,8 +33,8 @@ type
|
||||
dialTimeout: Duration
|
||||
|
||||
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
|
||||
id: some(pid),
|
||||
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
|
||||
id: Opt.some(pid),
|
||||
addrs: addrs
|
||||
))).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
@@ -45,16 +42,16 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
|
||||
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: status,
|
||||
text: if text == "": none(string) else: some(text),
|
||||
ma: none(MultiAddress)
|
||||
text: if text == "": Opt.none(string) else: Opt.some(text),
|
||||
ma: Opt.none(MultiAddress)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
|
||||
let pb = AutonatDialResponse(
|
||||
status: ResponseStatus.Ok,
|
||||
text: some("Ok"),
|
||||
ma: some(ma)
|
||||
text: Opt.some("Ok"),
|
||||
ma: Opt.some(ma)
|
||||
).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
@@ -73,8 +70,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
|
||||
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
|
||||
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
|
||||
let ma = await fut
|
||||
if ma.isSome:
|
||||
await conn.sendResponseOk(ma.get())
|
||||
ma.withValue(maddr):
|
||||
await conn.sendResponseOk(maddr)
|
||||
else:
|
||||
await conn.sendResponseError(DialError, "Missing observed address")
|
||||
except CancelledError as exc:
|
||||
@@ -95,42 +92,40 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
|
||||
f.cancel()
|
||||
|
||||
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
|
||||
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
|
||||
let dial = msg.dial.valueOr:
|
||||
return conn.sendResponseError(BadRequest, "Missing Dial")
|
||||
let peerInfo = dial.peerInfo.valueOr:
|
||||
return conn.sendResponseError(BadRequest, "Missing Peer Info")
|
||||
let peerInfo = msg.dial.get().peerInfo.get()
|
||||
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
|
||||
return conn.sendResponseError(BadRequest, "PeerId mismatch")
|
||||
peerInfo.id.withValue(id):
|
||||
if id != conn.peerId:
|
||||
return conn.sendResponseError(BadRequest, "PeerId mismatch")
|
||||
|
||||
if conn.observedAddr.isNone:
|
||||
let observedAddr = conn.observedAddr.valueOr:
|
||||
return conn.sendResponseError(BadRequest, "Missing observed address")
|
||||
let observedAddr = conn.observedAddr.get()
|
||||
|
||||
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
|
||||
return conn.sendResponseError(DialRefused, "Invalid observed address")
|
||||
if isRelayed:
|
||||
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
|
||||
let hostIp = observedAddr[0]
|
||||
if hostIp.isErr() or not IP.match(hostIp.get()):
|
||||
trace "wrong observed address", address=observedAddr
|
||||
let hostIp = observedAddr[0].valueOr:
|
||||
return conn.sendResponseError(InternalError, "Wrong observed address")
|
||||
if not IP.match(hostIp):
|
||||
return conn.sendResponseError(InternalError, "Expected an IP address")
|
||||
var addrs = initHashSet[MultiAddress]()
|
||||
addrs.incl(observedAddr)
|
||||
trace "addrs received", addrs = peerInfo.addrs
|
||||
for ma in peerInfo.addrs:
|
||||
isRelayed = ma.contains(multiCodec("p2p-circuit"))
|
||||
if isRelayed.isErr() or isRelayed.get():
|
||||
continue
|
||||
let maFirst = ma[0]
|
||||
if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
|
||||
continue
|
||||
isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr: continue
|
||||
let maFirst = ma[0].valueOr: continue
|
||||
if not DNS_OR_IP.match(maFirst): continue
|
||||
|
||||
try:
|
||||
addrs.incl(
|
||||
if maFirst.get() == hostIp.get():
|
||||
if maFirst == hostIp:
|
||||
ma
|
||||
else:
|
||||
let maEnd = ma[1..^1]
|
||||
if maEnd.isErr(): continue
|
||||
hostIp.get() & maEnd.get()
|
||||
let maEnd = ma[1..^1].valueOr: continue
|
||||
hostIp & maEnd
|
||||
)
|
||||
except LPError as exc:
|
||||
continue
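Note: handleDial above now uses `valueOr` to turn each failed lookup into an early reply or a `continue`, instead of separate isErr/isNone checks. A minimal sketch of `valueOr` from stew/results; the address handling is simplified and made up.

import stew/results

proc firstComponent(addrs: seq[string]): Result[string, string] =
  if addrs.len == 0: err("empty address list") else: ok(addrs[0])

proc describe(addrs: seq[string]): string =
  # On err, valueOr runs its body (with `error` injected) and must leave the
  # scope, mirroring the early sendResponseError returns above.
  let host = firstComponent(addrs).valueOr:
    return "no address: " & error
  "host " & host

assert describe(@["192.0.2.1", "192.0.2.2"]) == "host 192.0.2.1"
assert describe(newSeq[string]()) == "no address: empty address list"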
@@ -145,12 +140,12 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
|
||||
|
||||
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
|
||||
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
|
||||
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
|
||||
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
|
||||
raise newException(AutonatError, "Received malformed message")
|
||||
let msg = msgOpt.get()
|
||||
if msg.msgType != MsgType.Dial:
|
||||
raise newException(AutonatError, "Message type should be dial")
|
||||
await autonat.handleDial(conn, msg)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
|
||||
@@ -7,12 +7,9 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, deques, sequtils]
|
||||
import std/[deques, sequtils]
|
||||
import chronos, metrics
|
||||
import ../../../switch
|
||||
import ../../../wire
|
||||
@@ -21,7 +18,7 @@ from core import NetworkReachability, AutonatUnreachableError
|
||||
import ../../../utils/heartbeat
|
||||
import ../../../crypto/crypto
|
||||
|
||||
export options, core.NetworkReachability
|
||||
export core.NetworkReachability
|
||||
|
||||
logScope:
|
||||
topics = "libp2p autonatservice"
|
||||
@@ -34,12 +31,12 @@ type
|
||||
addressMapper: AddressMapper
|
||||
scheduleHandle: Future[void]
|
||||
networkReachability*: NetworkReachability
|
||||
confidence: Option[float]
|
||||
confidence: Opt[float]
|
||||
answers: Deque[NetworkReachability]
|
||||
autonatClient: AutonatClient
|
||||
statusAndConfidenceHandler: StatusAndConfidenceHandler
|
||||
rng: ref HmacDrbgContext
|
||||
scheduleInterval: Option[Duration]
|
||||
scheduleInterval: Opt[Duration]
|
||||
askNewConnectedPeers: bool
|
||||
numPeersToAsk: int
|
||||
maxQueueSize: int
|
||||
@@ -47,13 +44,13 @@ type
|
||||
dialTimeout: Duration
|
||||
enableAddressMapper: bool
|
||||
|
||||
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
|
||||
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Opt[float]): Future[void] {.gcsafe, raises: [].}
|
||||
|
||||
proc new*(
|
||||
T: typedesc[AutonatService],
|
||||
autonatClient: AutonatClient,
|
||||
rng: ref HmacDrbgContext,
|
||||
scheduleInterval: Option[Duration] = none(Duration),
|
||||
scheduleInterval: Opt[Duration] = Opt.none(Duration),
|
||||
askNewConnectedPeers = true,
|
||||
numPeersToAsk: int = 5,
|
||||
maxQueueSize: int = 10,
|
||||
@@ -63,7 +60,7 @@ proc new*(
|
||||
return T(
|
||||
scheduleInterval: scheduleInterval,
|
||||
networkReachability: Unknown,
|
||||
confidence: none(float),
|
||||
confidence: Opt.none(float),
|
||||
answers: initDeque[NetworkReachability](),
|
||||
autonatClient: autonatClient,
|
||||
rng: rng,
|
||||
@@ -85,27 +82,33 @@ proc hasEnoughIncomingSlots(switch: Switch): bool =
|
||||
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
|
||||
return switch.connManager.selectMuxer(peerId, In) != nil
|
||||
|
||||
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
|
||||
proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool] {.async.} =
|
||||
|
||||
if ans == Unknown:
|
||||
return
|
||||
|
||||
let oldNetworkReachability = self.networkReachability
|
||||
let oldConfidence = self.confidence
|
||||
|
||||
if self.answers.len == self.maxQueueSize:
|
||||
self.answers.popFirst()
|
||||
self.answers.addLast(ans)
|
||||
|
||||
self.networkReachability = Unknown
|
||||
self.confidence = none(float)
|
||||
self.confidence = Opt.none(float)
|
||||
const reachabilityPriority = [Reachable, NotReachable]
|
||||
for reachability in reachabilityPriority:
|
||||
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
|
||||
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
|
||||
if self.confidence.isNone and confidence >= self.minConfidence:
|
||||
self.networkReachability = reachability
|
||||
self.confidence = some(confidence)
|
||||
self.confidence = Opt.some(confidence)
|
||||
|
||||
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
|
||||
|
||||
# Return whether anything has changed
|
||||
return self.networkReachability != oldNetworkReachability or self.confidence != oldConfidence
|
||||
|
||||
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
|
||||
logScope:
|
||||
peerId = $peerId
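Note: handleAnswer above now reports whether reachability or confidence actually moved, so the status handler is only invoked on real changes. A condensed, synchronous sketch of that bookkeeping; the type names, queue size and threshold here are illustrative only.

import std/deques
import stew/results

type Reach = enum Unknown, NotReachable, Reachable

type Tracker = object
  reach: Reach
  confidence: Opt[float]
  answers: Deque[Reach]

proc handleAnswer(t: var Tracker, ans: Reach, maxQueue = 4, minConf = 0.5): bool =
  if ans == Unknown: return false
  let (oldReach, oldConf) = (t.reach, t.confidence)
  if t.answers.len == maxQueue: discard t.answers.popFirst()
  t.answers.addLast(ans)
  t.reach = Unknown
  t.confidence = Opt.none(float)
  for candidate in [Reachable, NotReachable]:
    var hits = 0
    for a in t.answers:
      if a == candidate: inc hits
    let conf = hits / maxQueue
    if t.confidence.isNone and conf >= minConf:
      t.reach = candidate
      t.confidence = Opt.some(conf)
  # Report a change only when either field actually moved.
  t.reach != oldReach or t.confidence != oldConf

var t = Tracker(answers: initDeque[Reach](), confidence: Opt.none(float))
for i in 0 ..< 4: discard t.handleAnswer(Reachable)
assert t.reach == Reachable
assert not t.handleAnswer(Reachable)   # queue already all Reachable, no change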
@@ -132,9 +135,9 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
|
||||
except CatchableError as error:
|
||||
debug "dialMe unexpected error", msg = error.msg
|
||||
Unknown
|
||||
await self.handleAnswer(ans)
|
||||
if not isNil(self.statusAndConfidenceHandler):
|
||||
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
|
||||
let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
|
||||
if hasReachabilityOrConfidenceChanged:
|
||||
await self.callHandler()
|
||||
await switch.peerInfo.update()
|
||||
return ans
|
||||
|
||||
@@ -159,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
|
||||
proc addressMapper(
|
||||
self: AutonatService,
|
||||
peerStore: PeerStore,
|
||||
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
|
||||
if self.networkReachability != NetworkReachability.Reachable:
|
||||
return listenAddrs
|
||||
@@ -168,8 +171,7 @@ proc addressMapper(
|
||||
for listenAddr in listenAddrs:
|
||||
var processedMA = listenAddr
|
||||
try:
|
||||
let hostIP = initTAddress(listenAddr).get()
|
||||
if not hostIP.isGlobal() and self.networkReachability == NetworkReachability.Reachable:
|
||||
if not listenAddr.isPublicMA() and self.networkReachability == NetworkReachability.Reachable:
|
||||
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
|
||||
except CatchableError as exc:
|
||||
debug "Error while handling address mapper", msg = exc.msg
|
||||
@@ -177,7 +179,7 @@ proc addressMapper(
|
||||
return addrs
|
||||
|
||||
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
||||
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
|
||||
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
|
||||
return await addressMapper(self, switch.peerStore, listenAddrs)
|
||||
|
||||
info "Setting up AutonatService"
|
||||
@@ -187,8 +189,8 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
||||
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
|
||||
discard askPeer(self, switch, peerId)
|
||||
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
|
||||
if self.scheduleInterval.isSome():
|
||||
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
|
||||
self.scheduleInterval.withValue(interval):
|
||||
self.scheduleHandle = schedule(self, switch, interval)
|
||||
if self.enableAddressMapper:
|
||||
switch.peerInfo.addressMappers.add(self.addressMapper)
|
||||
return hasBeenSetup
|
||||
@@ -196,7 +198,6 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
|
||||
method run*(self: AutonatService, switch: Switch) {.async, public.} =
|
||||
trace "Running AutonatService"
|
||||
await askConnectedPeers(self, switch)
|
||||
await self.callHandler()
|
||||
|
||||
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
|
||||
info "Stopping AutonatService"
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/sequtils
|
||||
|
||||
@@ -69,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
|
||||
|
||||
if peerDialableAddrs.len > self.maxDialableAddrs:
|
||||
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
|
||||
try:
|
||||
discard await anyCompleted(futs).wait(self.connectTimeout)
|
||||
debug "Dcutr initiator has directly connected to the remote peer."
|
||||
@@ -84,8 +81,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
|
||||
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
|
||||
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
|
||||
except CatchableError as err:
|
||||
debug "Unexpected error when trying direct conn", err = err.msg
|
||||
raise newException(DcutrError, "Unexpected error when trying a direct conn", err)
|
||||
debug "Unexpected error when Dcutr initiator tried to connect to the remote peer", err = err.msg
|
||||
raise newException(DcutrError, "Unexpected error when Dcutr initiator tried to connect to the remote peer", err)
|
||||
finally:
|
||||
if stream != nil:
|
||||
await stream.close()
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/sequtils
|
||||
|
||||
@@ -44,7 +41,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer =
|
||||
result.write(2, addr)
|
||||
result.finish()
|
||||
|
||||
proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, DcutrError].} =
|
||||
proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} =
|
||||
var
|
||||
msgTypeOrd: uint32
|
||||
dcutrMsg: DcutrMsg
|
||||
@@ -59,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
|
||||
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
|
||||
await conn.writeLp(pb.buffer)
|
||||
|
||||
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
|
||||
addrs.filterIt(TCP.match(it))
|
||||
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
|
||||
var result = newSeq[MultiAddress]()
|
||||
for a in addrs:
|
||||
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
|
||||
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
|
||||
result.add(a[0..1].tryGet())
|
||||
return result
|
||||
|
||||
@@ -7,13 +7,9 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[options, sets, sequtils]
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sets, sequtils]
|
||||
import stew/[results, objects]
|
||||
import chronos, chronicles
|
||||
|
||||
@@ -23,7 +19,6 @@ import ../../protocol,
|
||||
../../../switch,
|
||||
../../../utils/future
|
||||
|
||||
export DcutrError
|
||||
export chronicles
|
||||
|
||||
type Dcutr* = ref object of LPProtocol
|
||||
@@ -33,7 +28,7 @@ logScope:
|
||||
|
||||
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
|
||||
|
||||
proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(stream: Connection, proto: string) {.async.} =
|
||||
var peerDialableAddrs: seq[MultiAddress]
|
||||
try:
|
||||
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
|
||||
@@ -60,7 +55,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
|
||||
|
||||
if peerDialableAddrs.len > maxDialableAddrs:
|
||||
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
|
||||
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
|
||||
try:
|
||||
discard await anyCompleted(futs).wait(connectTimeout)
|
||||
debug "Dcutr receiver has directly connected to the remote peer."
|
||||
@@ -69,14 +64,14 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
|
||||
except CancelledError as err:
|
||||
raise err
|
||||
except AllFuturesFailedError as err:
|
||||
debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
|
||||
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
|
||||
debug "Dcutr receiver could not connect to the remote peer, " &
|
||||
"all connect attempts failed", peerDialableAddrs, msg = err.msg
|
||||
except AsyncTimeoutError as err:
|
||||
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
|
||||
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
|
||||
debug "Dcutr receiver could not connect to the remote peer, " &
|
||||
"all connect attempts timed out", peerDialableAddrs, msg = err.msg
|
||||
except CatchableError as err:
|
||||
warn "Unexpected error in dcutr handler", msg = err.msg
|
||||
raise newException(DcutrError, "Unexpected error in dcutr handler", err)
|
||||
warn "Unexpected error when Dcutr receiver tried to connect " &
|
||||
"to the remote peer", msg = err.msg
|
||||
|
||||
let self = T()
|
||||
self.handler = handleStream
|
||||
|
||||
@@ -7,15 +7,10 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import times, options
|
||||
{.push raises: [].}
|
||||
|
||||
import times
|
||||
import chronos, chronicles
|
||||
|
||||
import ./relay,
|
||||
./messages,
|
||||
./rconn,
|
||||
@@ -25,8 +20,6 @@ import ./relay,
|
||||
../../../multiaddress,
|
||||
../../../stream/connection
|
||||
|
||||
export options
|
||||
|
||||
logScope:
|
||||
topics = "libp2p relay relay-client"
|
||||
|
||||
@@ -39,7 +32,7 @@ type
|
||||
RelayV2DialError* = object of RelayClientError
|
||||
RelayClientAddConn* = proc(conn: Connection,
|
||||
duration: uint32,
|
||||
data: uint64): Future[void] {.gcsafe, raises: [Defect].}
|
||||
data: uint64): Future[void] {.gcsafe, raises: [].}
|
||||
RelayClient* = ref object of Relay
|
||||
onNewConnection*: RelayClientAddConn
|
||||
canHop: bool
|
||||
@@ -47,28 +40,27 @@ type
|
||||
Rsvp* = object
|
||||
expire*: uint64 # required, Unix expiration time (UTC)
|
||||
addrs*: seq[MultiAddress] # relay address for reserving peer
|
||||
voucher*: Option[Voucher] # optional, reservation voucher
|
||||
voucher*: Opt[Voucher] # optional, reservation voucher
|
||||
limitDuration*: uint32 # seconds
|
||||
limitData*: uint64 # bytes
|
||||
|
||||
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
|
||||
trace "send stop status", status = $code & " (" & $ord(code) & ")"
|
||||
let msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
|
||||
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
|
||||
await conn.writeLp(encode(msg).buffer)
|
||||
|
||||
proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {.async.} =
|
||||
if msg.peer.isNone():
|
||||
await sendStopError(conn, MalformedMessage)
|
||||
return
|
||||
let
|
||||
# TODO: check the go version to see in which way this could fail
|
||||
# it's unclear in the spec
|
||||
src = msg.peer.get()
|
||||
src = msg.peer.valueOr:
|
||||
await sendStopError(conn, MalformedMessage)
|
||||
return
|
||||
limitDuration = msg.limit.duration
|
||||
limitData = msg.limit.data
|
||||
msg = StopMessage(
|
||||
msgType: StopMessageType.Status,
|
||||
status: some(Ok))
|
||||
status: Opt.some(Ok))
|
||||
pb = encode(msg)
|
||||
|
||||
trace "incoming relay connection", src
|
||||
@@ -92,7 +84,7 @@ proc reserve*(cl: RelayClient,
|
||||
pb = encode(HopMessage(msgType: HopMessageType.Reserve))
|
||||
msg = try:
|
||||
await conn.writeLp(pb.buffer)
|
||||
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
|
||||
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
@@ -103,21 +95,21 @@ proc reserve*(cl: RelayClient,
|
||||
raise newException(ReservationError, "Unexpected relay response type")
|
||||
if msg.status.get(UnexpectedMessage) != Ok:
|
||||
raise newException(ReservationError, "Reservation failed")
|
||||
if msg.reservation.isNone():
|
||||
raise newException(ReservationError, "Missing reservation information")
|
||||
|
||||
let reservation = msg.reservation.get()
|
||||
let reservation = msg.reservation.valueOr:
|
||||
raise newException(ReservationError, "Missing reservation information")
|
||||
if reservation.expire > int64.high().uint64 or
|
||||
now().utc > reservation.expire.int64.fromUnix.utc:
|
||||
raise newException(ReservationError, "Bad expiration date")
|
||||
result.expire = reservation.expire
|
||||
result.addrs = reservation.addrs
|
||||
|
||||
if reservation.svoucher.isSome():
|
||||
let svoucher = SignedVoucher.decode(reservation.svoucher.get())
|
||||
if svoucher.isErr() or svoucher.get().data.relayPeerId != peerId:
|
||||
reservation.svoucher.withValue(sv):
|
||||
let svoucher = SignedVoucher.decode(sv).valueOr:
|
||||
raise newException(ReservationError, "Invalid voucher")
|
||||
result.voucher = some(svoucher.get().data)
|
||||
if svoucher.data.relayPeerId != peerId:
|
||||
raise newException(ReservationError, "Invalid voucher PeerId")
|
||||
result.voucher = Opt.some(svoucher.data)
|
||||
|
||||
result.limitDuration = msg.limit.duration
|
||||
result.limitData = msg.limit.data
|
||||
@@ -129,9 +121,9 @@ proc dialPeerV1*(
|
||||
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
|
||||
var
|
||||
msg = RelayMessage(
|
||||
msgType: some(RelayType.Hop),
|
||||
srcPeer: some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
|
||||
dstPeer: some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
|
||||
msgType: Opt.some(RelayType.Hop),
|
||||
srcPeer: Opt.some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
|
||||
dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
|
||||
pb = encode(msg)
|
||||
|
||||
trace "Dial peer", msgSend=msg
|
||||
@@ -154,16 +146,18 @@ proc dialPeerV1*(
|
||||
raise exc
|
||||
|
||||
try:
|
||||
if msgRcvFromRelayOpt.isNone:
|
||||
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
|
||||
raise newException(RelayV1DialError, "Hop can't open destination stream")
|
||||
let msgRcvFromRelay = msgRcvFromRelayOpt.get()
|
||||
if msgRcvFromRelay.msgType.isNone or msgRcvFromRelay.msgType.get() != RelayType.Status:
|
||||
if msgRcvFromRelay.msgType.tryGet() != RelayType.Status:
|
||||
raise newException(RelayV1DialError, "Hop can't open destination stream: wrong message type")
|
||||
if msgRcvFromRelay.status.isNone or msgRcvFromRelay.status.get() != StatusV1.Success:
|
||||
if msgRcvFromRelay.status.tryGet() != StatusV1.Success:
|
||||
raise newException(RelayV1DialError, "Hop can't open destination stream: status failed")
|
||||
except RelayV1DialError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise exc
|
||||
except ValueError as exc:
|
||||
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
|
||||
raise newException(RelayV1DialError, exc.msg)
|
||||
result = conn
|
||||
|
||||
proc dialPeerV2*(
|
||||
@@ -173,13 +167,13 @@ proc dialPeerV2*(
|
||||
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
|
||||
let
|
||||
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
|
||||
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: some(p)))
|
||||
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
|
||||
|
||||
trace "Dial peer", p
|
||||
|
||||
let msgRcvFromRelay = try:
|
||||
await conn.writeLp(pb.buffer)
|
||||
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
|
||||
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
@@ -189,19 +183,17 @@ proc dialPeerV2*(
|
||||
if msgRcvFromRelay.msgType != HopMessageType.Status:
|
||||
raise newException(RelayV2DialError, "Unexpected stop response")
|
||||
if msgRcvFromRelay.status.get(UnexpectedMessage) != Ok:
|
||||
trace "Relay stop failed", msg = msgRcvFromRelay.status.get()
|
||||
trace "Relay stop failed", msg = msgRcvFromRelay.status
|
||||
raise newException(RelayV2DialError, "Relay stop failure")
|
||||
conn.limitDuration = msgRcvFromRelay.limit.duration
|
||||
conn.limitData = msgRcvFromRelay.limit.data
|
||||
return conn
|
||||
|
||||
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
||||
let msgOpt = StopMessage.decode(await conn.readLp(RelayClientMsgSize))
|
||||
if msgOpt.isNone():
|
||||
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
|
||||
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
||||
await sendHopStatus(conn, MalformedMessage)
|
||||
return
|
||||
trace "client circuit relay v2 handle stream", msg = msgOpt.get()
|
||||
let msg = msgOpt.get()
|
||||
trace "client circuit relay v2 handle stream", msg
|
||||
|
||||
if msg.msgType == StopMessageType.Connect:
|
||||
await cl.handleRelayedConnect(conn, msg)
|
||||
@@ -209,17 +201,15 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
||||
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
|
||||
await sendStopError(conn, MalformedMessage)
|
||||
|
||||
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
|
||||
if msg.srcPeer.isNone:
|
||||
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
|
||||
let src = msg.srcPeer.valueOr:
|
||||
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
|
||||
return
|
||||
let src = msg.srcPeer.get()
|
||||
|
||||
if msg.dstPeer.isNone:
|
||||
let dst = msg.dstPeer.valueOr:
|
||||
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
|
||||
return
|
||||
|
||||
let dst = msg.dstPeer.get()
|
||||
if dst.peerId != cl.switch.peerInfo.peerId:
|
||||
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
|
||||
return
|
||||
@@ -236,14 +226,17 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
|
||||
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
|
||||
else: await conn.close()
|
||||
|
||||
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
|
||||
let msgOpt = RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
|
||||
if msgOpt.isNone:
|
||||
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
|
||||
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
|
||||
await sendStatus(conn, StatusV1.MalformedMessage)
|
||||
return
|
||||
trace "client circuit relay v1 handle stream", msg = msgOpt.get()
|
||||
let msg = msgOpt.get()
|
||||
case msg.msgType.get:
|
||||
trace "client circuit relay v1 handle stream", msg
|
||||
|
||||
let typ = msg.msgType.valueOr:
|
||||
trace "Message type not set"
|
||||
await sendStatus(conn, StatusV1.MalformedMessage)
|
||||
return
|
||||
case typ:
|
||||
of RelayType.Hop:
|
||||
if cl.canHop: await cl.handleHop(conn, msg)
|
||||
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
|
||||
@@ -273,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
|
||||
maxCircuitPerPeer: maxCircuitPerPeer,
|
||||
msgSize: msgSize,
|
||||
isCircuitRelayV1: circuitRelayV1)
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
case proto:
|
||||
of RelayV1Codec: await cl.handleStreamV1(conn)
|
||||
|
||||
@@ -7,13 +7,10 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import options, macros
|
||||
import stew/objects
|
||||
import macros
|
||||
import stew/[objects, results]
|
||||
import ../../../peerinfo,
|
||||
../../../signed_envelope
|
||||
|
||||
@@ -49,36 +46,36 @@ type
|
||||
addrs*: seq[MultiAddress]
|
||||
|
||||
RelayMessage* = object
|
||||
msgType*: Option[RelayType]
|
||||
srcPeer*: Option[RelayPeer]
|
||||
dstPeer*: Option[RelayPeer]
|
||||
status*: Option[StatusV1]
|
||||
msgType*: Opt[RelayType]
|
||||
srcPeer*: Opt[RelayPeer]
|
||||
dstPeer*: Opt[RelayPeer]
|
||||
status*: Opt[StatusV1]
|
||||
|
||||
proc encode*(msg: RelayMessage): ProtoBuffer =
|
||||
result = initProtoBuffer()
|
||||
|
||||
if isSome(msg.msgType):
|
||||
result.write(1, msg.msgType.get().ord.uint)
|
||||
if isSome(msg.srcPeer):
|
||||
msg.msgType.withValue(typ):
|
||||
result.write(1, typ.ord.uint)
|
||||
msg.srcPeer.withValue(srcPeer):
|
||||
var peer = initProtoBuffer()
|
||||
peer.write(1, msg.srcPeer.get().peerId)
|
||||
for ma in msg.srcPeer.get().addrs:
|
||||
peer.write(1, srcPeer.peerId)
|
||||
for ma in srcPeer.addrs:
|
||||
peer.write(2, ma.data.buffer)
|
||||
peer.finish()
|
||||
result.write(2, peer.buffer)
|
||||
if isSome(msg.dstPeer):
|
||||
msg.dstPeer.withValue(dstPeer):
|
||||
var peer = initProtoBuffer()
|
||||
peer.write(1, msg.dstPeer.get().peerId)
|
||||
for ma in msg.dstPeer.get().addrs:
|
||||
peer.write(1, dstPeer.peerId)
|
||||
for ma in dstPeer.addrs:
|
||||
peer.write(2, ma.data.buffer)
|
||||
peer.finish()
|
||||
result.write(3, peer.buffer)
|
||||
if isSome(msg.status):
|
||||
result.write(4, msg.status.get().ord.uint)
|
||||
msg.status.withValue(status):
|
||||
result.write(4, status.ord.uint)
|
||||
|
||||
result.finish()
|
||||
|
||||
proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
|
||||
proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
|
||||
var
|
||||
rMsg: RelayMessage
|
||||
msgTypeOrd: uint32
|
||||
@@ -88,38 +85,29 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
|
||||
pbSrc: ProtoBuffer
|
||||
pbDst: ProtoBuffer
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbSrc)
|
||||
r3 = pb.getField(3, pbDst)
|
||||
r4 = pb.getField(4, statusOrd)
|
||||
let pb = initProtoBuffer(buf)
|
||||
|
||||
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
|
||||
return none(RelayMessage)
|
||||
|
||||
if r2.get() and
|
||||
(pbSrc.getField(1, src.peerId).isErr() or
|
||||
pbSrc.getRepeatedField(2, src.addrs).isErr()):
|
||||
return none(RelayMessage)
|
||||
|
||||
if r3.get() and
|
||||
(pbDst.getField(1, dst.peerId).isErr() or
|
||||
pbDst.getRepeatedField(2, dst.addrs).isErr()):
|
||||
return none(RelayMessage)
|
||||
|
||||
if r1.get():
|
||||
if ? pb.getField(1, msgTypeOrd).toOpt():
|
||||
if msgTypeOrd.int notin RelayType:
|
||||
return none(RelayMessage)
|
||||
rMsg.msgType = some(RelayType(msgTypeOrd))
|
||||
if r2.get(): rMsg.srcPeer = some(src)
|
||||
if r3.get(): rMsg.dstPeer = some(dst)
|
||||
if r4.get():
|
||||
return Opt.none(RelayMessage)
|
||||
rMsg.msgType = Opt.some(RelayType(msgTypeOrd))
|
||||
|
||||
if ? pb.getField(2, pbSrc).toOpt():
|
||||
discard ? pbSrc.getField(1, src.peerId).toOpt()
|
||||
discard ? pbSrc.getRepeatedField(2, src.addrs).toOpt()
|
||||
rMsg.srcPeer = Opt.some(src)
|
||||
|
||||
if ? pb.getField(3, pbDst).toOpt():
|
||||
discard ? pbDst.getField(1, dst.peerId).toOpt()
|
||||
discard ? pbDst.getRepeatedField(2, dst.addrs).toOpt()
|
||||
rMsg.dstPeer = Opt.some(dst)
|
||||
|
||||
if ? pb.getField(4, statusOrd).toOpt():
|
||||
var status: StatusV1
|
||||
if not checkedEnumAssign(status, statusOrd):
|
||||
return none(RelayMessage)
|
||||
rMsg.status = some(status)
|
||||
some(rMsg)
|
||||
return Opt.none(RelayMessage)
|
||||
rMsg.status = Opt.some(status)
|
||||
Opt.some(rMsg)
|
||||
|
||||
# Voucher
|
||||
|
||||
@@ -179,7 +167,7 @@ type
|
||||
Reservation* = object
|
||||
expire*: uint64 # required, Unix expiration time (UTC)
|
||||
addrs*: seq[MultiAddress] # relay address for reserving peer
|
||||
svoucher*: Option[seq[byte]] # optional, reservation voucher
|
||||
svoucher*: Opt[seq[byte]] # optional, reservation voucher
|
||||
Limit* = object
|
||||
duration*: uint32 # seconds
|
||||
data*: uint64 # bytes
|
||||
@@ -199,30 +187,29 @@ type
|
||||
Status = 2
|
||||
HopMessage* = object
|
||||
msgType*: HopMessageType
|
||||
peer*: Option[Peer]
|
||||
reservation*: Option[Reservation]
|
||||
peer*: Opt[Peer]
|
||||
reservation*: Opt[Reservation]
|
||||
limit*: Limit
|
||||
status*: Option[StatusV2]
|
||||
status*: Opt[StatusV2]
|
||||
|
||||
proc encode*(msg: HopMessage): ProtoBuffer =
|
||||
var pb = initProtoBuffer()
|
||||
|
||||
pb.write(1, msg.msgType.ord.uint)
|
||||
if msg.peer.isSome():
|
||||
msg.peer.withValue(peer):
|
||||
var ppb = initProtoBuffer()
|
||||
ppb.write(1, msg.peer.get().peerId)
|
||||
for ma in msg.peer.get().addrs:
|
||||
ppb.write(1, peer.peerId)
|
||||
for ma in peer.addrs:
|
||||
ppb.write(2, ma.data.buffer)
|
||||
ppb.finish()
|
||||
pb.write(2, ppb.buffer)
|
||||
if msg.reservation.isSome():
|
||||
let rsrv = msg.reservation.get()
|
||||
msg.reservation.withValue(rsrv):
|
||||
var rpb = initProtoBuffer()
|
||||
rpb.write(1, rsrv.expire)
|
||||
for ma in rsrv.addrs:
|
||||
rpb.write(2, ma.data.buffer)
|
||||
if rsrv.svoucher.isSome():
|
||||
rpb.write(3, rsrv.svoucher.get())
|
||||
rsrv.svoucher.withValue(vouch):
|
||||
rpb.write(3, vouch)
|
||||
rpb.finish()
|
||||
pb.write(3, rpb.buffer)
|
||||
if msg.limit.duration > 0 or msg.limit.data > 0:
|
||||
@@ -231,66 +218,51 @@ proc encode*(msg: HopMessage): ProtoBuffer =
|
||||
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
|
||||
lpb.finish()
|
||||
pb.write(4, lpb.buffer)
|
||||
if msg.status.isSome():
|
||||
pb.write(5, msg.status.get().ord.uint)
|
||||
msg.status.withValue(status):
|
||||
pb.write(5, status.ord.uint)
|
||||
|
||||
pb.finish()
|
||||
pb
|
||||
|
||||
proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Option[HopMessage] =
|
||||
var
|
||||
msg: HopMessage
|
||||
msgTypeOrd: uint32
|
||||
pbPeer: ProtoBuffer
|
||||
pbReservation: ProtoBuffer
|
||||
pbLimit: ProtoBuffer
|
||||
statusOrd: uint32
|
||||
peer: Peer
|
||||
reservation: Reservation
|
||||
limit: Limit
|
||||
res: bool
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getRequiredField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbPeer)
|
||||
r3 = pb.getField(3, pbReservation)
|
||||
r4 = pb.getField(4, pbLimit)
|
||||
r5 = pb.getField(5, statusOrd)
|
||||
|
||||
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or r5.isErr():
|
||||
return none(HopMessage)
|
||||
|
||||
if r2.get() and
|
||||
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
|
||||
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
|
||||
return none(HopMessage)
|
||||
|
||||
if r3.get():
|
||||
var svoucher: seq[byte]
|
||||
let rSVoucher = pbReservation.getField(3, svoucher)
|
||||
if pbReservation.getRequiredField(1, reservation.expire).isErr() or
|
||||
pbReservation.getRepeatedField(2, reservation.addrs).isErr() or
|
||||
rSVoucher.isErr():
|
||||
return none(HopMessage)
|
||||
if rSVoucher.get(): reservation.svoucher = some(svoucher)
|
||||
|
||||
if r4.get() and
|
||||
(pbLimit.getField(1, limit.duration).isErr() or
|
||||
pbLimit.getField(2, limit.data).isErr()):
|
||||
return none(HopMessage)
|
||||
proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Opt[HopMessage] =
|
||||
var msg: HopMessage
|
||||
let pb = initProtoBuffer(buf)
|
||||
|
||||
var msgTypeOrd: uint32
|
||||
? pb.getRequiredField(1, msgTypeOrd).toOpt()
|
||||
if not checkedEnumAssign(msg.msgType, msgTypeOrd):
|
||||
return none(HopMessage)
|
||||
if r2.get(): msg.peer = some(peer)
|
||||
if r3.get(): msg.reservation = some(reservation)
|
||||
if r4.get(): msg.limit = limit
|
||||
if r5.get():
|
||||
return Opt.none(HopMessage)
|
||||
|
||||
var pbPeer: ProtoBuffer
|
||||
if ? pb.getField(2, pbPeer).toOpt():
|
||||
var peer: Peer
|
||||
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
|
||||
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
|
||||
msg.peer = Opt.some(peer)
|
||||
|
||||
var pbReservation: ProtoBuffer
|
||||
if ? pb.getField(3, pbReservation).toOpt():
|
||||
var
|
||||
svoucher: seq[byte]
|
||||
reservation: Reservation
|
||||
if ? pbReservation.getField(3, svoucher).toOpt():
|
||||
reservation.svoucher = Opt.some(svoucher)
|
||||
? pbReservation.getRequiredField(1, reservation.expire).toOpt()
|
||||
discard ? pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
|
||||
msg.reservation = Opt.some(reservation)
|
||||
|
||||
var pbLimit: ProtoBuffer
|
||||
if ? pb.getField(4, pbLimit).toOpt():
|
||||
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
|
||||
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
|
||||
|
||||
var statusOrd: uint32
|
||||
if ? pb.getField(5, statusOrd).toOpt():
|
||||
var status: StatusV2
|
||||
if not checkedEnumAssign(status, statusOrd):
|
||||
return none(HopMessage)
|
||||
msg.status = some(status)
|
||||
some(msg)
|
||||
return Opt.none(HopMessage)
|
||||
msg.status = Opt.some(status)
|
||||
Opt.some(msg)
|
||||
|
||||
# Circuit Relay V2 Stop Message
|
||||
|
||||
@@ -300,19 +272,19 @@ type
|
||||
Status = 1
|
||||
StopMessage* = object
|
||||
msgType*: StopMessageType
|
||||
peer*: Option[Peer]
|
||||
peer*: Opt[Peer]
|
||||
limit*: Limit
|
||||
status*: Option[StatusV2]
|
||||
status*: Opt[StatusV2]
|
||||
|
||||
|
||||
proc encode*(msg: StopMessage): ProtoBuffer =
|
||||
var pb = initProtoBuffer()
|
||||
|
||||
pb.write(1, msg.msgType.ord.uint)
|
||||
if msg.peer.isSome():
|
||||
msg.peer.withValue(peer):
|
||||
var ppb = initProtoBuffer()
|
||||
ppb.write(1, msg.peer.get().peerId)
|
||||
for ma in msg.peer.get().addrs:
|
||||
ppb.write(1, peer.peerId)
|
||||
for ma in peer.addrs:
|
||||
ppb.write(2, ma.data.buffer)
|
||||
ppb.finish()
|
||||
pb.write(2, ppb.buffer)
|
||||
@@ -322,52 +294,40 @@ proc encode*(msg: StopMessage): ProtoBuffer =
|
||||
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
|
||||
lpb.finish()
|
||||
pb.write(3, lpb.buffer)
|
||||
if msg.status.isSome():
|
||||
pb.write(4, msg.status.get().ord.uint)
|
||||
msg.status.withValue(status):
|
||||
pb.write(4, status.ord.uint)
|
||||
|
||||
pb.finish()
|
||||
pb
|
||||
|
||||
proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Option[StopMessage] =
|
||||
var
|
||||
msg: StopMessage
|
||||
msgTypeOrd: uint32
|
||||
pbPeer: ProtoBuffer
|
||||
pbLimit: ProtoBuffer
|
||||
statusOrd: uint32
|
||||
peer: Peer
|
||||
limit: Limit
|
||||
rVoucher: ProtoResult[bool]
|
||||
res: bool
|
||||
proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Opt[StopMessage] =
|
||||
var msg: StopMessage
|
||||
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getRequiredField(1, msgTypeOrd)
|
||||
r2 = pb.getField(2, pbPeer)
|
||||
r3 = pb.getField(3, pbLimit)
|
||||
r4 = pb.getField(4, statusOrd)
|
||||
let pb = initProtoBuffer(buf)
|
||||
|
||||
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
|
||||
return none(StopMessage)
|
||||
|
||||
if r2.get() and
|
||||
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
|
||||
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
|
||||
return none(StopMessage)
|
||||
|
||||
if r3.get() and
|
||||
(pbLimit.getField(1, limit.duration).isErr() or
|
||||
pbLimit.getField(2, limit.data).isErr()):
|
||||
return none(StopMessage)
|
||||
|
||||
if msgTypeOrd.int notin StopMessageType.low.ord .. StopMessageType.high.ord:
|
||||
return none(StopMessage)
|
||||
var msgTypeOrd: uint32
|
||||
? pb.getRequiredField(1, msgTypeOrd).toOpt()
|
||||
if msgTypeOrd.int notin StopMessageType:
|
||||
return Opt.none(StopMessage)
|
||||
msg.msgType = StopMessageType(msgTypeOrd)
|
||||
if r2.get(): msg.peer = some(peer)
|
||||
if r3.get(): msg.limit = limit
|
||||
if r4.get():
|
||||
|
||||
|
||||
var pbPeer: ProtoBuffer
|
||||
if ? pb.getField(2, pbPeer).toOpt():
|
||||
var peer: Peer
|
||||
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
|
||||
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
|
||||
msg.peer = Opt.some(peer)
|
||||
|
||||
var pbLimit: ProtoBuffer
|
||||
if ? pb.getField(3, pbLimit).toOpt():
|
||||
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
|
||||
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
|
||||
|
||||
var statusOrd: uint32
|
||||
if ? pb.getField(4, statusOrd).toOpt():
|
||||
var status: StatusV2
|
||||
if not checkedEnumAssign(status, statusOrd):
|
||||
return none(StopMessage)
|
||||
msg.status = some(status)
|
||||
some(msg)
|
||||
return Opt.none(StopMessage)
|
||||
msg.status = Opt.some(status)
|
||||
Opt.some(msg)
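Note: both decoders above validate the wire ordinal with `checkedEnumAssign` from stew/objects before trusting it as an enum value. A small sketch of that guard; the enum here is illustrative.

import stew/objects

type DemoStatus = enum
  Unused, Ok, MalformedMessage

var s = Unused
assert checkedEnumAssign(s, 2'u32)       # valid ordinal: assigned
assert s == MalformedMessage
assert not checkedEnumAssign(s, 42'u32)  # unknown ordinal: s is left untouched
assert s == MalformedMessage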
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
|
||||
@@ -26,11 +23,15 @@ type
|
||||
method readOnce*(
|
||||
self: RelayConnection,
|
||||
pbytes: pointer,
|
||||
nbytes: int): Future[int] {.async.} =
|
||||
nbytes: int
|
||||
): Future[int] {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
self.activity = true
|
||||
return await self.conn.readOnce(pbytes, nbytes)
|
||||
self.conn.readOnce(pbytes, nbytes)
|
||||
|
||||
method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
|
||||
method write*(
|
||||
self: RelayConnection,
|
||||
msg: seq[byte]
|
||||
): Future[void] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
self.dataSent.inc(msg.len)
|
||||
if self.limitData != 0 and self.dataSent > self.limitData:
|
||||
await self.close()
|
||||
@@ -38,24 +39,25 @@ method write*(self: RelayConnection, msg: seq[byte]): Future[void] {.async.} =
|
||||
self.activity = true
|
||||
await self.conn.write(msg)
|
||||
|
||||
method closeImpl*(self: RelayConnection): Future[void] {.async.} =
|
||||
method closeImpl*(self: RelayConnection): Future[void] {.async: (raises: []).} =
|
||||
await self.conn.close()
|
||||
await procCall Connection(self).closeImpl()
|
||||
|
||||
method getWrapped*(self: RelayConnection): Connection = self.conn
|
||||
|
||||
proc new*(
|
||||
T: typedesc[RelayConnection],
|
||||
conn: Connection,
|
||||
limitDuration: uint32,
|
||||
limitData: uint64): T =
|
||||
T: typedesc[RelayConnection],
|
||||
conn: Connection,
|
||||
limitDuration: uint32,
|
||||
limitData: uint64): T =
|
||||
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
|
||||
rc.dir = conn.dir
|
||||
rc.initStream()
|
||||
if limitDuration > 0:
|
||||
proc checkDurationConnection() {.async.} =
|
||||
let sleep = sleepAsync(limitDuration.seconds())
|
||||
await sleep or conn.join()
|
||||
if sleep.finished: await conn.close()
|
||||
else: sleep.cancel()
|
||||
proc checkDurationConnection() {.async: (raises: []).} =
|
||||
try:
|
||||
await noCancel conn.join().wait(limitDuration.seconds())
|
||||
except AsyncTimeoutError:
|
||||
await conn.close()
|
||||
asyncSpawn checkDurationConnection()
|
||||
return rc
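Note: the relay connection methods above gain explicit `{.async: (raises: [...]).}` annotations, and the duration guard is rewritten around `noCancel` plus `wait`. A rough sketch of that timeout pattern with chronos, using a generic task in place of the relayed connection; the proc names are made up.

import chronos

proc closeAfter(task: Future[void], limit: Duration) {.async.} =
  # Wait for the task to finish on its own, but never longer than `limit`;
  # noCancel keeps an outside cancellation from interrupting the cleanup.
  try:
    await noCancel task.wait(limit)
  except AsyncTimeoutError:
    await task.cancelAndWait()

proc main() {.async.} =
  let slow = sleepAsync(10.seconds)
  await closeAfter(slow, 100.milliseconds)
  assert slow.cancelled()

waitFor main()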
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,12 +7,9 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import options, sequtils, tables
|
||||
import sequtils, tables
|
||||
|
||||
import chronos, chronicles
|
||||
|
||||
@@ -93,11 +90,11 @@ proc createReserveResponse(
|
||||
rsrv = Reservation(expire: expireUnix,
|
||||
addrs: r.switch.peerInfo.addrs.mapIt(
|
||||
? it.concat(ma).orErr(CryptoError.KeyError)),
|
||||
svoucher: some(? sv.encode))
|
||||
svoucher: Opt.some(? sv.encode))
|
||||
msg = HopMessage(msgType: HopMessageType.Status,
|
||||
reservation: some(rsrv),
|
||||
reservation: Opt.some(rsrv),
|
||||
limit: r.limit,
|
||||
status: some(Ok))
|
||||
status: Opt.some(Ok))
|
||||
return ok(msg)
|
||||
|
||||
proc isRelayed*(conn: Connection): bool =
|
||||
@@ -108,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
|
||||
wrappedConn = wrappedConn.getWrapped()
|
||||
return false
|
||||
|
||||
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
|
||||
proc handleReserve(r: Relay, conn: Connection) {.async.} =
|
||||
if conn.isRelayed():
|
||||
trace "reservation attempt over relay connection", pid = conn.peerId
|
||||
await sendHopStatus(conn, PermissionDenied)
|
||||
@@ -118,32 +115,30 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
|
||||
trace "Too many reservations", pid = conn.peerId
|
||||
await sendHopStatus(conn, ReservationRefused)
|
||||
return
|
||||
trace "reserving relay slot for", pid = conn.peerId
|
||||
let
|
||||
pid = conn.peerId
|
||||
expire = now().utc + r.reservationTTL
|
||||
msg = r.createReserveResponse(pid, expire)
|
||||
msg = r.createReserveResponse(pid, expire).valueOr:
|
||||
trace "error signing the voucher", pid
|
||||
return
|
||||
|
||||
trace "reserving relay slot for", pid
|
||||
if msg.isErr():
|
||||
trace "error signing the voucher", error = error(msg), pid
|
||||
return
|
||||
r.rsvp[pid] = expire
|
||||
await conn.writeLp(encode(msg.get()).buffer)
|
||||
await conn.writeLp(encode(msg).buffer)
|
||||
|
||||
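The handleReserve rewrite above relies on valueOr from stew/results to replace the isErr/get pairs. A self-contained sketch of that idiom; fetchLimit and its error type are made up for illustration:

import stew/results

proc fetchLimit(): Result[int, string] =
  ok(32)

proc limitOrDefault(): int =
  let limit = fetchLimit().valueOr:
    # `error` is injected inside the valueOr block, mirroring the
    # createReserveResponse(...).valueOr: usage above.
    echo "falling back to default limit: ", error
    return 16
  limit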
proc handleConnect(r: Relay,
|
||||
connSrc: Connection,
|
||||
msg: HopMessage) {.async, gcsafe.} =
|
||||
msg: HopMessage) {.async.} =
|
||||
if connSrc.isRelayed():
|
||||
trace "connection attempt over relay connection"
|
||||
await sendHopStatus(connSrc, PermissionDenied)
|
||||
return
|
||||
if msg.peer.isNone():
|
||||
await sendHopStatus(connSrc, MalformedMessage)
|
||||
return
|
||||
|
||||
let
|
||||
msgPeer = msg.peer.valueOr:
|
||||
await sendHopStatus(connSrc, MalformedMessage)
|
||||
return
|
||||
src = connSrc.peerId
|
||||
dst = msg.peer.get().peerId
|
||||
dst = msgPeer.peerId
|
||||
if dst notin r.rsvp:
|
||||
trace "refusing connection, no reservation", src, dst
|
||||
await sendHopStatus(connSrc, NoReservation)
|
||||
@@ -176,16 +171,17 @@ proc handleConnect(r: Relay,
|
||||
|
||||
proc sendStopMsg() {.async.} =
|
||||
let stopMsg = StopMessage(msgType: StopMessageType.Connect,
|
||||
peer: some(Peer(peerId: src, addrs: @[])),
|
||||
peer: Opt.some(Peer(peerId: src, addrs: @[])),
|
||||
limit: r.limit)
|
||||
await connDst.writeLp(encode(stopMsg).buffer)
|
||||
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).get()
|
||||
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).valueOr:
|
||||
raise newException(SendStopError, "Malformed message")
|
||||
if msg.msgType != StopMessageType.Status:
|
||||
raise newException(SendStopError, "Unexpected stop response, not a status message")
|
||||
if msg.status.get(UnexpectedMessage) != Ok:
|
||||
raise newException(SendStopError, "Relay stop failure")
|
||||
await connSrc.writeLp(encode(HopMessage(msgType: HopMessageType.Status,
|
||||
status: some(Ok))).buffer)
|
||||
status: Opt.some(Ok))).buffer)
|
||||
try:
|
||||
await sendStopMsg()
|
||||
except CancelledError as exc:
|
||||
@@ -204,13 +200,11 @@ proc handleConnect(r: Relay,
|
||||
await rconnDst.close()
|
||||
await bridge(rconnSrc, rconnDst)
|
||||
|
||||
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
|
||||
let msgOpt = HopMessage.decode(await conn.readLp(r.msgSize))
|
||||
if msgOpt.isNone():
|
||||
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
|
||||
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
|
||||
await sendHopStatus(conn, MalformedMessage)
|
||||
return
|
||||
trace "relayv2 handle stream", msg = msgOpt.get()
|
||||
let msg = msgOpt.get()
|
||||
trace "relayv2 handle stream", msg = msg
|
||||
case msg.msgType:
|
||||
of HopMessageType.Reserve: await r.handleReserve(conn)
|
||||
of HopMessageType.Connect: await r.handleConnect(conn, msg)
|
||||
@@ -220,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
|
||||
|
||||
# Relay V1
|
||||
|
||||
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
|
||||
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
|
||||
r.streamCount.inc()
|
||||
defer: r.streamCount.dec()
|
||||
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
|
||||
@@ -228,15 +222,14 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
|
||||
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
|
||||
return
|
||||
|
||||
var src, dst: RelayPeer
|
||||
proc checkMsg(): Result[RelayMessage, StatusV1] =
|
||||
if msg.srcPeer.isNone:
|
||||
src = msg.srcPeer.valueOr:
|
||||
return err(StatusV1.HopSrcMultiaddrInvalid)
|
||||
let src = msg.srcPeer.get()
|
||||
if src.peerId != connSrc.peerId:
|
||||
return err(StatusV1.HopSrcMultiaddrInvalid)
|
||||
if msg.dstPeer.isNone:
|
||||
dst = msg.dstPeer.valueOr:
|
||||
return err(StatusV1.HopDstMultiaddrInvalid)
|
||||
let dst = msg.dstPeer.get()
|
||||
if dst.peerId == r.switch.peerInfo.peerId:
|
||||
return err(StatusV1.HopCantRelayToSelf)
|
||||
if not r.switch.isConnected(dst.peerId):
|
||||
@@ -248,9 +241,6 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
|
||||
await sendStatus(connSrc, check.error())
|
||||
return
|
||||
|
||||
let
|
||||
src = msg.srcPeer.get()
|
||||
dst = msg.dstPeer.get()
|
||||
if r.peerCount[src.peerId] >= r.maxCircuitPerPeer or
|
||||
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
|
||||
trace "refusing connection; too many connection from src or to dst", src, dst
|
||||
@@ -274,9 +264,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
|
||||
await connDst.close()
|
||||
|
||||
let msgToSend = RelayMessage(
|
||||
msgType: some(RelayType.Stop),
|
||||
srcPeer: some(src),
|
||||
dstPeer: some(dst))
|
||||
msgType: Opt.some(RelayType.Stop),
|
||||
srcPeer: Opt.some(src),
|
||||
dstPeer: Opt.some(dst))
|
||||
|
||||
let msgRcvFromDstOpt = try:
|
||||
await connDst.writeLp(encode(msgToSend).buffer)
|
||||
@@ -288,12 +278,11 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
|
||||
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
|
||||
return
|
||||
|
||||
if msgRcvFromDstOpt.isNone:
|
||||
let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
|
||||
trace "error reading stop response", msg = msgRcvFromDstOpt
|
||||
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
|
||||
return
|
||||
|
||||
let msgRcvFromDst = msgRcvFromDstOpt.get()
|
||||
if msgRcvFromDst.msgType.get(RelayType.Stop) != RelayType.Status or
|
||||
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
|
||||
trace "unexcepted relay stop response", msgRcvFromDst
|
||||
@@ -304,14 +293,17 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
|
||||
trace "relaying connection", src, dst
|
||||
await bridge(connSrc, connDst)
|
||||
|
||||
proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
|
||||
let msgOpt = RelayMessage.decode(await conn.readLp(r.msgSize))
|
||||
if msgOpt.isNone:
|
||||
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
|
||||
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
|
||||
await sendStatus(conn, StatusV1.MalformedMessage)
|
||||
return
|
||||
trace "relay handle stream", msg = msgOpt.get()
|
||||
let msg = msgOpt.get()
|
||||
case msg.msgType.get:
|
||||
trace "relay handle stream", msg
|
||||
|
||||
let typ = msg.msgType.valueOr:
|
||||
trace "Message type not set"
|
||||
await sendStatus(conn, StatusV1.MalformedMessage)
|
||||
return
|
||||
case typ:
|
||||
of RelayType.Hop: await r.handleHop(conn, msg)
|
||||
of RelayType.Stop: await sendStatus(conn, StatusV1.StopRelayRefused)
|
||||
of RelayType.CanHop: await sendStatus(conn, StatusV1.Success)
|
||||
@@ -344,7 +336,7 @@ proc new*(T: typedesc[Relay],
|
||||
msgSize: msgSize,
|
||||
isCircuitRelayV1: circuitRelayV1)
|
||||
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
case proto:
|
||||
of RelayV2HopCodec: await r.handleHopStreamV2(conn)
|
||||
@@ -369,17 +361,25 @@ proc deletesReservation(r: Relay) {.async.} =
|
||||
if n > r.rsvp[k]:
|
||||
r.rsvp.del(k)
|
||||
|
||||
method start*(r: Relay) {.async.} =
|
||||
method start*(
|
||||
r: Relay
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if not r.reservationLoop.isNil:
|
||||
warn "Starting relay twice"
|
||||
return
|
||||
return fut
|
||||
r.reservationLoop = r.deletesReservation()
|
||||
r.started = true
|
||||
fut
|
||||
|
||||
method stop*(r: Relay) {.async.} =
|
||||
method stop*(r: Relay): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if r.reservationLoop.isNil:
|
||||
warn "Stopping relay without starting it"
|
||||
return
|
||||
return fut
|
||||
r.started = false
|
||||
r.reservationLoop.cancel()
|
||||
r.reservationLoop = nil
|
||||
fut
|
||||
|
||||
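The new start/stop methods use chronos' raw async form: the body constructs and returns an already-completed Future instead of awaiting anything. A reduced sketch of that shape, with a hypothetical noopStart proc:

import chronos

proc noopStart(): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  # raw procs must build and return the future themselves
  let fut = newFuture[void]()
  fut.complete()   # nothing asynchronous to do
  fut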
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import sequtils, strutils
|
||||
|
||||
@@ -40,33 +37,33 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
|
||||
self.client.onNewConnection = proc(
|
||||
conn: Connection,
|
||||
duration: uint32 = 0,
|
||||
data: uint64 = 0) {.async, gcsafe, raises: [Defect].} =
|
||||
data: uint64 = 0) {.async.} =
|
||||
await self.queue.addLast(RelayConnection.new(conn, duration, data))
|
||||
await conn.join()
|
||||
self.selfRunning = true
|
||||
await procCall Transport(self).start(ma)
|
||||
trace "Starting Relay transport"
|
||||
|
||||
method stop*(self: RelayTransport) {.async, gcsafe.} =
|
||||
method stop*(self: RelayTransport) {.async.} =
|
||||
self.running = false
|
||||
self.selfRunning = false
|
||||
self.client.onNewConnection = nil
|
||||
while not self.queue.empty():
|
||||
await self.queue.popFirstNoWait().close()
|
||||
|
||||
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: RelayTransport): Future[Connection] {.async.} =
|
||||
result = await self.queue.popFirst()
|
||||
|
||||
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
|
||||
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
|
||||
let
|
||||
sma = toSeq(ma.items())
|
||||
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
|
||||
var
|
||||
relayPeerId: PeerId
|
||||
dstPeerId: PeerId
|
||||
if not relayPeerId.init(($(sma[^3].get())).split('/')[2]):
|
||||
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
|
||||
raise newException(RelayV2DialError, "Relay doesn't exist")
|
||||
if not dstPeerId.init(($(sma[^1].get())).split('/')[2]):
|
||||
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
|
||||
raise newException(RelayV2DialError, "Destination doesn't exist")
|
||||
trace "Dial", relayPeerId, dstPeerId
|
||||
|
||||
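RelayTransport.dial above slices the target multiaddress into a relay part and a destination part. A hypothetical helper building an address of that shape (the /ip4 prefix is only an example; any transport address for the relay works):

import libp2p/[multiaddress, peerid]

proc circuitAddr(relay, dst: PeerId): MultiAddress =
  # /<relay transport>/p2p/<relay peer>/p2p-circuit/p2p/<destination peer>
  MultiAddress.init(
    "/ip4/127.0.0.1/tcp/4001/p2p/" & $relay &
    "/p2p-circuit/p2p/" & $dst).tryGet()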
@@ -93,14 +90,18 @@ method dial*(
|
||||
self: RelayTransport,
|
||||
hostname: string,
|
||||
ma: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
|
||||
result = await self.dial(address)
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
peerId.withValue(pid):
|
||||
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
|
||||
result = await self.dial(address)
|
||||
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
|
||||
if ma.protocols.isOk():
|
||||
let sma = toSeq(ma.items())
|
||||
result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
|
||||
try:
|
||||
if ma.protocols.isOk():
|
||||
let sma = toSeq(ma.items())
|
||||
result = sma.len >= 2 and CircuitRelay.match(sma[^1].tryGet())
|
||||
except CatchableError as exc:
|
||||
result = false
|
||||
trace "Handles return", ma, result
|
||||
|
||||
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,15 +7,9 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import options
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, chronicles
|
||||
|
||||
import ./messages,
|
||||
../../../stream/connection
|
||||
|
||||
@@ -27,63 +21,77 @@ const
|
||||
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
|
||||
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
|
||||
|
||||
proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
|
||||
proc sendStatus*(
|
||||
conn: Connection,
|
||||
code: StatusV1
|
||||
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
|
||||
let
|
||||
msg = RelayMessage(msgType: some(RelayType.Status), status: some(code))
|
||||
msg = RelayMessage(
|
||||
msgType: Opt.some(RelayType.Status), status: Opt.some(code))
|
||||
pb = encode(msg)
|
||||
await conn.writeLp(pb.buffer)
|
||||
conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
|
||||
proc sendHopStatus*(
|
||||
conn: Connection,
|
||||
code: StatusV2
|
||||
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
|
||||
let
|
||||
msg = HopMessage(msgType: HopMessageType.Status, status: some(code))
|
||||
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
|
||||
pb = encode(msg)
|
||||
await conn.writeLp(pb.buffer)
|
||||
conn.writeLp(pb.buffer)
|
||||
|
||||
proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
|
||||
proc sendStopStatus*(
|
||||
conn: Connection,
|
||||
code: StatusV2
|
||||
) {.async: (raises: [CancelledError, LPStreamError], raw: true).} =
|
||||
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
|
||||
let
|
||||
msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
|
||||
msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
|
||||
pb = encode(msg)
|
||||
await conn.writeLp(pb.buffer)
|
||||
conn.writeLp(pb.buffer)
|
||||
|
||||
proc bridge*(connSrc: Connection, connDst: Connection) {.async.} =
|
||||
proc bridge*(
|
||||
connSrc: Connection,
|
||||
connDst: Connection) {.async: (raises: [CancelledError]).} =
|
||||
const bufferSize = 4096
|
||||
var
|
||||
bufSrcToDst: array[bufferSize, byte]
|
||||
bufDstToSrc: array[bufferSize, byte]
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
||||
bytesSendFromSrcToDst = 0
|
||||
bytesSendFromDstToSrc = 0
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
bytesSentFromSrcToDst = 0
|
||||
bytesSentFromDstToSrc = 0
|
||||
bufRead: int
|
||||
|
||||
try:
|
||||
while not connSrc.closed() and not connDst.closed():
|
||||
await futSrc or futDst
|
||||
try: # https://github.com/status-im/nim-chronos/issues/516
|
||||
discard await race(futSrc, futDst)
|
||||
except ValueError: raiseAssert("Futures list is not empty")
|
||||
if futSrc.finished():
|
||||
bufRead = await futSrc
|
||||
if bufRead > 0:
|
||||
bytesSendFromSrcToDst.inc(bufRead)
|
||||
await connDst.write(@bufSrcToDst[0..<bufRead])
|
||||
zeroMem(addr(bufSrcToDst), bufSrcToDst.high + 1)
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.high + 1)
|
||||
bytesSentFromSrcToDst.inc(bufRead)
|
||||
await connDst.write(@bufSrcToDst[0 ..< bufRead])
|
||||
zeroMem(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
futSrc = connSrc.readOnce(addr bufSrcToDst[0], bufSrcToDst.len)
|
||||
if futDst.finished():
|
||||
bufRead = await futDst
|
||||
if bufRead > 0:
|
||||
bytesSendFromDstToSrc += bufRead
|
||||
await connSrc.write(bufDstToSrc[0..<bufRead])
|
||||
zeroMem(addr(bufDstToSrc), bufDstToSrc.high + 1)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.high + 1)
|
||||
bytesSentFromDstToSrc += bufRead
|
||||
await connSrc.write(bufDstToSrc[0 ..< bufRead])
|
||||
zeroMem(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
futDst = connDst.readOnce(addr bufDstToSrc[0], bufDstToSrc.len)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
except LPStreamError as exc:
|
||||
if connSrc.closed() or connSrc.atEof():
|
||||
trace "relay src closed connection", src = connSrc.peerId
|
||||
if connDst.closed() or connDst.atEof():
|
||||
trace "relay dst closed connection", dst = connDst.peerId
|
||||
trace "relay error", exc=exc.msg
|
||||
trace "end relaying", bytesSendFromSrcToDst, bytesSendFromDstToSrc
|
||||
trace "end relaying", bytesSentFromSrcToDst, bytesSentFromDstToSrc
|
||||
await futSrc.cancelAndWait()
|
||||
await futDst.cancelAndWait()
|
||||
|
||||
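bridge above pumps bytes in both directions until either side closes, using an explicit race() call to work around status-im/nim-chronos#516. A trimmed, one-direction sketch of the same loop, assuming two libp2p Connections; the proc name and buffer size are illustrative:

import chronos
import libp2p/stream/connection

proc pump(src, dst: Connection) {.async: (raises: [CancelledError]).} =
  var buf: array[4096, byte]
  var fut = src.readOnce(addr buf[0], buf.len)
  try:
    while not src.closed() and not dst.closed():
      let n = await fut
      if n == 0:
        break                          # src reached end of stream
      await dst.write(@buf[0 ..< n])   # forward the chunk
      fut = src.readOnce(addr buf[0], buf.len)
  except LPStreamError:
    discard                            # either side went away; stop relaying
  await fut.cancelAndWait()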
@@ -10,10 +10,7 @@
|
||||
## `Identify <https://docs.libp2p.io/concepts/protocols/#identify>`_ and
|
||||
## `Push Identify <https://docs.libp2p.io/concepts/protocols/#identify-push>`_ implementation
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sequtils, options, strutils, sugar]
|
||||
import stew/results
|
||||
@@ -24,6 +21,7 @@ import ../protobuf/minprotobuf,
|
||||
../peerid,
|
||||
../crypto/crypto,
|
||||
../multiaddress,
|
||||
../multicodec,
|
||||
../protocols/protocol,
|
||||
../utility,
|
||||
../errors,
|
||||
@@ -65,7 +63,7 @@ type
|
||||
peer: PeerId,
|
||||
newInfo: IdentifyInfo):
|
||||
Future[void]
|
||||
{.gcsafe, raises: [Defect], public.}
|
||||
{.gcsafe, raises: [], public.}
|
||||
|
||||
IdentifyPush* = ref object of LPProtocol
|
||||
identifyHandler: IdentifyPushHandler
|
||||
@@ -74,30 +72,28 @@ chronicles.expandIt(IdentifyInfo):
|
||||
pubkey = ($it.pubkey).shortLog
|
||||
addresses = it.addrs.map(x => $x).join(",")
|
||||
protocols = it.protos.map(x => $x).join(",")
|
||||
observable_address =
|
||||
if it.observedAddr.isSome(): $it.observedAddr.get()
|
||||
else: "None"
|
||||
observable_address = $it.observedAddr
|
||||
proto_version = it.protoVersion.get("None")
|
||||
agent_version = it.agentVersion.get("None")
|
||||
signedPeerRecord =
|
||||
# The SPR contains the same data as the identify message
|
||||
# would be cumbersome to log
|
||||
if iinfo.signedPeerRecord.isSome(): "Some"
|
||||
if it.signedPeerRecord.isSome(): "Some"
|
||||
else: "None"
|
||||
|
||||
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
|
||||
{.raises: [Defect].} =
|
||||
{.raises: [].} =
|
||||
result = initProtoBuffer()
|
||||
|
||||
let pkey = peerInfo.publicKey
|
||||
|
||||
result.write(1, pkey.getBytes().get())
|
||||
result.write(1, pkey.getBytes().expect("valid key"))
|
||||
for ma in peerInfo.addrs:
|
||||
result.write(2, ma.data.buffer)
|
||||
for proto in peerInfo.protocols:
|
||||
result.write(3, proto)
|
||||
if observedAddr.isSome:
|
||||
result.write(4, observedAddr.get().data.buffer)
|
||||
observedAddr.withValue(observed):
|
||||
result.write(4, observed.data.buffer)
|
||||
let protoVersion = ProtoVersion
|
||||
result.write(5, protoVersion)
|
||||
let agentVersion = if peerInfo.agentVersion.len <= 0:
|
||||
@@ -109,13 +105,12 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: boo
|
||||
## Optionally populate signedPeerRecord field.
|
||||
## See https://github.com/libp2p/go-libp2p/blob/ddf96ce1cfa9e19564feb9bd3e8269958bbc0aba/p2p/protocol/identify/pb/identify.proto for reference.
|
||||
if sendSpr:
|
||||
let sprBuff = peerInfo.signedPeerRecord.envelope.encode()
|
||||
if sprBuff.isOk():
|
||||
result.write(8, sprBuff.get())
|
||||
peerInfo.signedPeerRecord.envelope.encode().toOpt().withValue(sprBuff):
|
||||
result.write(8, sprBuff)
|
||||
|
||||
result.finish()
|
||||
|
||||
proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
|
||||
proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
|
||||
var
|
||||
iinfo: IdentifyInfo
|
||||
pubkey: PublicKey
|
||||
@@ -125,53 +120,38 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
|
||||
signedPeerRecord: SignedPeerRecord
|
||||
|
||||
var pb = initProtoBuffer(buf)
|
||||
if ? pb.getField(1, pubkey).toOpt():
|
||||
iinfo.pubkey = some(pubkey)
|
||||
if ? pb.getField(8, signedPeerRecord).toOpt() and
|
||||
pubkey == signedPeerRecord.envelope.publicKey:
|
||||
iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
|
||||
discard ? pb.getRepeatedField(2, iinfo.addrs).toOpt()
|
||||
discard ? pb.getRepeatedField(3, iinfo.protos).toOpt()
|
||||
if ? pb.getField(4, oaddr).toOpt():
|
||||
iinfo.observedAddr = some(oaddr)
|
||||
if ? pb.getField(5, protoVersion).toOpt():
|
||||
iinfo.protoVersion = some(protoVersion)
|
||||
if ? pb.getField(6, agentVersion).toOpt():
|
||||
iinfo.agentVersion = some(agentVersion)
|
||||
|
||||
let r1 = pb.getField(1, pubkey)
|
||||
let r2 = pb.getRepeatedField(2, iinfo.addrs)
|
||||
let r3 = pb.getRepeatedField(3, iinfo.protos)
|
||||
let r4 = pb.getField(4, oaddr)
|
||||
let r5 = pb.getField(5, protoVersion)
|
||||
let r6 = pb.getField(6, agentVersion)
|
||||
|
||||
let r8 = pb.getField(8, signedPeerRecord)
|
||||
|
||||
let res = r1.isOk() and r2.isOk() and r3.isOk() and
|
||||
r4.isOk() and r5.isOk() and r6.isOk() and
|
||||
r8.isOk()
|
||||
|
||||
if res:
|
||||
if r1.get():
|
||||
iinfo.pubkey = some(pubkey)
|
||||
if r4.get():
|
||||
iinfo.observedAddr = some(oaddr)
|
||||
if r5.get():
|
||||
iinfo.protoVersion = some(protoVersion)
|
||||
if r6.get():
|
||||
iinfo.agentVersion = some(agentVersion)
|
||||
if r8.get() and r1.get():
|
||||
if iinfo.pubkey.get() == signedPeerRecord.envelope.publicKey:
|
||||
iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
|
||||
debug "decodeMsg: decoded identify", iinfo
|
||||
some(iinfo)
|
||||
else:
|
||||
trace "decodeMsg: failed to decode received message"
|
||||
none[IdentifyInfo]()
|
||||
Opt.some(iinfo)
|
||||
|
||||
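decodeMsg now returns an Opt[IdentifyInfo] instead of an Option, so callers unwrap it with valueOr, or react to presence with the withValue helper from libp2p/utility. A small illustration with a made-up findAgent lookup:

import stew/results
import libp2p/utility

proc findAgent(known: bool): Opt[string] =
  if known: Opt.some("nim-libp2p/1.x") else: Opt.none(string)

# valueOr: supply a fallback (or early return) when the value is absent
let agent = findAgent(false).valueOr:
  "unknown"

# withValue: run the block only when a value is present
findAgent(true).withValue(a):
  echo "agent is ", a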
proc new*(
|
||||
T: typedesc[Identify],
|
||||
peerInfo: PeerInfo,
|
||||
sendSignedPeerRecord = false
|
||||
sendSignedPeerRecord = false,
|
||||
observedAddrManager = ObservedAddrManager.new(),
|
||||
): T =
|
||||
let identify = T(
|
||||
peerInfo: peerInfo,
|
||||
sendSignedPeerRecord: sendSignedPeerRecord,
|
||||
observedAddrManager: ObservedAddrManager.new(),
|
||||
observedAddrManager: observedAddrManager,
|
||||
)
|
||||
identify.init()
|
||||
identify
|
||||
|
||||
method init*(p: Identify) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
trace "handling identify request", conn
|
||||
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
|
||||
@@ -189,33 +169,31 @@ method init*(p: Identify) =
|
||||
|
||||
proc identify*(self: Identify,
|
||||
conn: Connection,
|
||||
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
|
||||
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
|
||||
trace "initiating identify", conn
|
||||
var message = await conn.readLp(64*1024)
|
||||
if len(message) == 0:
|
||||
trace "identify: Empty message received!", conn
|
||||
raise newException(IdentityInvalidMsgError, "Empty message received!")
|
||||
|
||||
let infoOpt = decodeMsg(message)
|
||||
if infoOpt.isNone():
|
||||
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
debug "identify: decoded message", conn, info
|
||||
let
|
||||
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
|
||||
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)
|
||||
|
||||
var info = infoOpt.get()
|
||||
if info.pubkey.isNone():
|
||||
raise newException(IdentityInvalidMsgError, "No pubkey in identify")
|
||||
|
||||
let peer = PeerId.init(info.pubkey.get())
|
||||
if peer.isErr:
|
||||
raise newException(IdentityInvalidMsgError, $peer.error)
|
||||
|
||||
if peer.get() != remotePeerId:
|
||||
if peer != remotePeerId:
|
||||
trace "Peer ids don't match", remote = peer, local = remotePeerId
|
||||
raise newException(IdentityNoMatchError, "Peer ids don't match")
|
||||
info.peerId = peer.get()
|
||||
info.peerId = peer
|
||||
|
||||
if info.observedAddr.isSome:
|
||||
if not self.observedAddrManager.addObservation(info.observedAddr.get()):
|
||||
debug "Observed address is not valid", observedAddr = info.observedAddr.get()
|
||||
info.observedAddr.withValue(observed):
|
||||
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
|
||||
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
|
||||
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
|
||||
trace "Not adding address to ObservedAddrManager.", observed
|
||||
elif not self.observedAddrManager.addObservation(observed):
|
||||
trace "Observed address is not valid.", observedAddr = observed
|
||||
return info
|
||||
|
||||
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
|
||||
@@ -226,26 +204,24 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
|
||||
identifypush
|
||||
|
||||
proc init*(p: IdentifyPush) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
trace "handling identify push", conn
|
||||
try:
|
||||
var message = await conn.readLp(64*1024)
|
||||
|
||||
let infoOpt = decodeMsg(message)
|
||||
if infoOpt.isNone():
|
||||
var identInfo = decodeMsg(message).valueOr:
|
||||
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
|
||||
debug "identify push: decoded message", conn, identInfo
|
||||
|
||||
var indentInfo = infoOpt.get()
|
||||
|
||||
if indentInfo.pubkey.isSome:
|
||||
let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
|
||||
identInfo.pubkey.withValue(pubkey):
|
||||
let receivedPeerId = PeerId.init(pubkey).tryGet()
|
||||
if receivedPeerId != conn.peerId:
|
||||
raise newException(IdentityNoMatchError, "Peer ids don't match")
|
||||
indentInfo.peerId = receivedPeerId
|
||||
identInfo.peerId = receivedPeerId
|
||||
|
||||
trace "triggering peer event", peerInfo = conn.peerId
|
||||
if not isNil(p.identifyHandler):
|
||||
await p.identifyHandler(conn.peerId, indentInfo)
|
||||
await p.identifyHandler(conn.peerId, identInfo)
|
||||
except CancelledError as exc:
|
||||
raise exc
|
||||
except CatchableError as exc:
|
||||
|
||||
libp2p/protocols/perf/client.nim (new file, 47 lines)
@@ -0,0 +1,47 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

import chronos, chronicles, sequtils
import stew/endians2
import ./core, ../../stream/connection

logScope:
  topics = "libp2p perf"

type PerfClient* = ref object of RootObj

proc perf*(_: typedesc[PerfClient], conn: Connection,
           sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
           Future[Duration] {.async, public.} =
  var
    size = sizeToWrite
    buf: array[PerfSize, byte]
  let start = Moment.now()
  trace "starting performance benchmark", conn, sizeToWrite, sizeToRead

  await conn.write(toSeq(toBytesBE(sizeToRead)))
  while size > 0:
    let toWrite = min(size, PerfSize)
    await conn.write(buf[0..<toWrite])
    size -= toWrite

  await conn.close()

  size = sizeToRead

  while size > 0:
    let toRead = min(size, PerfSize)
    await conn.readExactly(addr buf[0], toRead.int)
    size = size - toRead

  let duration = Moment.now() - start
  trace "finishing performance benchmark", duration
  return duration
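A hypothetical client-side use of the new perf protocol: dial a peer on PerfCodec and measure how long it takes to push and then pull 10 MiB. The switch, peer id and addresses are assumed to exist already:

import chronos
import libp2p
import libp2p/protocols/perf/[client, core]

proc runPerf(switch: Switch, server: PeerId,
             addrs: seq[MultiAddress]) {.async.} =
  let conn = await switch.dial(server, addrs, PerfCodec)
  let elapsed = await PerfClient.perf(
    conn, sizeToWrite = 10_485_760'u64, sizeToRead = 10_485_760'u64)
  echo "perf round trip took ", elapsed
  await conn.close()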
libp2p/protocols/perf/core.nim (new file, 14 lines)
@@ -0,0 +1,14 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

const
  PerfCodec* = "/perf/1.0.0"
  PerfSize* = 65536
libp2p/protocols/perf/server.nim (new file, 60 lines)
@@ -0,0 +1,60 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

{.push raises: [].}

import chronos, chronicles
import stew/endians2
import ./core,
       ../protocol,
       ../../stream/connection,
       ../../utility

export chronicles, connection

logScope:
  topics = "libp2p perf"

type Perf* = ref object of LPProtocol

proc new*(T: typedesc[Perf]): T {.public.} =
  var p = T()
  proc handle(conn: Connection, proto: string) {.async.} =
    var bytesRead = 0
    try:
      trace "Received benchmark performance check", conn
      var
        sizeBuffer: array[8, byte]
        size: uint64
      await conn.readExactly(addr sizeBuffer[0], 8)
      size = uint64.fromBytesBE(sizeBuffer)

      var toReadBuffer: array[PerfSize, byte]
      try:
        while true:
          bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
      except CatchableError as exc:
        discard

      var buf: array[PerfSize, byte]
      while size > 0:
        let toWrite = min(size, PerfSize)
        await conn.write(buf[0..<toWrite])
        size -= toWrite
    except CancelledError as exc:
      raise exc
    except CatchableError as exc:
      trace "exception in perf handler", exc = exc.msg, conn
    await conn.close()

  p.handler = handle
  p.codec = PerfCodec
  return p
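The matching server side is just a protocol mount. A hypothetical setup using the standard SwitchBuilder (the builder options shown are indicative only):

import chronos
import libp2p
import libp2p/protocols/perf/server

proc startPerfServer(): Future[Switch] {.async.} =
  let switch = SwitchBuilder.new()
    .withRng(newRng())
    .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .build()
  switch.mount(Perf.new())   # serves /perf/1.0.0
  await switch.start()
  return switch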
@@ -9,10 +9,7 @@

## `Ping <https://docs.libp2p.io/concepts/protocols/#ping>`_ protocol implementation

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import chronos, chronicles
|
||||
import bearssl/rand
|
||||
@@ -42,7 +39,7 @@ type
|
||||
PingHandler* {.public.} = proc (
|
||||
peer: PeerId):
|
||||
Future[void]
|
||||
{.gcsafe, raises: [Defect].}
|
||||
{.gcsafe, raises: [].}
|
||||
|
||||
Ping* = ref object of LPProtocol
|
||||
pingHandler*: PingHandler
|
||||
@@ -54,12 +51,12 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
|
||||
ping
|
||||
|
||||
method init*(p: Ping) =
|
||||
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
|
||||
proc handle(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
trace "handling ping", conn
|
||||
var buf: array[PingSize, byte]
|
||||
await conn.readExactly(addr buf[0], PingSize)
|
||||
trace "echoing ping", conn
|
||||
trace "echoing ping", conn, pingData = @buf
|
||||
await conn.write(@buf)
|
||||
if not isNil(p.pingHandler):
|
||||
await p.pingHandler(conn.peerId)
|
||||
@@ -74,7 +71,7 @@ method init*(p: Ping) =
|
||||
proc ping*(
|
||||
p: Ping,
|
||||
conn: Connection,
|
||||
): Future[Duration] {.async, gcsafe, public.} =
|
||||
): Future[Duration] {.async, public.} =
|
||||
## Sends ping to `conn`, returns the delay
|
||||
|
||||
trace "initiating ping", conn
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos, stew/results
|
||||
import ../stream/connection
|
||||
@@ -22,20 +19,29 @@ const
|
||||
|
||||
type
|
||||
LPProtoHandler* = proc (
|
||||
conn: Connection,
|
||||
proto: string):
|
||||
Future[void]
|
||||
{.gcsafe, raises: [Defect].}
|
||||
conn: Connection,
|
||||
proto: string): Future[void] {.async.}
|
||||
|
||||
LPProtocol* = ref object of RootObj
|
||||
codecs*: seq[string]
|
||||
handler*: LPProtoHandler ## this handler gets invoked by the protocol negotiator
|
||||
handlerImpl: LPProtoHandler ## invoked by the protocol negotiator
|
||||
started*: bool
|
||||
maxIncomingStreams: Opt[int]
|
||||
|
||||
method init*(p: LPProtocol) {.base, gcsafe.} = discard
|
||||
method start*(p: LPProtocol) {.async, base.} = p.started = true
|
||||
method stop*(p: LPProtocol) {.async, base.} = p.started = false
|
||||
|
||||
method start*(
|
||||
p: LPProtocol) {.async: (raises: [CancelledError], raw: true), base.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
p.started = true
|
||||
fut
|
||||
|
||||
method stop*(p: LPProtocol) {.async: (raises: [], raw: true), base.} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
p.started = false
|
||||
fut
|
||||
|
||||
proc maxIncomingStreams*(p: LPProtocol): int =
|
||||
p.maxIncomingStreams.get(DefaultMaxIncomingStreams)
|
||||
@@ -44,7 +50,7 @@ proc `maxIncomingStreams=`*(p: LPProtocol, val: int) =
|
||||
p.maxIncomingStreams = Opt.some(val)
|
||||
|
||||
func codec*(p: LPProtocol): string =
|
||||
assert(p.codecs.len > 0, "Codecs sequence was empty!")
|
||||
doAssert(p.codecs.len > 0, "Codecs sequence was empty!")
|
||||
p.codecs[0]
|
||||
|
||||
func `codec=`*(p: LPProtocol, codec: string) =
|
||||
@@ -52,15 +58,51 @@ func `codec=`*(p: LPProtocol, codec: string) =
|
||||
# if we use this abstraction
|
||||
p.codecs.insert(codec, 0)
|
||||
|
||||
template `handler`*(p: LPProtocol): LPProtoHandler =
|
||||
p.handlerImpl
|
||||
|
||||
template `handler`*(
|
||||
p: LPProtocol, conn: Connection, proto: string): Future[void] =
|
||||
p.handlerImpl(conn, proto)
|
||||
|
||||
func `handler=`*(p: LPProtocol, handler: LPProtoHandler) =
|
||||
p.handlerImpl = handler
|
||||
|
||||
# Callbacks that are annotated with `{.async: (raises).}` explicitly
|
||||
# document the types of errors that they may raise, but are not compatible
|
||||
# with `LPProtoHandler` and need to use a custom `proc` type.
|
||||
# They are internally wrapped into a `LPProtoHandler`, but still allow the
|
||||
# compiler to check that their `{.async: (raises).}` annotation is correct.
|
||||
# https://github.com/nim-lang/Nim/issues/23432
|
||||
func `handler=`*[E](
|
||||
p: LPProtocol,
|
||||
handler: proc (
|
||||
conn: Connection,
|
||||
proto: string): InternalRaisesFuture[void, E]) =
|
||||
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
|
||||
await handler(conn, proto)
|
||||
p.handlerImpl = wrap
|
||||
|
||||
proc new*(
|
||||
T: type LPProtocol,
|
||||
codecs: seq[string],
|
||||
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
|
||||
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
|
||||
T: type LPProtocol,
|
||||
codecs: seq[string],
|
||||
handler: LPProtoHandler,
|
||||
maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
|
||||
T(
|
||||
codecs: codecs,
|
||||
handler: handler,
|
||||
handlerImpl: handler,
|
||||
maxIncomingStreams:
|
||||
when maxIncomingStreams is int: Opt.some(maxIncomingStreams)
|
||||
else: maxIncomingStreams
|
||||
)
|
||||
|
||||
proc new*[E](
|
||||
T: type LPProtocol,
|
||||
codecs: seq[string],
|
||||
handler: proc (
|
||||
conn: Connection,
|
||||
proto: string): InternalRaisesFuture[void, E],
|
||||
maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
|
||||
proc wrap(conn: Connection, proto: string): Future[void] {.async.} =
|
||||
await handler(conn, proto)
|
||||
T.new(codec, wrap, maxIncomingStreams)
|
||||
|
||||
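With the handlerImpl indirection above, a handler annotated with explicit raises can be assigned directly and is wrapped internally, while the compiler still checks its raises list. A hypothetical echo protocol using that setter:

import chronos
import libp2p/protocols/protocol
import libp2p/stream/connection

proc echoHandler(
    conn: Connection,
    proto: string) {.async: (raises: [CancelledError, LPStreamError]).} =
  var buf: array[256, byte]
  let n = await conn.readOnce(addr buf[0], buf.len)
  await conn.write(@buf[0 ..< n])   # echo back whatever was read
  await conn.close()

let echoProto = LPProtocol(codecs: @["/echo/1.0.0"])
echoProto.handler = echoHandler     # typed-raises proc, wrapped into handlerImpl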
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sets, hashes, tables]
|
||||
import chronos, chronicles, metrics
|
||||
@@ -18,7 +15,8 @@ import ./pubsub,
|
||||
./pubsubpeer,
|
||||
./timedcache,
|
||||
./peertable,
|
||||
./rpc/[message, messages],
|
||||
./rpc/[message, messages, protobuf],
|
||||
nimcrypto/[hash, sha2],
|
||||
../../crypto/crypto,
|
||||
../../stream/connection,
|
||||
../../peerid,
|
||||
@@ -35,25 +33,34 @@ const FloodSubCodec* = "/floodsub/1.0.0"
|
||||
type
|
||||
FloodSub* {.public.} = ref object of PubSub
|
||||
floodsub*: PeerTable # topic to remote peer map
|
||||
seen*: TimedCache[MessageId] # message id:s already seen on the network
|
||||
seenSalt*: seq[byte]
|
||||
seen*: TimedCache[SaltedId]
|
||||
# Early filter for messages recently observed on the network
|
||||
# We use a salted id because the messages in this cache have not yet
|
||||
# been validated meaning that an attacker has greater control over the
|
||||
# hash key and therefore could poison the table
|
||||
seenSalt*: sha256
|
||||
# The salt in this case is a partially updated SHA256 context pre-seeded
|
||||
# with some random data
|
||||
|
||||
proc hasSeen*(f: FloodSub, msgId: MessageId): bool =
|
||||
f.seenSalt & msgId in f.seen
|
||||
proc salt*(f: FloodSub, msgId: MessageId): SaltedId =
|
||||
var tmp = f.seenSalt
|
||||
tmp.update(msgId)
|
||||
SaltedId(data: tmp.finish())
|
||||
|
||||
proc addSeen*(f: FloodSub, msgId: MessageId): bool =
|
||||
# Salting the seen hash helps avoid attacks against the hash function used
|
||||
# in the nim hash table
|
||||
proc hasSeen*(f: FloodSub, saltedId: SaltedId): bool =
|
||||
saltedId in f.seen
|
||||
|
||||
proc addSeen*(f: FloodSub, saltedId: SaltedId): bool =
|
||||
# Return true if the message has already been seen
|
||||
f.seen.put(f.seenSalt & msgId)
|
||||
f.seen.put(saltedId)
|
||||
|
||||
proc firstSeen*(f: FloodSub, msgId: MessageId): Moment =
|
||||
f.seen.addedAt(f.seenSalt & msgId)
|
||||
proc firstSeen*(f: FloodSub, saltedId: SaltedId): Moment =
|
||||
f.seen.addedAt(saltedId)
|
||||
|
||||
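The SaltedId scheme above copies a pre-seeded SHA-256 context per message, so the table key depends on secret random data rather than on attacker-controlled bytes. A standalone sketch of the idea; the seed is hard-coded here only for illustration, the real code feeds it from the DRBG:

import nimcrypto/[hash, sha2]

var seenSalt: sha256
seenSalt.init()
seenSalt.update([byte 0xde, 0xad, 0xbe, 0xef])   # random salt in the real code

proc saltedIdOf(msgId: openArray[byte]): MDigest[256] =
  var tmp = seenSalt        # cheap value copy of the partially updated context
  tmp.update(msgId)
  tmp.finish()              # digest over salt || msgId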
proc handleSubscribe*(f: FloodSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
proc handleSubscribe(f: FloodSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
logScope:
|
||||
peer
|
||||
topic
|
||||
@@ -98,7 +105,15 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
|
||||
|
||||
method rpcHandler*(f: FloodSub,
|
||||
peer: PubSubPeer,
|
||||
rpcMsg: RPCMsg) {.async.} =
|
||||
data: seq[byte]) {.async.} =
|
||||
var rpcMsg = decodeRpcMsg(data).valueOr:
|
||||
debug "failed to decode msg from peer", peer, err = error
|
||||
raise newException(CatchableError, "Peer msg couldn't be decoded")
|
||||
|
||||
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
|
||||
# trigger hooks
|
||||
peer.recvObservers(rpcMsg)
|
||||
|
||||
for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
|
||||
template sub: untyped = rpcMsg.subscriptions[i]
|
||||
f.handleSubscribe(peer, sub.topic, sub.subscribe)
|
||||
@@ -111,9 +126,11 @@ method rpcHandler*(f: FloodSub,
|
||||
# TODO: descore peers due to error during message validation (malicious?)
|
||||
continue
|
||||
|
||||
let msgId = msgIdResult.get
|
||||
let
|
||||
msgId = msgIdResult.get
|
||||
saltedId = f.salt(msgId)
|
||||
|
||||
if f.addSeen(msgId):
|
||||
if f.addSeen(saltedId):
|
||||
trace "Dropping already-seen message", msgId, peer
|
||||
continue
|
||||
|
||||
@@ -142,16 +159,19 @@ method rpcHandler*(f: FloodSub,
|
||||
discard
|
||||
|
||||
var toSendPeers = initHashSet[PubSubPeer]()
|
||||
for t in msg.topicIds: # for every topic in the message
|
||||
if t notin f.topics:
|
||||
continue
|
||||
f.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
|
||||
let topic = msg.topic
|
||||
if topic notin f.topics:
|
||||
debug "Dropping message due to topic not in floodsub topics", topic, msgId, peer
|
||||
continue
|
||||
|
||||
await handleData(f, t, msg.data)
|
||||
f.floodsub.withValue(topic, peers):
|
||||
toSendPeers.incl(peers[])
|
||||
|
||||
await handleData(f, topic, msg.data)
|
||||
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
|
||||
trace "Forwared message to peers", peers = toSendPeers.len
|
||||
|
||||
f.updateMetrics(rpcMsg)
|
||||
@@ -207,13 +227,13 @@ method publish*(f: FloodSub,
|
||||
trace "Created new message",
|
||||
msg = shortLog(msg), peers = peers.len, topic, msgId
|
||||
|
||||
if f.addSeen(msgId):
|
||||
if f.addSeen(f.salt(msgId)):
|
||||
# custom msgid providers might cause this
|
||||
trace "Dropping already-seen message", msgId, topic
|
||||
return 0
|
||||
|
||||
# Try to send to all peers that are known to be interested
|
||||
f.broadcast(peers, RPCMsg(messages: @[msg]))
|
||||
f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
libp2p_pubsub_messages_published.inc(labelValues = [topic])
|
||||
@@ -223,10 +243,13 @@ method publish*(f: FloodSub,
|
||||
return peers.len
|
||||
|
||||
method initPubSub*(f: FloodSub)
|
||||
{.raises: [Defect, InitializationError].} =
|
||||
{.raises: [InitializationError].} =
|
||||
procCall PubSub(f).initPubSub()
|
||||
f.seen = TimedCache[MessageId].init(2.minutes)
|
||||
f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))
|
||||
hmacDrbgGenerate(f.rng[], f.seenSalt)
|
||||
f.seen = TimedCache[SaltedId].init(2.minutes)
|
||||
f.seenSalt.init()
|
||||
|
||||
var tmp: array[32, byte]
|
||||
hmacDrbgGenerate(f.rng[], tmp)
|
||||
f.seenSalt.update(tmp)
|
||||
|
||||
f.init()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -9,20 +9,18 @@
|
||||
|
||||
## Gossip based publishing
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sets, sequtils]
|
||||
import chronos, chronicles, metrics
|
||||
import chronos/ratelimit
|
||||
import ./pubsub,
|
||||
./floodsub,
|
||||
./pubsubpeer,
|
||||
./peertable,
|
||||
./mcache,
|
||||
./timedcache,
|
||||
./rpc/[messages, message],
|
||||
./rpc/[messages, message, protobuf],
|
||||
../protocol,
|
||||
../../stream/connection,
|
||||
../../peerinfo,
|
||||
@@ -43,41 +41,87 @@ logScope:
|
||||
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
|
||||
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
|
||||
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
|
||||
declareCounter(libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant")
|
||||
declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizations", labels=["kind"])
|
||||
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
|
||||
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
|
||||
|
||||
proc init*(_: type[GossipSubParams]): GossipSubParams =
|
||||
when defined(libp2p_expensive_metrics):
|
||||
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
|
||||
|
||||
proc init*(
|
||||
_: type[GossipSubParams],
|
||||
pruneBackoff = 1.minutes,
|
||||
unsubscribeBackoff = 5.seconds,
|
||||
floodPublish = true,
|
||||
gossipFactor: float64 = 0.25,
|
||||
d = GossipSubD,
|
||||
dLow = GossipSubDlo,
|
||||
dHigh = GossipSubDhi,
|
||||
dScore = GossipSubDlo,
|
||||
dOut = GossipSubDlo - 1, # DLow - 1
|
||||
dLazy = GossipSubD, # Like D,
|
||||
heartbeatInterval = GossipSubHeartbeatInterval,
|
||||
historyLength = GossipSubHistoryLength,
|
||||
historyGossip = GossipSubHistoryGossip,
|
||||
fanoutTTL = GossipSubFanoutTTL,
|
||||
seenTTL = 2.minutes,
|
||||
gossipThreshold = -100.0,
|
||||
publishThreshold = -1000.0,
|
||||
graylistThreshold = -10000.0,
|
||||
opportunisticGraftThreshold = 0.0,
|
||||
decayInterval = 1.seconds,
|
||||
decayToZero = 0.01,
|
||||
retainScore = 2.minutes,
|
||||
appSpecificWeight = 0.0,
|
||||
ipColocationFactorWeight = 0.0,
|
||||
ipColocationFactorThreshold = 1.0,
|
||||
behaviourPenaltyWeight = -1.0,
|
||||
behaviourPenaltyDecay = 0.999,
|
||||
directPeers = initTable[PeerId, seq[MultiAddress]](),
|
||||
disconnectBadPeers = false,
|
||||
enablePX = false,
|
||||
bandwidthEstimatebps = 100_000_000, # 100 Mbps or 12.5 MBps
|
||||
overheadRateLimit = Opt.none(tuple[bytes: int, interval: Duration]),
|
||||
disconnectPeerAboveRateLimit = false,
|
||||
maxNumElementsInNonPriorityQueue = DefaultMaxNumElementsInNonPriorityQueue): GossipSubParams =
|
||||
|
||||
GossipSubParams(
|
||||
explicit: true,
|
||||
pruneBackoff: 1.minutes,
|
||||
unsubscribeBackoff: 5.seconds,
|
||||
floodPublish: true,
|
||||
gossipFactor: 0.25,
|
||||
d: GossipSubD,
|
||||
dLow: GossipSubDlo,
|
||||
dHigh: GossipSubDhi,
|
||||
dScore: GossipSubDlo,
|
||||
dOut: GossipSubDlo - 1, # DLow - 1
|
||||
dLazy: GossipSubD, # Like D
|
||||
heartbeatInterval: GossipSubHeartbeatInterval,
|
||||
historyLength: GossipSubHistoryLength,
|
||||
historyGossip: GossipSubHistoryGossip,
|
||||
fanoutTTL: GossipSubFanoutTTL,
|
||||
seenTTL: 2.minutes,
|
||||
gossipThreshold: -100,
|
||||
publishThreshold: -1000,
|
||||
graylistThreshold: -10000,
|
||||
opportunisticGraftThreshold: 0,
|
||||
decayInterval: 1.seconds,
|
||||
decayToZero: 0.01,
|
||||
retainScore: 2.minutes,
|
||||
appSpecificWeight: 0.0,
|
||||
ipColocationFactorWeight: 0.0,
|
||||
ipColocationFactorThreshold: 1.0,
|
||||
behaviourPenaltyWeight: -1.0,
|
||||
behaviourPenaltyDecay: 0.999,
|
||||
disconnectBadPeers: false,
|
||||
enablePX: false
|
||||
pruneBackoff: pruneBackoff,
|
||||
unsubscribeBackoff: unsubscribeBackoff,
|
||||
floodPublish: floodPublish,
|
||||
gossipFactor: gossipFactor,
|
||||
d: d,
|
||||
dLow: dLow,
|
||||
dHigh: dHigh,
|
||||
dScore: dScore,
|
||||
dOut: dOut,
|
||||
dLazy: dLazy,
|
||||
heartbeatInterval: heartbeatInterval,
|
||||
historyLength: historyLength,
|
||||
historyGossip: historyGossip,
|
||||
fanoutTTL: fanoutTTL,
|
||||
seenTTL: seenTTL,
|
||||
gossipThreshold: gossipThreshold,
|
||||
publishThreshold: publishThreshold,
|
||||
graylistThreshold: graylistThreshold,
|
||||
opportunisticGraftThreshold: opportunisticGraftThreshold,
|
||||
decayInterval: decayInterval,
|
||||
decayToZero: decayToZero,
|
||||
retainScore: retainScore,
|
||||
appSpecificWeight: appSpecificWeight,
|
||||
ipColocationFactorWeight: ipColocationFactorWeight,
|
||||
ipColocationFactorThreshold: ipColocationFactorThreshold,
|
||||
behaviourPenaltyWeight: behaviourPenaltyWeight,
|
||||
behaviourPenaltyDecay: behaviourPenaltyDecay,
|
||||
directPeers: directPeers,
|
||||
disconnectBadPeers: disconnectBadPeers,
|
||||
enablePX: enablePX,
|
||||
bandwidthEstimatebps: bandwidthEstimatebps,
|
||||
overheadRateLimit: overheadRateLimit,
|
||||
disconnectPeerAboveRateLimit: disconnectPeerAboveRateLimit,
|
||||
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
|
||||
)
|
||||
|
||||
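Because every field of GossipSubParams now has a keyword default, callers override only what they need. A hypothetical tuning that keeps everything else at the defaults:

import chronos
import libp2p/protocols/pubsub/gossipsub

let params = GossipSubParams.init(
  floodPublish = false,                  # rely on the mesh instead of flood publishing
  heartbeatInterval = 700.milliseconds,
  disconnectBadPeers = true)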
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
|
||||
@@ -108,6 +152,8 @@ proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
|
||||
err("gossipsub: behaviourPenaltyWeight parameter error, Must be negative")
|
||||
elif parameters.behaviourPenaltyDecay < 0 or parameters.behaviourPenaltyDecay >= 1:
|
||||
err("gossipsub: behaviourPenaltyDecay parameter error, Must be between 0 and 1")
|
||||
elif parameters.maxNumElementsInNonPriorityQueue <= 0:
|
||||
err("gossipsub: maxNumElementsInNonPriorityQueue parameter error, Must be > 0")
|
||||
else:
|
||||
ok()
|
||||
|
||||
@@ -150,7 +196,7 @@ method init*(g: GossipSub) =
|
||||
g.codecs &= GossipSubCodec
|
||||
g.codecs &= GossipSubCodec_10
|
||||
|
||||
method onNewPeer(g: GossipSub, peer: PubSubPeer) =
|
||||
method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
# Make sure stats and peer information match, even when reloading peer stats
|
||||
# from a previous connection
|
||||
@@ -158,14 +204,18 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
|
||||
peer.appScore = stats.appScore
|
||||
peer.behaviourPenalty = stats.behaviourPenalty
|
||||
|
||||
# Check if the score is below the threshold and disconnect the peer if necessary
|
||||
g.disconnectIfBadScorePeer(peer, stats.score)
|
||||
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
peer.pingBudget = PingsPeerBudget
|
||||
|
||||
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
|
||||
case event.kind
|
||||
of PubSubPeerEventKind.Connected:
|
||||
of PubSubPeerEventKind.StreamOpened:
|
||||
discard
|
||||
of PubSubPeerEventKind.Disconnected:
|
||||
# If a send connection is lost, it's better to remove peer from the mesh -
|
||||
of PubSubPeerEventKind.StreamClosed:
|
||||
# If a send stream is lost, it's better to remove peer from the mesh -
|
||||
# if it gets reestablished, the peer will be readded to the mesh, and if it
|
||||
# doesn't, well.. then we hope the peer is going away!
|
||||
for topic, peers in p.mesh.mpairs():
|
||||
@@ -173,6 +223,8 @@ method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent
|
||||
peers.excl(peer)
|
||||
for _, peers in p.fanout.mpairs():
|
||||
peers.excl(peer)
|
||||
of PubSubPeerEventKind.DisconnectionRequested:
|
||||
asyncSpawn p.disconnectPeer(peer) # this should unsubscribePeer the peer too
|
||||
|
||||
procCall FloodSub(p).onPubSubPeerEvent(peer, event)
|
||||
|
||||
@@ -187,11 +239,11 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
|
||||
return
|
||||
|
||||
# remove from peer IPs collection too
|
||||
if pubSubPeer.address.isSome():
|
||||
g.peersInIP.withValue(pubSubPeer.address.get(), s):
|
||||
pubSubPeer.address.withValue(address):
|
||||
g.peersInIP.withValue(address, s):
|
||||
s[].excl(pubSubPeer.peerId)
|
||||
if s[].len == 0:
|
||||
g.peersInIP.del(pubSubPeer.address.get())
|
||||
g.peersInIP.del(address)
|
||||
|
||||
for t in toSeq(g.mesh.keys):
|
||||
trace "pruning unsubscribing peer", pubSubPeer, score = pubSubPeer.score
|
||||
@@ -200,8 +252,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
|
||||
|
||||
for t in toSeq(g.gossipsub.keys):
|
||||
g.gossipsub.removePeer(t, pubSubPeer)
|
||||
# also try to remove from explicit table here
|
||||
g.explicit.removePeer(t, pubSubPeer)
|
||||
# also try to remove from direct peers table here
|
||||
g.subscribedDirectPeers.removePeer(t, pubSubPeer)
|
||||
|
||||
for t in toSeq(g.fanout.keys):
|
||||
g.fanout.removePeer(t, pubSubPeer)
|
||||
@@ -210,12 +262,14 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
|
||||
for topic, info in stats[].topicInfos.mpairs:
|
||||
info.firstMessageDeliveries = 0
|
||||
|
||||
pubSubPeer.stopSendNonPriorityTask()
|
||||
|
||||
procCall FloodSub(g).unsubscribePeer(peer)
|
||||
|
||||
proc handleSubscribe*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
proc handleSubscribe(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
subscribe: bool) =
|
||||
logScope:
|
||||
peer
|
||||
topic
|
||||
@@ -240,7 +294,7 @@ proc handleSubscribe*(g: GossipSub,
|
||||
# subscribe remote peer to the topic
|
||||
discard g.gossipsub.addPeer(topic, peer)
|
||||
if peer.peerId in g.parameters.directPeers:
|
||||
discard g.explicit.addPeer(topic, peer)
|
||||
discard g.subscribedDirectPeers.addPeer(topic, peer)
|
||||
else:
|
||||
trace "peer unsubscribed from topic"
|
||||
|
||||
@@ -254,7 +308,7 @@ proc handleSubscribe*(g: GossipSub,
|
||||
|
||||
g.fanout.removePeer(topic, peer)
|
||||
if peer.peerId in g.parameters.directPeers:
|
||||
g.explicit.removePeer(topic, peer)
|
||||
g.subscribedDirectPeers.removePeer(topic, peer)
|
||||
|
||||
trace "gossip peers", peers = g.gossipsub.peers(topic), topic
|
||||
|
||||
@@ -262,54 +316,65 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
|
||||
g.handlePrune(peer, control.prune)
|
||||
|
||||
var respControl: ControlMessage
|
||||
g.handleIDontWant(peer, control.idontwant)
|
||||
let iwant = g.handleIHave(peer, control.ihave)
|
||||
if iwant.messageIds.len > 0:
|
||||
if iwant.messageIDs.len > 0:
|
||||
respControl.iwant.add(iwant)
|
||||
respControl.prune.add(g.handleGraft(peer, control.graft))
|
||||
let messages = g.handleIWant(peer, control.iwant)
|
||||
|
||||
if
|
||||
respControl.prune.len > 0 or
|
||||
respControl.iwant.len > 0 or
|
||||
messages.len > 0:
|
||||
# iwant and prunes from here, also messages
|
||||
let
|
||||
isPruneNotEmpty = respControl.prune.len > 0
|
||||
isIWantNotEmpty = respControl.iwant.len > 0
|
||||
|
||||
for smsg in messages:
|
||||
for topic in smsg.topicIds:
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
|
||||
if isPruneNotEmpty or isIWantNotEmpty:
|
||||
|
||||
if isIWantNotEmpty:
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
if isPruneNotEmpty:
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicID):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicID])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
|
||||
|
||||
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
|
||||
|
||||
for prune in respControl.prune:
|
||||
if g.knownTopics.contains(prune.topicId):
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
|
||||
|
||||
trace "sending control message", msg = shortLog(respControl), peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(control: some(respControl), messages: messages))
|
||||
RPCMsg(control: some(respControl)), isHighPriority = true)
|
||||
|
||||
if messages.len > 0:
|
||||
for smsg in messages:
|
||||
let topic = smsg.topic
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
|
||||
|
||||
# iwant replies have lower priority
|
||||
trace "sending iwant reply messages", peer
|
||||
g.send(
|
||||
peer,
|
||||
RPCMsg(messages: messages), isHighPriority = false)
|
||||
|
||||
proc validateAndRelay(g: GossipSub,
|
||||
msg: Message,
|
||||
msgId, msgIdSalted: MessageId,
|
||||
msgId: MessageId, saltedId: SaltedId,
|
||||
peer: PubSubPeer) {.async.} =
|
||||
try:
|
||||
let validation = await g.validate(msg)
|
||||
|
||||
var seenPeers: HashSet[PubSubPeer]
|
||||
discard g.validationSeen.pop(msgIdSalted, seenPeers)
|
||||
discard g.validationSeen.pop(saltedId, seenPeers)
|
||||
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
|
||||
libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
|
||||
|
||||
case validation
|
||||
of ValidationResult.Reject:
|
||||
debug "Dropping message after validation, reason: reject",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg.topicIds)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
return
|
||||
of ValidationResult.Ignore:
|
||||
debug "Dropping message after validation, reason: ignore",
|
||||
@@ -321,40 +386,107 @@ proc validateAndRelay(g: GossipSub,
|
||||
# store in cache only after validation
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
g.rewardDelivered(peer, msg.topicIds, true)
|
||||
let topic = msg.topic
|
||||
g.rewardDelivered(peer, topic, true)
|
||||
|
||||
var toSendPeers = HashSet[PubSubPeer]()
|
||||
for t in msg.topicIds: # for every topic in the message
|
||||
if t notin g.topics:
|
||||
continue
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
|
||||
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])
|
||||
g.floodsub.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
g.mesh.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
g.subscribedDirectPeers.withValue(topic, peers): toSendPeers.incl(peers[])
|
||||
|
||||
# Don't send it to source peer, or peers that
|
||||
# sent it during validation
|
||||
toSendPeers.excl(peer)
|
||||
toSendPeers.excl(seenPeers)
|
||||
|
||||
# IDontWant is only worth it if the message is substantially
|
||||
# bigger than the messageId
|
||||
if msg.data.len > msgId.len * 10:
|
||||
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
|
||||
idontwant: @[ControlIWant(messageIDs: @[msgId])]
|
||||
))), isHighPriority = true)
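# Worked example (sizes assumed, not from the source): with a typical
# 20-byte message id, the IDontWant hint is only broadcast for payloads
# larger than 200 bytes; for smaller messages the control entry would cost
# roughly as much as the duplicate delivery it is trying to prevent.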
for peer in toSendPeers:
|
||||
for heDontWant in peer.heDontWants:
|
||||
if saltedId in heDontWant:
|
||||
seenPeers.incl(peer)
|
||||
libp2p_gossipsub_idontwant_saved_messages.inc
|
||||
libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
|
||||
break
|
||||
toSendPeers.excl(seenPeers)
|
||||
|
||||
# In theory, if topics are the same in all messages, we could batch - we'd
|
||||
# also have to be careful to only include validated messages
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
|
||||
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
|
||||
for topic in msg.topicIds:
|
||||
if topic notin g.topics: continue
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = ["generic"])
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = [topic])
|
||||
else:
|
||||
libp2p_pubsub_messages_rebroadcasted.inc(toSendPeers.len.int64, labelValues = ["generic"])
|
||||
|
||||
await handleData(g, topic, msg.data)
|
||||
await handleData(g, topic, msg.data)
|
||||
except CatchableError as exc:
|
||||
info "validateAndRelay failed", msg=exc.msg
|
||||
|
||||
proc dataAndTopicsIdSize(msgs: seq[Message]): int =
|
||||
msgs.mapIt(it.data.len + it.topic.len).foldl(a + b, 0)
|
||||
|
||||
proc messageOverhead(g: GossipSub, msg: RPCMsg, msgSize: int): int =
|
||||
# In this way we count even ignored fields by protobuf
|
||||
let
|
||||
payloadSize =
|
||||
if g.verifySignature:
|
||||
byteSize(msg.messages)
|
||||
else:
|
||||
dataAndTopicsIdSize(msg.messages)
|
||||
controlSize = msg.control.withValue(control):
|
||||
byteSize(control.ihave) + byteSize(control.iwant)
|
||||
do: # no control message
|
||||
0
|
||||
|
||||
msgSize - payloadSize - controlSize
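A quick numeric illustration of the overhead accounting above (all sizes are invented for the example, not taken from the diff): whatever part of the received RPC is not attributable to message payloads or to ihave/iwant lists is charged against the peer's overhead budget.

let
  msgSize = 1200      # bytes received on the wire for one RPC
  payloadSize = 1000  # size attributed to the messages themselves
  controlSize = 150   # size attributed to ihave/iwant lists
  overhead = msgSize - payloadSize - controlSize
assert overhead == 50 # these 50 bytes count towards the overhead rate limit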
proc rateLimit*(g: GossipSub, peer: PubSubPeer, overhead: int) {.async.} =
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(overhead):
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
debug "Peer sent too much useless application data and it's above rate limit.", peer, overhead
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
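For reference, a minimal standalone sketch of the token-bucket check used above, relying only on TokenBucket.new and tryConsume as they appear in this diff; the budget numbers are assumptions, not library defaults.

import chronos
import chronos/ratelimit

# Hypothetical budget: 128 KiB of overhead allowed per second.
let overheadBudget = TokenBucket.new(128 * 1024, 1.seconds)

if not overheadBudget.tryConsume(4096):
  # In gossipsub this is where the rate-limit-hit metric is bumped and,
  # if disconnectPeerAboveRateLimit is set, the peer is disconnected.
  echo "peer exceeded its overhead budget"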
method rpcHandler*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
rpcMsg: RPCMsg) {.async.} =
|
||||
data: seq[byte]) {.async.} =
|
||||
let msgSize = data.len
|
||||
var rpcMsg = decodeRpcMsg(data).valueOr:
|
||||
debug "failed to decode msg from peer", peer, err = error
|
||||
await rateLimit(g, peer, msgSize)
|
||||
# Raising in the handler closes the gossipsub connection (but doesn't
|
||||
# disconnect the peer!)
|
||||
# TODO evaluate behaviour penalty values
|
||||
peer.behaviourPenalty += 0.1
|
||||
|
||||
raise newException(CatchableError, "Peer msg couldn't be decoded")
|
||||
|
||||
when defined(libp2p_expensive_metrics):
|
||||
for m in rpcMsg.messages:
|
||||
libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, m.topic])
|
||||
|
||||
trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
|
||||
await rateLimit(g, peer, g.messageOverhead(rpcMsg, msgSize))
|
||||
|
||||
# trigger hooks - these may modify the message
|
||||
peer.recvObservers(rpcMsg)
|
||||
|
||||
if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
|
||||
g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
|
||||
peer.pingBudget.dec
|
||||
|
||||
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
|
||||
template sub: untyped = rpcMsg.subscriptions[i]
|
||||
g.handleSubscribe(peer, sub.topic, sub.subscribe)
|
||||
@@ -374,16 +506,15 @@ method rpcHandler*(g: GossipSub,
|
||||
if msgIdResult.isErr:
|
||||
debug "Dropping message due to failed message id generation",
|
||||
error = msgIdResult.error
|
||||
# TODO: descore peers due to error during message validation (malicious?)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
let
|
||||
msgId = msgIdResult.get
|
||||
msgIdSalted = msgId & g.seenSalt
|
||||
msgIdSalted = g.salt(msgId)
|
||||
topic = msg.topic
|
||||
|
||||
# addSeen adds salt to msgId to avoid
|
||||
# remote attacking the hash function
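# (illustrative detail, not spelled out in this hunk: the salt is a random
# per-node value mixed into the id before it is keyed into the seen-cache,
# so a remote peer cannot pre-compute ids that collide in our table)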
if g.addSeen(msgId):
|
||||
if g.addSeen(msgIdSalted):
|
||||
trace "Dropping already-seen message", msgId = shortLog(msgId), peer
|
||||
|
||||
var alreadyReceived = false
|
||||
@@ -393,8 +524,8 @@ method rpcHandler*(g: GossipSub,
|
||||
alreadyReceived = true
|
||||
|
||||
if not alreadyReceived:
|
||||
let delay = Moment.now() - g.firstSeen(msgId)
|
||||
g.rewardDelivered(peer, msg.topicIds, false, delay)
|
||||
let delay = Moment.now() - g.firstSeen(msgIdSalted)
|
||||
g.rewardDelivered(peer, topic, false, delay)
|
||||
|
||||
libp2p_gossipsub_duplicate.inc()
|
||||
|
||||
@@ -404,7 +535,7 @@ method rpcHandler*(g: GossipSub,
|
||||
libp2p_gossipsub_received.inc()
|
||||
|
||||
# avoid processing messages we are not interested in
|
||||
if msg.topicIds.allIt(it notin g.topics):
|
||||
if topic notin g.topics:
|
||||
debug "Dropping message of topic without subscription", msgId = shortLog(msgId), peer
|
||||
continue
|
||||
|
||||
@@ -412,14 +543,14 @@ method rpcHandler*(g: GossipSub,
|
||||
# always validate if signature is present or required
|
||||
debug "Dropping message due to failed signature verification",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg.topicIds)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
if msg.seqno.len > 0 and msg.seqno.len != 8:
|
||||
# if we have seqno should be 8 bytes long
|
||||
debug "Dropping message due to invalid seqno length",
|
||||
msgId = shortLog(msgId), peer
|
||||
g.punishInvalidMessage(peer, msg.topicIds)
|
||||
await g.punishInvalidMessage(peer, msg)
|
||||
continue
|
||||
|
||||
# g.anonymize needs no evaluation when receiving messages
|
||||
@@ -463,70 +594,76 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
|
||||
g.broadcast(mpeers, msg)
|
||||
g.broadcast(mpeers, msg, isHighPriority = true)
|
||||
|
||||
for peer in mpeers:
|
||||
g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
|
||||
|
||||
g.mesh.del(topic)
|
||||
|
||||
|
||||
# Send unsubscribe (in reverse order to sub/graft)
|
||||
procCall PubSub(g).onTopicSubscription(topic, subscribed)
|
||||
|
||||
method publish*(g: GossipSub,
|
||||
topic: string,
|
||||
data: seq[byte]): Future[int] {.async.} =
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
logScope:
|
||||
topic
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
if topic.len <= 0: # data could be 0/empty
|
||||
debug "Empty topic, skipping publish"
|
||||
return 0
|
||||
|
||||
# base returns always 0
|
||||
discard await procCall PubSub(g).publish(topic, data)
|
||||
|
||||
trace "Publishing message on topic", data = data.shortLog
|
||||
|
||||
var peers: HashSet[PubSubPeer]
|
||||
|
||||
if g.parameters.floodPublish:
|
||||
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
|
||||
# but a peer's own messages will always be published to all known peers in the topic.
|
||||
for peer in g.gossipsub.getOrDefault(topic):
|
||||
if peer.score >= g.parameters.publishThreshold:
|
||||
trace "publish: including flood/high score peer", peer
|
||||
peers.incl(peer)
|
||||
|
||||
# add always direct peers
|
||||
peers.incl(g.explicit.getOrDefault(topic))
|
||||
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
|
||||
|
||||
if topic in g.topics: # if we're subscribed use the mesh
|
||||
peers.incl(g.mesh.getOrDefault(topic))
|
||||
|
||||
if peers.len < g.parameters.dLow and g.parameters.floodPublish == false:
|
||||
# not subscribed or bad mesh, send to fanout peers
|
||||
# disable for floodPublish, since we already sent to every good peer
|
||||
#
|
||||
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
|
||||
if fanoutPeers.len == 0:
|
||||
g.replenishFanout(topic)
|
||||
fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
|
||||
if g.parameters.floodPublish:
|
||||
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
|
||||
# but a peer's own messages will always be published to all known peers in the topic, limited
|
||||
# to the amount of peers we can send it to in one heartbeat
|
||||
|
||||
let maxPeersToFlood =
|
||||
if g.parameters.bandwidthEstimatebps > 0:
|
||||
let
|
||||
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
|
||||
msToTransmit = max(data.len div bandwidth, 1)
|
||||
max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow)
|
||||
else:
|
||||
int.high() # unlimited
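# Worked example (all numbers assumed): with bandwidthEstimatebps set to
# 100_000_000 (100 Mbit/s), bandwidth = 100_000_000 div 8 div 1000 = 12_500
# bytes/ms; a 250_000-byte message gives msToTransmit = 20 ms, so with a
# 1-second heartbeat maxPeersToFlood = max(1000 div 20, dLow) = 50 peers.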
for peer in g.gossipsub.getOrDefault(topic):
|
||||
if peers.len >= maxPeersToFlood: break
|
||||
|
||||
if peer.score >= g.parameters.publishThreshold:
|
||||
trace "publish: including flood/high score peer", peer
|
||||
peers.incl(peer)
|
||||
|
||||
elif peers.len < g.parameters.dLow:
|
||||
# not subscribed or bad mesh, send to fanout peers
|
||||
# when flood-publishing, fanout won't help since all potential peers have
|
||||
# already been added
|
||||
|
||||
g.replenishFanout(topic) # Make sure fanout is populated
|
||||
|
||||
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
|
||||
g.rng.shuffle(fanoutPeers)
|
||||
if fanoutPeers.len + peers.len > g.parameters.d:
|
||||
fanoutPeers.setLen(g.parameters.d - peers.len)
|
||||
|
||||
for fanPeer in fanoutPeers:
|
||||
peers.incl(fanPeer)
|
||||
if peers.len > g.parameters.d: break
|
||||
|
||||
# even if we couldn't publish,
|
||||
# we still attempted to publish
|
||||
# on the topic, so it makes sense
|
||||
# to update the last topic publish
|
||||
# time
|
||||
# Attempting to publish counts as fanout send (even if the message
|
||||
# ultimately is not sent)
|
||||
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
|
||||
|
||||
if peers.len == 0:
|
||||
@@ -534,7 +671,6 @@ method publish*(g: GossipSub,
|
||||
debug "No peers for topic, skipping publish", peersOnTopic = topicPeers.len,
|
||||
connectedPeers = topicPeers.filterIt(it.connected).len,
|
||||
topic
|
||||
# skipping topic as our metrics finds that heavy
|
||||
libp2p_gossipsub_failed_publish.inc()
|
||||
return 0
|
||||
|
||||
@@ -555,14 +691,16 @@ method publish*(g: GossipSub,
|
||||
|
||||
trace "Created new message", msg = shortLog(msg), peers = peers.len
|
||||
|
||||
if g.addSeen(msgId):
|
||||
# custom msgid providers might cause this
|
||||
if g.addSeen(g.salt(msgId)):
|
||||
# If the message was received or published recently, don't re-publish it -
|
||||
# this might happen when not using sequence id:s and / or with a custom
|
||||
# message id provider
|
||||
trace "Dropping already-seen message"
|
||||
return 0
|
||||
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
|
||||
@@ -570,15 +708,16 @@ method publish*(g: GossipSub,
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = ["generic"])
|
||||
|
||||
trace "Published message to peers", peers=peers.len
|
||||
|
||||
return peers.len
|
||||
|
||||
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let peer = g.peers.getOrDefault(id)
|
||||
if isNil(peer):
|
||||
if id notin g.peers:
|
||||
trace "Attempting to dial a direct peer", peer = id
|
||||
if g.switch.isConnected(id):
|
||||
warn "We are connected to a direct peer, but it isn't a GossipSub peer!", id
|
||||
return
|
||||
try:
|
||||
await g.switch.connect(id, addrs)
|
||||
await g.switch.connect(id, addrs, forceDial = true)
|
||||
# populate the peer after it's connected
|
||||
discard g.getOrCreatePeer(id, g.codecs)
|
||||
except CancelledError as exc:
|
||||
@@ -596,33 +735,43 @@ proc maintainDirectPeers(g: GossipSub) {.async.} =
|
||||
for id, addrs in g.parameters.directPeers:
|
||||
await g.addDirectPeer(id, addrs)
|
||||
|
||||
method start*(g: GossipSub) {.async.} =
|
||||
method start*(
|
||||
g: GossipSub
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
|
||||
trace "gossipsub start"
|
||||
|
||||
if not g.heartbeatFut.isNil:
|
||||
warn "Starting gossipsub twice"
|
||||
return
|
||||
return fut
|
||||
|
||||
g.heartbeatFut = g.heartbeat()
|
||||
g.scoringHeartbeatFut = g.scoringHeartbeat()
|
||||
g.directPeersLoop = g.maintainDirectPeers()
|
||||
g.started = true
|
||||
fut
|
||||
|
||||
method stop*(g: GossipSub): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
|
||||
method stop*(g: GossipSub) {.async.} =
|
||||
trace "gossipsub stop"
|
||||
g.started = false
|
||||
if g.heartbeatFut.isNil:
|
||||
warn "Stopping gossipsub without starting it"
|
||||
return
|
||||
return fut
|
||||
|
||||
# stop heartbeat interval
|
||||
g.directPeersLoop.cancel()
|
||||
g.scoringHeartbeatFut.cancel()
|
||||
g.heartbeatFut.cancel()
|
||||
g.heartbeatFut = nil
|
||||
fut
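The start/stop signatures above use chronos' raw async form, in which the proc builds and returns its own future instead of relying on the async transformation. A self-contained sketch of the same pattern (the proc name is invented for the example):

import chronos

proc alreadyDone(): Future[void] {.async: (raises: [], raw: true).} =
  # raw: no implicit future is created for us, so we construct one,
  # complete it synchronously and hand it back to the caller.
  let fut = newFuture[void]()
  fut.complete()
  fut

waitFor alreadyDone()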
method initPubSub*(g: GossipSub)
|
||||
{.raises: [Defect, InitializationError].} =
|
||||
{.raises: [InitializationError].} =
|
||||
procCall FloodSub(g).initPubSub()
|
||||
|
||||
if not g.parameters.explicit:
|
||||
@@ -633,7 +782,18 @@ method initPubSub*(g: GossipSub)
|
||||
raise newException(InitializationError, $validationRes.error)
|
||||
|
||||
# init the floodsub stuff here, we customize timedcache in gossip!
|
||||
g.seen = TimedCache[MessageId].init(g.parameters.seenTTL)
|
||||
g.seen = TimedCache[SaltedId].init(g.parameters.seenTTL)
|
||||
|
||||
# init gossip stuff
|
||||
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
|
||||
|
||||
method getOrCreatePeer*(
|
||||
g: GossipSub,
|
||||
peerId: PeerId,
|
||||
protos: seq[string]): PubSubPeer =
|
||||
|
||||
let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
|
||||
g.parameters.overheadRateLimit.withValue(overheadRateLimit):
|
||||
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
|
||||
peer.maxNumElementsInNonPriorityQueue = g.parameters.maxNumElementsInNonPriorityQueue
|
||||
return peer
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sequtils, sets, algorithm, deques]
|
||||
import chronos, chronicles, metrics
|
||||
@@ -33,7 +30,7 @@ declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh wi
|
||||
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
|
||||
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
|
||||
|
||||
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
|
||||
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) =
|
||||
g.withPeerStats(p.peerId) do (stats: var PeerStats):
|
||||
var info = stats.topicInfos.getOrDefault(topic)
|
||||
info.graftTime = Moment.now()
|
||||
@@ -49,12 +46,10 @@ proc pruned*(g: GossipSub,
|
||||
p: PubSubPeer,
|
||||
topic: string,
|
||||
setBackoff: bool = true,
|
||||
backoff = none(Duration)) {.raises: [Defect].} =
|
||||
backoff = none(Duration)) =
|
||||
if setBackoff:
|
||||
let
|
||||
backoffDuration =
|
||||
if isSome(backoff): backoff.get()
|
||||
else: g.parameters.pruneBackoff
|
||||
backoffDuration = backoff.get(g.parameters.pruneBackoff)
|
||||
backoffMoment = Moment.fromNow(backoffDuration)
|
||||
|
||||
g.backingOff
|
||||
@@ -75,7 +70,7 @@ proc pruned*(g: GossipSub,
|
||||
|
||||
trace "pruned", peer=p, topic
|
||||
|
||||
proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
|
||||
proc handleBackingOff*(t: var BackoffTable, topic: string) =
|
||||
let now = Moment.now()
|
||||
var expired = toSeq(t.getOrDefault(topic).pairs())
|
||||
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
|
||||
@@ -84,7 +79,7 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].}
|
||||
t.withValue(topic, v):
|
||||
v[].del(peer)
|
||||
|
||||
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [Defect].} =
|
||||
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] =
|
||||
if not g.parameters.enablePX:
|
||||
return @[]
|
||||
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
|
||||
@@ -105,16 +100,17 @@ proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises:
|
||||
|
||||
proc handleGraft*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
grafts: seq[ControlGraft]): seq[ControlPrune] = # {.raises: [Defect].} TODO chronicles exception on windows
|
||||
grafts: seq[ControlGraft]): seq[ControlPrune] =
|
||||
var prunes: seq[ControlPrune]
|
||||
for graft in grafts:
|
||||
let topic = graft.topicId
|
||||
trace "peer grafted topic", peer, topic
|
||||
let topic = graft.topicID
|
||||
trace "peer grafted topicID", peer, topic
|
||||
|
||||
# It is an error to GRAFT on a explicit peer
|
||||
# It is an error to GRAFT on a direct peer
|
||||
if peer.peerId in g.parameters.directPeers:
|
||||
# receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
|
||||
warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
|
||||
# we are trusting direct peer not to abuse this
|
||||
warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
|
||||
peer, topic
|
||||
# and such an attempt should be logged and rejected with a PRUNE
|
||||
prunes.add(ControlPrune(
|
||||
@@ -194,31 +190,25 @@ proc handleGraft*(g: GossipSub,
|
||||
proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =
|
||||
var routingRecords: seq[(PeerId, Option[PeerRecord])]
|
||||
for record in prune.peers:
|
||||
let peerRecord =
|
||||
if record.signedPeerRecord.len == 0:
|
||||
none(PeerRecord)
|
||||
else:
|
||||
let signedRecord = SignedPeerRecord.decode(record.signedPeerRecord)
|
||||
if signedRecord.isErr:
|
||||
trace "peer sent invalid SPR", peer, error=signedRecord.error
|
||||
none(PeerRecord)
|
||||
var peerRecord = none(PeerRecord)
|
||||
if record.signedPeerRecord.len > 0:
|
||||
SignedPeerRecord.decode(record.signedPeerRecord).toOpt().withValue(spr):
|
||||
if record.peerId != spr.data.peerId:
|
||||
trace "peer sent envelope with wrong public key", peer
|
||||
else:
|
||||
if record.peerId != signedRecord.get().data.peerId:
|
||||
trace "peer sent envelope with wrong public key", peer
|
||||
none(PeerRecord)
|
||||
else:
|
||||
some(signedRecord.get().data)
|
||||
peerRecord = some(spr.data)
|
||||
else:
|
||||
trace "peer sent invalid SPR", peer
|
||||
|
||||
routingRecords.add((record.peerId, peerRecord))
|
||||
|
||||
routingRecords
|
||||
|
||||
|
||||
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} =
|
||||
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) =
|
||||
for prune in prunes:
|
||||
let topic = prune.topicId
|
||||
let topic = prune.topicID
|
||||
|
||||
trace "peer pruned topic", peer, topic
|
||||
trace "peer pruned topicID", peer, topic
|
||||
|
||||
# add peer backoff
|
||||
if prune.backoff > 0:
|
||||
@@ -248,39 +238,41 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
|
||||
|
||||
proc handleIHave*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
ihaves: seq[ControlIHave]): ControlIWant {.raises: [Defect].} =
|
||||
ihaves: seq[ControlIHave]): ControlIWant =
|
||||
var res: ControlIWant
|
||||
if peer.score < g.parameters.gossipThreshold:
|
||||
trace "ihave: ignoring low score peer", peer, score = peer.score
|
||||
elif peer.iHaveBudget <= 0:
|
||||
trace "ihave: ignoring out of budget peer", peer, score = peer.score
|
||||
else:
|
||||
# TODO review deduplicate algorithm
|
||||
# * https://github.com/nim-lang/Nim/blob/5f46474555ee93306cce55342e81130c1da79a42/lib/pure/collections/sequtils.nim#L184
|
||||
# * it's probably not efficient and might give preference to the first dupe
|
||||
let deIhaves = ihaves.deduplicate()
|
||||
for ihave in deIhaves:
|
||||
for ihave in ihaves:
|
||||
trace "peer sent ihave",
|
||||
peer, topic = ihave.topicId, msgs = ihave.messageIds
|
||||
if ihave.topicId in g.mesh:
|
||||
# also avoid duplicates here!
|
||||
let deIhavesMsgs = ihave.messageIds.deduplicate()
|
||||
for msgId in deIhavesMsgs:
|
||||
if not g.hasSeen(msgId):
|
||||
if peer.iHaveBudget > 0:
|
||||
res.messageIds.add(msgId)
|
||||
peer, topicID = ihave.topicID, msgs = ihave.messageIDs
|
||||
if ihave.topicID in g.topics:
|
||||
for msgId in ihave.messageIDs:
|
||||
if not g.hasSeen(g.salt(msgId)):
|
||||
if peer.iHaveBudget <= 0:
|
||||
break
|
||||
elif msgId notin res.messageIDs:
|
||||
res.messageIDs.add(msgId)
|
||||
dec peer.iHaveBudget
|
||||
trace "requested message via ihave", messageID=msgId
|
||||
else:
|
||||
break
|
||||
# shuffling res.messageIDs before sending it out to increase the likelihood
|
||||
# of getting an answer if the peer truncates the list due to internal size restrictions.
|
||||
g.rng.shuffle(res.messageIds)
|
||||
g.rng.shuffle(res.messageIDs)
|
||||
return res
|
||||
|
||||
proc handleIDontWant*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
iDontWants: seq[ControlIWant]) =
|
||||
for dontWant in iDontWants:
|
||||
for messageId in dontWant.messageIDs:
|
||||
if peer.heDontWants[^1].len > 1000: break
|
||||
peer.heDontWants[^1].incl(g.salt(messageId))
|
||||
|
||||
proc handleIWant*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
|
||||
iwants: seq[ControlIWant]): seq[Message] =
|
||||
var
|
||||
messages: seq[Message]
|
||||
invalidRequests = 0
|
||||
@@ -288,7 +280,7 @@ proc handleIWant*(g: GossipSub,
|
||||
trace "iwant: ignoring low score peer", peer, score = peer.score
|
||||
else:
|
||||
for iwant in iwants:
|
||||
for mid in iwant.messageIds:
|
||||
for mid in iwant.messageIDs:
|
||||
trace "peer sent iwant", peer, messageID = mid
|
||||
# canAskIWant will only return true once for a specific message
|
||||
if not peer.canAskIWant(mid):
|
||||
@@ -299,15 +291,14 @@ proc handleIWant*(g: GossipSub,
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
|
||||
return messages
|
||||
continue
|
||||
let msg = g.mcache.get(mid)
|
||||
if msg.isSome:
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
|
||||
messages.add(msg.get())
|
||||
else:
|
||||
let msg = g.mcache.get(mid).valueOr:
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
|
||||
continue
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
|
||||
messages.add(msg)
|
||||
return messages
|
||||
|
||||
proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
|
||||
proc commitMetrics(metrics: var MeshMetrics) =
|
||||
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
|
||||
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
|
||||
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
|
||||
@@ -316,7 +307,7 @@ proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
|
||||
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
|
||||
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])
|
||||
|
||||
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [Defect].} =
|
||||
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) =
|
||||
logScope:
|
||||
topic
|
||||
mesh = g.mesh.peers(topic)
|
||||
@@ -348,7 +339,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
# avoid negative score peers
|
||||
it.score >= 0.0 and
|
||||
it notin currentMesh[] and
|
||||
# don't pick explicit peers
|
||||
# don't pick direct peers
|
||||
it.peerId notin g.parameters.directPeers and
|
||||
# and avoid peers we are backing off
|
||||
it.peerId notin backingOff:
|
||||
@@ -388,7 +379,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
it notin currentMesh[] and
|
||||
# avoid negative score peers
|
||||
it.score >= 0.0 and
|
||||
# don't pick explicit peers
|
||||
# don't pick direct peers
|
||||
it.peerId notin g.parameters.directPeers and
|
||||
# and avoid peers we are backing off
|
||||
it.peerId notin backingOff:
|
||||
@@ -490,7 +481,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
# avoid negative score peers
|
||||
it.score >= median.score and
|
||||
it notin currentMesh[] and
|
||||
# don't pick explicit peers
|
||||
# don't pick direct peers
|
||||
it.peerId notin g.parameters.directPeers and
|
||||
# and avoid peers we are backing off
|
||||
it.peerId notin backingOff:
|
||||
@@ -537,16 +528,16 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
|
||||
# Send changes to peers after table updates to avoid stale state
|
||||
if grafts.len > 0:
|
||||
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
|
||||
g.broadcast(grafts, graft)
|
||||
g.broadcast(grafts, graft, isHighPriority = true)
|
||||
if prunes.len > 0:
|
||||
let prune = RPCMsg(control: some(ControlMessage(
|
||||
prune: @[ControlPrune(
|
||||
topicID: topic,
|
||||
peers: g.peerExchangeList(topic),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
|
||||
g.broadcast(prunes, prune)
|
||||
g.broadcast(prunes, prune, isHighPriority = true)
|
||||
|
||||
proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
|
||||
proc dropFanoutPeers*(g: GossipSub) =
|
||||
# drop peers that we haven't published to in
|
||||
# GossipSubFanoutTTL seconds
|
||||
let now = Moment.now()
|
||||
@@ -559,13 +550,13 @@ proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
|
||||
for topic in drops:
|
||||
g.lastFanoutPubSub.del topic
|
||||
|
||||
proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
|
||||
proc replenishFanout*(g: GossipSub, topic: string) =
|
||||
## get fanout peers for a topic
|
||||
logScope: topic
|
||||
trace "about to replenish fanout"
|
||||
|
||||
let currentMesh = g.mesh.getOrDefault(topic)
|
||||
if g.fanout.peers(topic) < g.parameters.dLow:
|
||||
let currentMesh = g.mesh.getOrDefault(topic)
|
||||
trace "replenishing fanout", peers = g.fanout.peers(topic)
|
||||
for peer in g.gossipsub.getOrDefault(topic):
|
||||
if peer in currentMesh: continue
|
||||
@@ -575,7 +566,7 @@ proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
|
||||
|
||||
trace "fanout replenished with peers", peers = g.fanout.peers(topic)
|
||||
|
||||
proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [Defect].} =
|
||||
proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
|
||||
## gossip iHave messages to peers
|
||||
##
|
||||
|
||||
@@ -586,7 +577,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
|
||||
trace "getting gossip peers (iHave)", ntopics=topics.len
|
||||
for topic in topics:
|
||||
if topic notin g.gossipsub:
|
||||
trace "topic not in gossip array, skipping", topicID = topic
|
||||
trace "topic not in gossip array, skipping", topic = topic
|
||||
continue
|
||||
|
||||
let mids = g.mcache.window(topic)
|
||||
@@ -619,26 +610,25 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
|
||||
x notin gossipPeers and
|
||||
x.score >= g.parameters.gossipThreshold
|
||||
|
||||
var target = g.parameters.dLazy
|
||||
let factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
|
||||
if factor > target:
|
||||
target = min(factor, allPeers.len)
|
||||
# https://github.com/libp2p/specs/blob/98c5aa9421703fc31b0833ad8860a55db15be063/pubsub/gossipsub/gossipsub-v1.1.md#adaptive-gossip-dissemination
|
||||
let
|
||||
factor = (g.parameters.gossipFactor.float * allPeers.len.float).int
|
||||
target = max(g.parameters.dLazy, factor)
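# Worked example (parameter values assumed): with dLazy = 6,
# gossipFactor = 0.25 and 40 eligible peers, factor = int(0.25 * 40) = 10,
# so gossip goes to 10 peers; with only 16 eligible peers factor = 4 and
# the dLazy floor of 6 applies instead.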
if target < allPeers.len:
|
||||
g.rng.shuffle(allPeers)
|
||||
allPeers.setLen(target)
|
||||
|
||||
let msgIdsAsSet = ihave.messageIds.toHashSet()
|
||||
|
||||
for peer in allPeers:
|
||||
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
|
||||
peer.sentIHaves[^1].incl(msgIdsAsSet)
|
||||
for msgId in ihave.messageIDs:
|
||||
peer.sentIHaves[^1].incl(msgId)
|
||||
|
||||
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
|
||||
|
||||
return control
|
||||
|
||||
proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
|
||||
proc onHeartbeat(g: GossipSub) =
|
||||
# reset IWANT budget
|
||||
# reset IHAVE cap
|
||||
block:
|
||||
@@ -646,7 +636,11 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
|
||||
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
|
||||
if peer.sentIHaves.len > g.parameters.historyLength:
|
||||
discard peer.sentIHaves.popLast()
|
||||
peer.heDontWants.addFirst(default(HashSet[SaltedId]))
|
||||
if peer.heDontWants.len > g.parameters.historyLength:
|
||||
discard peer.heDontWants.popLast()
|
||||
peer.iHaveBudget = IHavePeerBudget
|
||||
peer.pingBudget = PingsPeerBudget
|
||||
|
||||
var meshMetrics = MeshMetrics()
|
||||
|
||||
@@ -666,13 +660,14 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
|
||||
g.pruned(peer, t)
|
||||
g.mesh.removePeer(t, peer)
|
||||
prunes &= peer
|
||||
peer.clearNonPriorityQueue()
|
||||
if prunes.len > 0:
|
||||
let prune = RPCMsg(control: some(ControlMessage(
|
||||
prune: @[ControlPrune(
|
||||
topicID: t,
|
||||
peers: g.peerExchangeList(t),
|
||||
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
|
||||
g.broadcast(prunes, prune)
|
||||
g.broadcast(prunes, prune, isHighPriority = true)
|
||||
|
||||
# pass by ptr in order to both signal we want to update metrics
|
||||
# and as well update the struct for each topic during this iteration
|
||||
@@ -690,16 +685,14 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
|
||||
for peer, control in peers:
|
||||
# only ihave from here
|
||||
for ihave in control.ihave:
|
||||
if g.knownTopics.contains(ihave.topicId):
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
|
||||
if g.knownTopics.contains(ihave.topicID):
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicID])
|
||||
else:
|
||||
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
|
||||
g.send(peer, RPCMsg(control: some(control)))
|
||||
g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
|
||||
|
||||
g.mcache.shift() # shift the cache
|
||||
|
||||
# {.pop.} # raises [Defect]
|
||||
|
||||
proc heartbeat*(g: GossipSub) {.async.} =
|
||||
heartbeat "GossipSub", g.parameters.heartbeatInterval:
|
||||
trace "running heartbeat", instance = cast[int](g)
|
||||
|
||||
@@ -7,16 +7,16 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sets, options]
|
||||
import std/[tables, sets]
|
||||
import chronos, chronicles, metrics
|
||||
import chronos/ratelimit
|
||||
import "."/[types]
|
||||
import ".."/[pubsubpeer]
|
||||
import ../rpc/messages
|
||||
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
|
||||
import ../pubsub
|
||||
|
||||
logScope:
|
||||
topics = "libp2p gossipsub"
|
||||
@@ -30,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
|
||||
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
|
||||
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
|
||||
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
|
||||
declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])
|
||||
|
||||
proc init*(_: type[TopicParams]): TopicParams =
|
||||
TopicParams(
|
||||
@@ -55,7 +56,7 @@ proc init*(_: type[TopicParams]): TopicParams =
|
||||
proc withPeerStats*(
|
||||
g: GossipSub,
|
||||
peerId: PeerId,
|
||||
action: proc (stats: var PeerStats) {.gcsafe, raises: [Defect].}) =
|
||||
action: proc (stats: var PeerStats) {.gcsafe, raises: [].}) =
|
||||
## Add or update peer statistics for a particular peer id - the statistics
|
||||
## are retained across multiple connections until they expire
|
||||
g.peerStats.withValue(peerId, stats) do:
|
||||
@@ -74,39 +75,30 @@ func `/`(a, b: Duration): float64 =
|
||||
func byScore*(x,y: PubSubPeer): int = system.cmp(x.score, y.score)
|
||||
|
||||
proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
|
||||
if peer.address.isNone():
|
||||
0.0
|
||||
let address = peer.address.valueOr: return 0.0
|
||||
|
||||
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
|
||||
let
|
||||
ipPeers = g.peersInIP.getOrDefault(address).len().float64
|
||||
if ipPeers > g.parameters.ipColocationFactorThreshold:
|
||||
trace "colocationFactor over threshold", peer, address, ipPeers
|
||||
let over = ipPeers - g.parameters.ipColocationFactorThreshold
|
||||
over * over
|
||||
else:
|
||||
let
|
||||
address = peer.address.get()
|
||||
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
|
||||
let
|
||||
ipPeers = g.peersInIP.getOrDefault(address).len().float64
|
||||
if ipPeers > g.parameters.ipColocationFactorThreshold:
|
||||
trace "colocationFactor over threshold", peer, address, ipPeers
|
||||
let over = ipPeers - g.parameters.ipColocationFactorThreshold
|
||||
over * over
|
||||
else:
|
||||
0.0
|
||||
|
||||
{.pop.}
|
||||
|
||||
proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
|
||||
let agent =
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])
|
||||
0.0
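A small numeric check of the colocation penalty shape implemented above; the threshold and peer count are assumptions chosen for the example.

let
  ipColocationFactorThreshold = 3.0
  ipPeers = 6.0                       # six peers observed behind one address
  over = ipPeers - ipColocationFactorThreshold
  factor = if ipPeers > ipColocationFactorThreshold: over * over else: 0.0
assert factor == 9.0  # later multiplied by the negative ipColocationFactorWeight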
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
|
||||
try:
|
||||
await g.switch.disconnect(peer.peerId)
|
||||
except CatchableError as exc: # Never cancelled
|
||||
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
|
||||
|
||||
proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
|
||||
if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
|
||||
peer.peerId notin g.parameters.directPeers:
|
||||
debug "disconnecting bad score peer", peer, score = peer.score
|
||||
asyncSpawn(g.disconnectPeer(peer))
|
||||
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
|
||||
|
||||
proc updateScores*(g: GossipSub) = # avoid async
|
||||
## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
|
||||
@@ -176,14 +168,7 @@ proc updateScores*(g: GossipSub) = # avoid async
|
||||
score += topicScore * topicParams.topicWeight
|
||||
|
||||
# Score metrics
|
||||
let agent =
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
let agent = peer.getAgent()
|
||||
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
|
||||
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
|
||||
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
|
||||
@@ -220,14 +205,7 @@ proc updateScores*(g: GossipSub) = # avoid async
|
||||
score += colocationFactor * g.parameters.ipColocationFactorWeight
|
||||
|
||||
# Score metrics
|
||||
let agent =
|
||||
when defined(libp2p_agents_metrics):
|
||||
if peer.shortAgent.len > 0:
|
||||
peer.shortAgent
|
||||
else:
|
||||
"unknown"
|
||||
else:
|
||||
"unknown"
|
||||
let agent = peer.getAgent()
|
||||
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
|
||||
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
|
||||
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
|
||||
@@ -247,11 +225,7 @@ proc updateScores*(g: GossipSub) = # avoid async
|
||||
|
||||
trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted
|
||||
|
||||
if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and
|
||||
peer.peerId notin g.parameters.directPeers:
|
||||
debug "disconnecting bad score peer", peer, score = peer.score
|
||||
asyncSpawn(g.disconnectPeer(peer))
|
||||
|
||||
g.disconnectIfBadScorePeer(peer, stats.score)
|
||||
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
|
||||
|
||||
for peer in evicting:
|
||||
@@ -264,43 +238,52 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
|
||||
trace "running scoring heartbeat", instance = cast[int](g)
|
||||
g.updateScores()
|
||||
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
|
||||
for tt in topics:
|
||||
let t = tt
|
||||
if t notin g.topics:
|
||||
continue
|
||||
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
|
||||
let uselessAppBytesNum = msg.data.len
|
||||
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
|
||||
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
|
||||
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
|
||||
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
|
||||
if g.parameters.disconnectPeerAboveRateLimit:
|
||||
await g.disconnectPeer(peer)
|
||||
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
|
||||
|
||||
let tt = t
|
||||
# update stats
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.mgetOrPut(tt, TopicInfo()).invalidMessageDeliveries += 1
|
||||
let topic = msg.topic
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
# update stats
|
||||
g.withPeerStats(peer.peerId) do(stats: var PeerStats):
|
||||
stats.topicInfos.mgetOrPut(topic, TopicInfo()).invalidMessageDeliveries += 1
|
||||
|
||||
proc addCapped*[T](stat: var T, diff, cap: T) =
|
||||
stat += min(diff, cap - stat)
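A tiny usage check for addCapped, showing that increments saturate at the cap (the values are arbitrary):

proc addCapped[T](stat: var T, diff, cap: T) =
  stat += min(diff, cap - stat)

var firstMessageDeliveries = 1.5
firstMessageDeliveries.addCapped(1.0, 2.0)  # only 0.5 of the increment fits
assert firstMessageDeliveries == 2.0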
proc rewardDelivered*(
|
||||
g: GossipSub, peer: PubSubPeer, topics: openArray[string], first: bool, delay = ZeroDuration) =
|
||||
for tt in topics:
|
||||
let t = tt
|
||||
if t notin g.topics:
|
||||
continue
|
||||
g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
topic: string,
|
||||
first: bool,
|
||||
delay = ZeroDuration,
|
||||
) =
|
||||
if topic notin g.topics:
|
||||
return
|
||||
|
||||
let tt = t
|
||||
let topicParams = g.topicParams.mgetOrPut(t, TopicParams.init())
|
||||
# if in mesh add more delivery score
|
||||
let topicParams = g.topicParams.mgetOrPut(topic, TopicParams.init())
|
||||
# if in mesh add more delivery score
|
||||
|
||||
if delay > topicParams.meshMessageDeliveriesWindow:
|
||||
# Too old
|
||||
continue
|
||||
if delay > topicParams.meshMessageDeliveriesWindow:
|
||||
# Too old
|
||||
return
|
||||
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.withValue(tt, tstats):
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
|
||||
stats.topicInfos.withValue(topic, tstats):
|
||||
if first:
|
||||
tstats[].firstMessageDeliveries.addCapped(
|
||||
1, topicParams.firstMessageDeliveriesCap)
|
||||
|
||||
if tstats[].inMesh:
|
||||
tstats[].meshMessageDeliveries.addCapped(
|
||||
1, topicParams.meshMessageDeliveriesCap)
|
||||
do: # make sure we don't loose this information
|
||||
stats.topicInfos[tt] = TopicInfo(meshMessageDeliveries: 1)
|
||||
if tstats[].inMesh:
|
||||
tstats[].meshMessageDeliveries.addCapped(
|
||||
1, topicParams.meshMessageDeliveriesCap)
|
||||
do: # make sure we don't lose this information
|
||||
stats.topicInfos[topic] = TopicInfo(meshMessageDeliveries: 1)
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import std/[options, tables, sets]
|
||||
@@ -48,6 +45,7 @@ const
|
||||
|
||||
const
|
||||
BackoffSlackTime* = 2 # seconds
|
||||
PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
|
||||
IHavePeerBudget* = 10
|
||||
# the max amount of IHave to expose, not by spec, but go as example
|
||||
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
|
||||
@@ -104,6 +102,11 @@ type
|
||||
behaviourPenalty*: float64 # the eventual penalty score
|
||||
|
||||
GossipSubParams* {.public.} = object
|
||||
# explicit is used to check if the GossipSubParams instance was created by the user either passing params to GossipSubParams(...)
|
||||
# or GossipSubParams.init(...). In the first case explicit should be set to true when calling the Nim constructor.
|
||||
# In the second case, the param isn't necessary and should be always be set to true by init.
|
||||
# If none of those options were used, it means the instance was created using Nim default values.
|
||||
# In this case, GossipSubParams.init() should be called when initing GossipSub to set the values to their default value defined by nim-libp2p.
|
||||
explicit*: bool
|
||||
pruneBackoff*: Duration
|
||||
unsubscribeBackoff*: Duration
|
||||
@@ -144,25 +147,31 @@ type
|
||||
disconnectBadPeers*: bool
|
||||
enablePX*: bool
|
||||
|
||||
bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
|
||||
|
||||
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
|
||||
disconnectPeerAboveRateLimit*: bool
|
||||
|
||||
# Max number of elements allowed in the non-priority queue. When this limit has been reached, the peer will be disconnected.
|
||||
maxNumElementsInNonPriorityQueue*: int
|
||||
|
||||
BackoffTable* = Table[string, Table[PeerId, Moment]]
|
||||
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
|
||||
ValidationSeenTable* = Table[SaltedId, HashSet[PubSubPeer]]
|
||||
|
||||
RoutingRecordsPair* = tuple[id: PeerId, record: Option[PeerRecord]]
|
||||
RoutingRecordsHandler* =
|
||||
proc(peer: PeerId,
|
||||
tag: string, # For gossipsub, the topic
|
||||
peers: seq[RoutingRecordsPair])
|
||||
{.gcsafe, raises: [Defect].}
|
||||
{.gcsafe, raises: [].}
|
||||
|
||||
GossipSub* = ref object of FloodSub
|
||||
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
|
||||
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
|
||||
gossipsub*: PeerTable # peers that are subscribed to a topic
|
||||
explicit*: PeerTable # directpeers that we keep alive explicitly
|
||||
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
|
||||
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
|
||||
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
|
||||
gossip*: Table[string, seq[ControlIHave]] # pending gossip
|
||||
control*: Table[string, ControlMessage] # pending control messages
|
||||
mcache*: MCache # messages cache
|
||||
validationSeen*: ValidationSeenTable # peers who sent us message in validation
|
||||
heartbeatFut*: Future[void] # cancellation future for heartbeat interval
|
||||
|
||||
@@ -7,57 +7,59 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[sets, tables, options]
|
||||
import std/[sets, tables]
|
||||
import rpc/[messages]
|
||||
import results
|
||||
|
||||
export sets, tables, messages, options
|
||||
export sets, tables, messages, results
|
||||
|
||||
type
|
||||
CacheEntry* = object
|
||||
mid*: MessageId
|
||||
topicIds*: seq[string]
|
||||
msgId*: MessageId
|
||||
topic*: string
|
||||
|
||||
MCache* = object of RootObj
|
||||
msgs*: Table[MessageId, Message]
|
||||
history*: seq[seq[CacheEntry]]
|
||||
pos*: int
|
||||
windowSize*: Natural
|
||||
|
||||
func get*(c: MCache, mid: MessageId): Option[Message] =
|
||||
if mid in c.msgs:
|
||||
try: some(c.msgs[mid])
|
||||
func get*(c: MCache, msgId: MessageId): Opt[Message] =
|
||||
if msgId in c.msgs:
|
||||
try: Opt.some(c.msgs[msgId])
|
||||
except KeyError: raiseAssert "checked"
|
||||
else:
|
||||
none(Message)
|
||||
Opt.none(Message)
|
||||
|
||||
func contains*(c: MCache, mid: MessageId): bool =
|
||||
mid in c.msgs
|
||||
func contains*(c: MCache, msgId: MessageId): bool =
|
||||
msgId in c.msgs
|
||||
|
||||
func put*(c: var MCache, msgId: MessageId, msg: Message) =
|
||||
if not c.msgs.hasKeyOrPut(msgId, msg):
|
||||
# Only add cache entry if the message was not already in the cache
|
||||
c.history[0].add(CacheEntry(mid: msgId, topicIds: msg.topicIds))
|
||||
c.history[c.pos].add(CacheEntry(msgId: msgId, topic: msg.topic))
|
||||
|
||||
func window*(c: MCache, topic: string): HashSet[MessageId] =
|
||||
let
|
||||
len = min(c.windowSize, c.history.len)
|
||||
|
||||
for i in 0..<len:
|
||||
for entry in c.history[i]:
|
||||
for t in entry.topicIds:
|
||||
if t == topic:
|
||||
result.incl(entry.mid)
|
||||
break
|
||||
# Work backwards from `pos` in the circular buffer
|
||||
for entry in c.history[(c.pos + c.history.len - i) mod c.history.len]:
|
||||
if entry.topic == topic:
|
||||
result.incl(entry.msgId)
|
||||
|
||||
func shift*(c: var MCache) =
|
||||
for entry in c.history.pop():
|
||||
c.msgs.del(entry.mid)
|
||||
# Shift circular buffer to write to a new position, clearing it from past
|
||||
# iterations
|
||||
c.pos = (c.pos + 1) mod c.history.len
|
||||
|
||||
c.history.insert(@[])
|
||||
for entry in c.history[c.pos]:
|
||||
c.msgs.del(entry.msgId)
|
||||
|
||||
reset(c.history[c.pos])
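The rewritten cache above keeps history as a circular buffer indexed by pos rather than inserting and popping seqs. A stripped-down, standalone version of the same indexing scheme (types and names are simplified for illustration, not the library's own):

import std/sets

type Ring = object
  history: seq[seq[int]]  # one slot per shift period
  pos: int                # slot currently being written

proc init(T: type Ring, slots: int): Ring =
  Ring(history: newSeq[seq[int]](slots), pos: 0)

proc put(r: var Ring, id: int) =
  r.history[r.pos].add(id)

proc window(r: Ring, size: int): HashSet[int] =
  # walk backwards from pos: most recent slots first
  for i in 0 ..< min(size, r.history.len):
    for id in r.history[(r.pos + r.history.len - i) mod r.history.len]:
      result.incl(id)

proc shift(r: var Ring) =
  # advance to the next slot and clear whatever it held last time around
  r.pos = (r.pos + 1) mod r.history.len
  r.history[r.pos].setLen(0)

var r = Ring.init(3)
r.put(1)
r.shift()
r.put(2)
r.put(3)
assert r.window(2) == [1, 2, 3].toHashSet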
func init*(T: type MCache, window, history: Natural): T =
|
||||
T(
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sets, sequtils]
|
||||
import ./pubsubpeer, ../../peerid
|
||||
|
||||
@@ -13,13 +13,11 @@
|
||||
## `publish<#publish.e%2CPubSub%2Cstring%2Cseq%5Bbyte%5D>`_ something on it,
|
||||
## and eventually `unsubscribe<#unsubscribe%2CPubSub%2Cstring%2CTopicHandler>`_ from it.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sequtils, sets, strutils]
|
||||
import chronos, chronicles, metrics
|
||||
import chronos/ratelimit
|
||||
import ./errors as pubsub_errors,
|
||||
./pubsubpeer,
|
||||
./rpc/[message, messages, protobuf],
|
||||
@@ -32,7 +30,6 @@ import ./errors as pubsub_errors,
|
||||
../../errors,
|
||||
../../utility
|
||||
|
||||
import metrics
|
||||
import stew/results
|
||||
export results
|
||||
|
||||
@@ -86,18 +83,18 @@ type
|
||||
InitializationError* = object of LPError
|
||||
|
||||
TopicHandler* {.public.} = proc(topic: string,
|
||||
data: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
|
||||
data: seq[byte]): Future[void] {.gcsafe, raises: [].}
|
||||
|
||||
ValidatorHandler* {.public.} = proc(topic: string,
|
||||
message: Message): Future[ValidationResult] {.gcsafe, raises: [Defect].}
|
||||
message: Message): Future[ValidationResult] {.gcsafe, raises: [].}
|
||||
|
||||
TopicPair* = tuple[topic: string, handler: TopicHandler]
|
||||
|
||||
MsgIdProvider* {.public.} =
|
||||
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}
|
||||
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [], gcsafe.}
|
||||
|
||||
SubscriptionValidator* {.public.} =
|
||||
proc(topic: string): bool {.raises: [Defect], gcsafe.}
|
||||
proc(topic: string): bool {.raises: [], gcsafe.}
|
||||
## Every time a peer send us a subscription (even to an unknown topic),
|
||||
## we have to store it, which may be an attack vector.
|
||||
## This callback can be used to reject topic we're not interested in
@@ -140,18 +137,34 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =

libp2p_pubsub_peers.set(p.peers.len.int64)

proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} =
## Attempt to send `msg` to remote peer
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be sent.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.

trace "sending pubsub message to peer", peer, msg = shortLog(msg)
peer.send(msg, p.anonymize)
peer.send(msg, p.anonymize, isHighPriority)

proc broadcast*(
p: PubSub,
sendPeers: auto, # Iteratble[PubSubPeer]
msg: RPCMsg) {.raises: [Defect].} =
## Attempt to send `msg` to the given peers
msg: RPCMsg,
isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.

let npeers = sendPeers.len.int64
for sub in msg.subscriptions:
@@ -167,29 +180,28 @@ proc broadcast*(
libp2p_pubsub_broadcast_unsubscriptions.inc(npeers, labelValues = ["generic"])

for smsg in msg.messages:
for topic in smsg.topicIds:
if p.knownTopics.contains(topic):
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = [topic])
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])
let topic = smsg.topic
if p.knownTopics.contains(topic):
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = [topic])
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])

if msg.control.isSome():
libp2p_pubsub_broadcast_iwant.inc(npeers * msg.control.get().iwant.len.int64)
msg.control.withValue(control):
libp2p_pubsub_broadcast_iwant.inc(npeers * control.iwant.len.int64)

let control = msg.control.get()
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
if p.knownTopics.contains(ihave.topicID):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicID])
else:
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = ["generic"])
for graft in control.graft:
if p.knownTopics.contains(graft.topicId):
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicId])
if p.knownTopics.contains(graft.topicID):
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = [graft.topicID])
else:
libp2p_pubsub_broadcast_graft.inc(npeers, labelValues = ["generic"])
for prune in control.prune:
if p.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicId])
if p.knownTopics.contains(prune.topicID):
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = [prune.topicID])
else:
libp2p_pubsub_broadcast_prune.inc(npeers, labelValues = ["generic"])

@@ -198,19 +210,19 @@ proc broadcast*(

if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg)
p.send(peer, msg, isHighPriority)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
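The high/low priority split documented above can be pictured with a small, synchronous toy model (illustrative only; the real code tracks chronos futures and an AsyncQueue rather than plain seqs). In the diff, subscriptions are sent with isHighPriority = true, while the non-priority queue is described as holding IWANT replies and relayed messages.

import std/deques

# Toy model of the scheduling rule: high-priority RPCs go out immediately,
# low-priority ones wait until the high-priority backlog has drained.
type Sender = object
  wire: seq[string]          # order in which messages hit the wire
  highPending: int           # "in-flight" high-priority sends
  lowPrio: Deque[string]

proc send(s: var Sender; msg: string; isHighPriority: bool) =
  if isHighPriority:
    s.wire.add msg
    inc s.highPending
  elif s.highPending == 0 and s.lowPrio.len == 0:
    s.wire.add msg           # queues empty: skip queueing to cut latency
  else:
    s.lowPrio.addLast msg

proc highPriorityDone(s: var Sender) =
  dec s.highPending
  while s.highPending == 0 and s.lowPrio.len > 0:
    s.wire.add s.lowPrio.popFirst()

var s = Sender(lowPrio: initDeque[string]())
s.send("GRAFT", true)              # goes out at once
s.send("relayed message", false)   # parked behind the pending GRAFT
s.highPriorityDone()
echo s.wire                        # @["GRAFT", "relayed message"]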
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
topics: openArray[string],
subscribe: bool) =
## send subscriptions to remote peer
p.send(peer, RPCMsg.withSubs(topics, subscribe))
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)

for topic in topics:
if subscribe:
@@ -239,36 +251,33 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
libp2p_pubsub_received_unsubscriptions.inc(labelValues = ["generic"])

for i in 0..<rpcMsg.messages.len():
template smsg: untyped = rpcMsg.messages[i]
for j in 0..<smsg.topicIds.len():
template topic: untyped = smsg.topicIds[j]
if p.knownTopics.contains(topic):
libp2p_pubsub_received_messages.inc(labelValues = [topic])
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])
let topic = rpcMsg.messages[i].topic
if p.knownTopics.contains(topic):
libp2p_pubsub_received_messages.inc(labelValues = [topic])
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])

if rpcMsg.control.isSome():
libp2p_pubsub_received_iwant.inc(rpcMsg.control.get().iwant.len.int64)
template control: untyped = rpcMsg.control.unsafeGet()
rpcMsg.control.withValue(control):
libp2p_pubsub_received_iwant.inc(control.iwant.len.int64)
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
if p.knownTopics.contains(ihave.topicID):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicID])
else:
libp2p_pubsub_received_ihave.inc(labelValues = ["generic"])
for graft in control.graft:
if p.knownTopics.contains(graft.topicId):
libp2p_pubsub_received_graft.inc(labelValues = [graft.topicId])
if p.knownTopics.contains(graft.topicID):
libp2p_pubsub_received_graft.inc(labelValues = [graft.topicID])
else:
libp2p_pubsub_received_graft.inc(labelValues = ["generic"])
for prune in control.prune:
if p.knownTopics.contains(prune.topicId):
libp2p_pubsub_received_prune.inc(labelValues = [prune.topicId])
if p.knownTopics.contains(prune.topicID):
libp2p_pubsub_received_prune.inc(labelValues = [prune.topicID])
else:
libp2p_pubsub_received_prune.inc(labelValues = ["generic"])

method rpcHandler*(p: PubSub,
peer: PubSubPeer,
rpcMsg: RPCMsg): Future[void] {.base, async.} =
data: seq[byte]): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"

@@ -277,16 +286,20 @@ method onNewPeer(p: PubSub, peer: PubSubPeer) {.base, gcsafe.} = discard
method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {.base, gcsafe.} =
# Peer event is raised for the send connection in particular
case event.kind
of PubSubPeerEventKind.Connected:
of PubSubPeerEventKind.StreamOpened:
if p.topics.len > 0:
p.sendSubs(peer, toSeq(p.topics.keys), true)
of PubSubPeerEventKind.Disconnected:
of PubSubPeerEventKind.StreamClosed:
discard
of PubSubPeerEventKind.DisconnectionRequested:
discard

proc getOrCreatePeer*(

method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
protos: seq[string]): PubSubPeer {.base, gcsafe.} =

p.peers.withValue(peerId, peer):
return peer[]

@@ -359,9 +372,9 @@ method handleConn*(p: PubSub,
## that we're interested in
##

proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
# call pubsub rpc handler
p.rpcHandler(peer, msg)
p.rpcHandler(peer, data)

let peer = p.getOrCreatePeer(conn.peerId, @[proto])

@@ -491,7 +504,7 @@ method publish*(p: PubSub,
return 0

method initPubSub*(p: PubSub)
{.base, raises: [Defect, InitializationError].} =
{.base, raises: [InitializationError].} =
## perform pubsub initialization
p.observers = new(seq[PubSubObserver])
if p.msgIdProvider == nil:
@@ -504,7 +517,7 @@ method addValidator*(p: PubSub,
## will be sent to `hook`. `hook` can return either `Accept`,
## `Ignore` or `Reject` (which can descore the peer)
for t in topic:
trace "adding validator for topic", topicId = t
trace "adding validator for topic", topic = t
p.validators.mgetOrPut(t, HashSet[ValidatorHandler]()).incl(hook)

method removeValidator*(p: PubSub,
@@ -519,13 +532,13 @@ method removeValidator*(p: PubSub,
method validate*(p: PubSub, message: Message): Future[ValidationResult] {.async, base.} =
var pending: seq[Future[ValidationResult]]
trace "about to validate message"
for topic in message.topicIds:
trace "looking for validators on topic", topicId = topic,
registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topicId = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))
let topic = message.topic
trace "looking for validators on topic",
topic = topic, registered = toSeq(p.validators.keys)
if topic in p.validators:
trace "running validators for topic", topic = topic
for validator in p.validators[topic]:
pending.add(validator(topic, message))

result = ValidationResult.Accept
let futs = await allFinished(pending)
@@ -559,7 +572,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false): P
{.raises: [Defect, InitializationError], public.} =
{.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
P(switch: switch,
@@ -7,14 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
@@ -23,32 +21,51 @@ import rpc/[messages, message, protobuf],
../../protobuf/minprotobuf,
../../utility

export peerid, connection
export peerid, connection, deques

logScope:
topics = "libp2p pubsubpeer"

when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])

when defined(pubsubpeer_queue_metrics):
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])

const
DefaultMaxNumElementsInNonPriorityQueue* = 1024
BehaviourPenaltyFoNonPriorityQueueOverLimit = 0.0001 # this value is quite arbitrary and was found empirically
# to result in a behaviourPenalty around [0.1, 0.2] when the score is updated.

type
PeerRateLimitError* = object of CatchableError

PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}

PubSubPeerEventKind* {.pure.} = enum
Connected
Disconnected
StreamOpened
StreamClosed
DisconnectionRequested # tells gossipsub that the transport connection to the peer should be closed

PubSubPeerEvent* = object
kind*: PubSubPeerEventKind

GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].}
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}

RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]

PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
@@ -63,13 +80,22 @@ type

score*: float64
sentIHaves*: Deque[HashSet[MessageId]]
heDontWants*: Deque[HashSet[SaltedId]]
## IDONTWANT contains unvalidated message id:s which may be long and/or
## expensive to look up, so we apply the same salting to them as during
## unvalidated message processing
iHaveBudget*: int
pingBudget*: int
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]

RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
{.gcsafe, raises: [Defect].}
rpcmessagequeue: RpcMessageQueue
maxNumElementsInNonPriorityQueue*: int # The max number of elements allowed in the non-priority queue.

RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -80,6 +106,16 @@ when defined(libp2p_agents_metrics):
#so we have to read the parents short agent..
p.sendConn.getWrapped().shortAgent

proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"

func hash*(p: PubSubPeer): Hash =
p.peerId.hash

@@ -108,7 +144,7 @@ func outbound*(p: PubSubPeer): bool =
else:
false

proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
@@ -135,28 +171,13 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
conn, peer = p, closed = conn.closed,
data = data.shortLog

var rmsg = decodeRpcMsg(data)
await p.handler(p, data)
data = newSeq[byte]() # Release memory

if rmsg.isErr():
notice "failed to decode msg from peer",
conn, peer = p, closed = conn.closed,
err = rmsg.error()
break

trace "decoded msg from peer",
conn, peer = p, closed = conn.closed,
msg = rmsg.get().shortLog
# trigger hooks
p.recvObservers(rmsg.get())

when defined(libp2p_expensive_metrics):
for m in rmsg.get().messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])

await p.handler(p, rmsg.get())
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
except CatchableError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
await conn.close()
except CancelledError:
@@ -170,11 +191,29 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
debug "exiting pubsub read loop",
conn, peer = p, closed = conn.closed

proc closeSendConn(p: PubSubPeer, event: PubSubPeerEventKind) {.async.} =
if p.sendConn != nil:
debug "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil

if not p.connectedFut.finished:
p.connectedFut.complete()

try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: event))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", error = exc.msg
# don't cleanup p.address else we leak some gossip stat table

proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn()
let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")

@@ -182,7 +221,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# remote peer - if we had multiple channels up and one goes down, all
# stop working so we make an effort to only keep a single channel alive

trace "Get new send connection", p, newConn
debug "Get new send connection", p, newConn

# Careful to race conditions here.
# Topic subscription relies on either connectedFut
@@ -192,34 +231,21 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
p.address = if p.sendConn.observedAddr.isSome: some(p.sendConn.observedAddr.get) else: none(MultiAddress)

if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Connected))
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.StreamOpened))

await handle(p, newConn)
finally:
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil
await p.closeSendConn(PubSubPeerEventKind.StreamClosed)

try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
except CancelledError as exc:
raise exc
except CatchableError as exc:
debug "Errors during diconnection events", error = exc.msg

# don't cleanup p.address else we leak some gossip stat table

proc connectImpl(p: PubSubPeer) {.async.} =
proc connectImpl(peer: PubSubPeer) {.async.} =
try:
# Keep trying to establish a connection while it's possible to do so - the
# send connection might get disconnected due to a timeout or an unrelated
# issue so we try to get a new on
while true:
await connectOnce(p)
await connectOnce(peer)
except CatchableError as exc: # never cancelled
debug "Could not establish send connection", msg = exc.msg
debug "Could not establish send connection", peer, msg = exc.msg

proc connect*(p: PubSubPeer) =
if p.connected:
@@ -233,33 +259,37 @@ proc hasSendConn*(p: PubSubPeer): bool =
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
for x in msg.messages:
for t in x.topicIDs:
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, x.topic])

proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")
proc clearSendPriorityQueue(p: PubSubPeer) =
if p.rpcmessagequeue.sendPriorityQueue.len == 0:
return # fast path

if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[0].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popFirst()

if msg.len > p.maxMessageSize:
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and
p.rpcmessagequeue.sendPriorityQueue[^1].finished:
discard p.rpcmessagequeue.sendPriorityQueue.popLast()

if p.sendConn == nil:
discard await p.connectedFut.withTimeout(1.seconds)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(
value = p.rpcmessagequeue.sendPriorityQueue.len.int64,
labelValues = [$p.peerId])

var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection, skipping message", p, msg = shortLog(msg)
return
proc clearNonPriorityQueue*(p: PubSubPeer) =
if len(p.rpcmessagequeue.nonPriorityQueue) > 0:
p.rpcmessagequeue.nonPriorityQueue.clear()

trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
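clearSendPriorityQueue above only trims futures that have already finished from the two ends of the deque; anything still pending in the middle stays. A self-contained toy model of that trimming (plain Nim, not library code):

import std/deques

# true stands in for a finished "future", false for a pending one.
var queue = initDeque[bool]()
for fin in [true, true, false, true]:
  queue.addLast fin

while queue.len > 0 and queue.peekFirst:
  discard queue.popFirst()
while queue.len > 0 and queue.peekLast:
  discard queue.popLast()

echo queue.len   # 1 entry left: the unfinished one in the middle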
proc sendMsgContinue(conn: Connection, msgFut: Future[void]) {.async.} =
# Continuation for a pending `sendMsg` future from below
try:
await conn.writeLp(msg)
await msgFut
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
@@ -270,9 +300,125 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =

await conn.close() # This will clean up the send connection

proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async.} =
# Slow path of `sendMsg` where msg is held in memory while send connection is
# being set up
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
discard await race(p.connectedFut)

var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", p, msg = shortLog(msg)
return

trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))

proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn

trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
sendMsgSlow(p, msg)

proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
doAssert(not isNil(p), "pubsubpeer nil!")

p.clearSendPriorityQueue()

# When queues are empty, skipping the non-priority queue for low priority
# messages reduces latency
let emptyQueues =
(p.rpcmessagequeue.sendPriorityQueue.len() +
p.rpcmessagequeue.nonPriorityQueue.len()) == 0

if msg.len <= 0:
debug "empty message, skipping", peer = p, msg = shortLog(msg)
Future[void].completed()
elif msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
f
else:
if len(p.rpcmessagequeue.nonPriorityQueue) >= p.maxNumElementsInNonPriorityQueue:
p.behaviourPenalty += BehaviourPenaltyFoNonPriorityQueueOverLimit
trace "Peer has reached maxNumElementsInNonPriorityQueue. Discarding message and applying behaviour penalty.", peer = p, score = p.score,
behaviourPenalty = p.behaviourPenalty, agent = p.getAgent()
Future[void].completed()
else:
let f = p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
f
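For a feel of the BehaviourPenaltyFoNonPriorityQueueOverLimit constant used in the overflow branch above, a back-of-envelope sketch (not library code): every dropped low-priority message adds 0.0001 to behaviourPenalty, so a peer that overflows the queue a thousand times between score updates accumulates roughly 0.1, in line with the [0.1, 0.2] range mentioned in the constant's comment.

const penaltyPerDrop = 0.0001
var behaviourPenalty = 0.0
for _ in 0 ..< 1000:
  behaviourPenalty += penaltyPerDrop
echo behaviourPenalty   # ~0.1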
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.

var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()

var currentSize = byteSize(currentRPCMsg)

for msg in rpcMsg.messages:
let msgSize = byteSize(msg)

# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message

trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
currentRPCMsg = RPCMsg()
currentSize = 0

currentRPCMsg.messages.add(msg)
currentSize += msgSize

# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
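The same accumulate-until-maxSize pattern can be shown self-contained with plain strings instead of RPCMsg (a sketch only; sizes and the 10% overhead guess mirror the iterator above):

iterator splitIntoBatches(msgs: seq[string], maxSize: int): seq[string] =
  var batch: seq[string]
  var batchSize = 0
  for m in msgs:
    if float(batchSize + m.len) * 1.1 > float(maxSize):
      if batch.len == 0:
        continue            # a single oversized item hitting an empty batch is skipped
      yield batch
      batch = @[]
      batchSize = 0
    batch.add m
    batchSize += m.len
  if batch.len > 0:
    yield batch

for b in splitIntoBatches(@["ddddddddddddd", "aaaa", "bbbb", "cccc"], 10):
  echo b   # @["aaaa", "bbbb"], then @["cccc"]; the oversized first item is dropped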
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The `RPCMsg` instance representing the message to be sent.
## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -290,7 +436,13 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)

asyncSpawn p.sendEncoded(encoded)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded, isHighPriority)

proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -299,13 +451,54 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false

proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# waiting for the last future minimizes the number of times we have to
# wait for something (each wait = performance cost) -
# clearSendPriorityQueue ensures we're not waiting for an already-finished
# future
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
# `race` prevents `p.rpcmessagequeue.sendPriorityQueue[^1]` from being
# cancelled when this task is cancelled
discard await race(p.rpcmessagequeue.sendPriorityQueue[^1])
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)

proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()

proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancelSoon()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
when defined(pubsubpeer_queue_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
p.clearNonPriorityQueue()

proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]]()
)

proc new*(
T: typedesc[PubSubPeer],
peerId: PeerId,
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int): T =
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =

result = T(
getConn: getConn,
@@ -313,6 +506,11 @@ proc new*(
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[SaltedId]))
result.startSendNonPriorityTask()
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronicles, metrics, stew/[byteutils, endians2]
import ./messages,
@@ -65,22 +62,21 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true): Message
{.gcsafe, raises: [Defect, LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topic: topic)

# order matters, we want to include seqno in the signature
if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())

if peer.isSome:
let peer = peer.get()
peer.withValue(peer):
msg.fromPeer = peer.peerId
if sign:
msg.signature = sign(msg, peer.privateKey).expect("Couldn't sign message!")
msg.key = peer.privateKey.getPublicKey().expect("Invalid private key!")
.getBytes().expect("Couldn't get public key bytes!")
elif sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
else:
if sign: raise (ref LPError)(msg: "Cannot sign message without peer info")

msg

@@ -90,10 +86,10 @@ proc init*(
data: seq[byte],
topic: string,
seqno: Option[uint64]): Message
{.gcsafe, raises: [Defect, LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topic: topic)
msg.fromPeer = peerId

if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())
msg
@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options, sequtils
import "../../.."/[
import ../../../[
peerid,
routing_record,
utility
@@ -21,50 +18,67 @@ import "../../.."/[

export options

proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
var fieldNames: seq[string]
for name, _ in fieldPairs(T()):
fieldNames &= name
if fieldNames != existingFieldNames:
fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)

type
PeerInfoMsg* = object
peerId*: PeerId
signedPeerRecord*: seq[byte]
PeerInfoMsg* = object
peerId*: PeerId
signedPeerRecord*: seq[byte]

SubOpts* = object
subscribe*: bool
topic*: string
SubOpts* = object
subscribe*: bool
topic*: string

MessageId* = seq[byte]
MessageId* = seq[byte]

Message* = object
fromPeer*: PeerId
data*: seq[byte]
seqno*: seq[byte]
topicIds*: seq[string]
signature*: seq[byte]
key*: seq[byte]
SaltedId* = object
# Salted hash of message ID - used instead of the ordinary message ID to
# avoid hash poisoning attacks and to make memory usage more predictable
# with respect to the variable-length message id
data*: MDigest[256]

ControlMessage* = object
ihave*: seq[ControlIHave]
iwant*: seq[ControlIWant]
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
Message* = object
fromPeer*: PeerId
data*: seq[byte]
seqno*: seq[byte]
topic*: string
signature*: seq[byte]
key*: seq[byte]

ControlIHave* = object
topicId*: string
messageIds*: seq[MessageId]
ControlMessage* = object
ihave*: seq[ControlIHave]
iwant*: seq[ControlIWant]
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
idontwant*: seq[ControlIWant]

ControlIWant* = object
messageIds*: seq[MessageId]
ControlIHave* = object
topicID*: string
messageIDs*: seq[MessageId]

ControlGraft* = object
topicId*: string
ControlIWant* = object
messageIDs*: seq[MessageId]

ControlPrune* = object
topicId*: string
peers*: seq[PeerInfoMsg]
backoff*: uint64
ControlGraft* = object
topicID*: string

RPCMsg* = object
subscriptions*: seq[SubOpts]
messages*: seq[Message]
control*: Option[ControlMessage]
ControlPrune* = object
topicID*: string
peers*: seq[PeerInfoMsg]
backoff*: uint64

RPCMsg* = object
subscriptions*: seq[SubOpts]
messages*: seq[Message]
control*: Option[ControlMessage]
ping*: seq[byte]
pong*: seq[byte]

func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
@@ -73,23 +87,23 @@ func withSubs*(

func shortLog*(s: ControlIHave): auto =
(
topicId: s.topicId.shortLog,
messageIds: mapIt(s.messageIds, it.shortLog)
topic: s.topicID.shortLog,
messageIDs: mapIt(s.messageIDs, it.shortLog)
)

func shortLog*(s: ControlIWant): auto =
(
messageIds: mapIt(s.messageIds, it.shortLog)
messageIDs: mapIt(s.messageIDs, it.shortLog)
)

func shortLog*(s: ControlGraft): auto =
(
topicId: s.topicId.shortLog
topic: s.topicID.shortLog
)

func shortLog*(s: ControlPrune): auto =
(
topicId: s.topicId.shortLog
topic: s.topicID.shortLog
)

func shortLog*(c: ControlMessage): auto =
@@ -105,21 +119,65 @@ func shortLog*(msg: Message): auto =
fromPeer: msg.fromPeer.shortLog,
data: msg.data.shortLog,
seqno: msg.seqno.shortLog,
topicIds: $msg.topicIds,
topic: msg.topic,
signature: msg.signature.shortLog,
key: msg.key.shortLog
)

func shortLog*(m: RPCMsg): auto =
if m.control.isSome:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get().shortLog
)
else:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: ControlMessage().shortLog
)
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get(ControlMessage()).shortLog
)

static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
proc byteSize(peerInfo: PeerInfoMsg): int =
peerInfo.peerId.len + peerInfo.signedPeerRecord.len

static: expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool

static: expectedFields(Message, @["fromPeer", "data", "seqno", "topic", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len + msg.signature.len + msg.key.len +
msg.topic.len

proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIHave, @["topicID", "messageIDs"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicID.len + controlIHave.messageIDs.foldl(a + b.len, 0)

proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIWant, @["messageIDs"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIDs.foldl(a + b.len, 0)

proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)

static: expectedFields(ControlGraft, @["topicID"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicID.len

static: expectedFields(ControlPrune, @["topicID", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicID.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64

static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)

static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
proc byteSize*(rpc: RPCMsg): int =
result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
rpc.ping.len + rpc.pong.len
rpc.control.withValue(ctrl):
result += ctrl.byteSize
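As a worked example of the size estimate (field lengths made up): a message with a 36-byte fromPeer, 32-byte data, 8-byte seqno, 9-byte topic, 64-byte signature and 37-byte key counts 36 + 32 + 8 + 9 + 64 + 37 = 186 bytes. Protobuf tags and length prefixes are not included, which is one reason splitRPCMsg above pads its estimate by 10%. A self-contained fold mirroring the byteSize arithmetic:

import std/sequtils

let fieldLens = @[36, 32, 8, 9, 64, 37]   # fromPeer, data, seqno, topic, signature, key
echo fieldLens.foldl(a + b, 0)            # 186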
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options
import stew/assign2
@@ -32,7 +29,7 @@ when defined(libp2p_protobuf_metrics):

proc write*(pb: var ProtoBuffer, field: int, graft: ControlGraft) =
var ipb = initProtoBuffer()
ipb.write(1, graft.topicId)
ipb.write(1, graft.topicID)
ipb.finish()
pb.write(field, ipb)

@@ -48,7 +45,7 @@ proc write*(pb: var ProtoBuffer, field: int, infoMsg: PeerInfoMsg) =

proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =
var ipb = initProtoBuffer()
ipb.write(1, prune.topicId)
ipb.write(1, prune.topicID)
for peer in prune.peers:
ipb.write(2, peer)
ipb.write(3, prune.backoff)
@@ -60,8 +57,8 @@ proc write*(pb: var ProtoBuffer, field: int, prune: ControlPrune) =

proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =
var ipb = initProtoBuffer()
ipb.write(1, ihave.topicId)
for mid in ihave.messageIds:
ipb.write(1, ihave.topicID)
for mid in ihave.messageIDs:
ipb.write(2, mid)
ipb.finish()
pb.write(field, ipb)
@@ -71,7 +68,7 @@ proc write*(pb: var ProtoBuffer, field: int, ihave: ControlIHave) =

proc write*(pb: var ProtoBuffer, field: int, iwant: ControlIWant) =
var ipb = initProtoBuffer()
for mid in iwant.messageIds:
for mid in iwant.messageIDs:
ipb.write(1, mid)
if len(ipb.buffer) > 0:
ipb.finish()
@@ -90,6 +87,8 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
ipb.write(3, graft)
for prune in control.prune:
ipb.write(4, prune)
for idontwant in control.idontwant:
ipb.write(5, idontwant)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
@@ -111,8 +110,7 @@ proc encodeMessage*(msg: Message, anonymize: bool): seq[byte] =
pb.write(2, msg.data)
if len(msg.seqno) > 0 and not anonymize:
pb.write(3, msg.seqno)
for topic in msg.topicIds:
pb.write(4, topic)
pb.write(4, msg.topic)
if len(msg.signature) > 0 and not anonymize:
pb.write(5, msg.signature)
if len(msg.key) > 0 and not anonymize:
@@ -134,10 +132,10 @@ proc decodeGraft*(pb: ProtoBuffer): ProtoResult[ControlGraft] {.

trace "decodeGraft: decoding message"
var control = ControlGraft()
if ? pb.getField(1, control.topicId):
trace "decodeGraft: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodeGraft: read topicID", topicID = control.topicID
else:
trace "decodeGraft: topicId is missing"
trace "decodeGraft: topicID is missing"
ok(control)

proc decodePeerInfoMsg*(pb: ProtoBuffer): ProtoResult[PeerInfoMsg] {.
@@ -161,10 +159,10 @@ proc decodePrune*(pb: ProtoBuffer): ProtoResult[ControlPrune] {.

trace "decodePrune: decoding message"
var control = ControlPrune()
if ? pb.getField(1, control.topicId):
trace "decodePrune: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodePrune: read topicID", topic = control.topicID
else:
trace "decodePrune: topicId is missing"
trace "decodePrune: topicID is missing"
var bpeers: seq[seq[byte]]
if ? pb.getRepeatedField(2, bpeers):
for bpeer in bpeers:
@@ -180,12 +178,12 @@ proc decodeIHave*(pb: ProtoBuffer): ProtoResult[ControlIHave] {.

trace "decodeIHave: decoding message"
var control = ControlIHave()
if ? pb.getField(1, control.topicId):
trace "decodeIHave: read topicId", topic_id = control.topicId
if ? pb.getField(1, control.topicID):
trace "decodeIHave: read topicID", topic = control.topicID
else:
trace "decodeIHave: topicId is missing"
if ? pb.getRepeatedField(2, control.messageIds):
trace "decodeIHave: read messageIDs", message_ids = control.messageIds
trace "decodeIHave: topicID is missing"
if ? pb.getRepeatedField(2, control.messageIDs):
trace "decodeIHave: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIHave: no messageIDs"
ok(control)
@@ -196,8 +194,8 @@ proc decodeIWant*(pb: ProtoBuffer): ProtoResult[ControlIWant] {.inline.} =

trace "decodeIWant: decoding message"
var control = ControlIWant()
if ? pb.getRepeatedField(1, control.messageIds):
trace "decodeIWant: read messageIDs", message_ids = control.messageIds
if ? pb.getRepeatedField(1, control.messageIDs):
trace "decodeIWant: read messageIDs", message_ids = control.messageIDs
else:
trace "decodeIWant: no messageIDs"
ok(control)
@@ -213,6 +211,7 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
var iwantpbs: seq[seq[byte]]
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
var idontwant: seq[seq[byte]]
if ? cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(? decodeIHave(initProtoBuffer(item)))
@@ -225,6 +224,9 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
if ? cpb.getRepeatedField(4, prunepbs):
for item in prunepbs:
control.prune.add(? decodePrune(initProtoBuffer(item)))
if ? cpb.getRepeatedField(5, idontwant):
for item in idontwant:
control.idontwant.add(? decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics", graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
@@ -283,10 +285,11 @@ proc decodeMessage*(pb: ProtoBuffer): ProtoResult[Message] {.inline.} =
trace "decodeMessage: read seqno", seqno = msg.seqno
else:
trace "decodeMessage: seqno is missing"
if ? pb.getRepeatedField(4, msg.topicIds):
trace "decodeMessage: read topics", topic_ids = msg.topicIds
if ?pb.getField(4, msg.topic):
trace "decodeMessage: read topic", topic = msg.topic
else:
trace "decodeMessage: topics are missing"
trace "decodeMessage: topic is required"
return err(ProtoError.RequiredFieldMissing)
if ? pb.getField(5, msg.signature):
trace "decodeMessage: read signature", signature = msg.signature.shortLog()
else:
@@ -317,8 +320,14 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
pb.write(1, item)
for item in msg.messages:
pb.write(2, item, anonymize)
if msg.control.isSome():
pb.write(3, msg.control.get())
msg.control.withValue(control):
pb.write(3, control)
# nim-libp2p extension, using fields which are unlikely to be used
# by other extensions
if msg.ping.len > 0:
pb.write(60, msg.ping)
if msg.pong.len > 0:
pb.write(61, msg.pong)
if len(pb.buffer) > 0:
pb.finish()
pb.buffer
@@ -326,8 +335,10 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var rpcMsg = ok(RPCMsg())
assign(rpcMsg.get().messages, ? pb.decodeMessages())
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.get().control, ? pb.decodeControl())
rpcMsg
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ? pb.decodeMessages())
assign(rpcMsg.subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.control, ? pb.decodeControl())
discard ? pb.getField(60, rpcMsg.ping)
discard ? pb.getField(61, rpcMsg.pong)
ok(rpcMsg)
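A round-trip sketch for the single-topic wire format above. This assumes nim-libp2p is installed and uses this repository's module layout for the import paths; it is an illustration, not part of the diff.

import stew/results
import libp2p/protocols/pubsub/rpc/[messages, protobuf]

let original = RPCMsg(
  messages: @[Message(topic: "example-topic", data: @[byte 1, 2, 3])])

let wire = encodeRpcMsg(original, anonymize = false)
let decoded = decodeRpcMsg(wire).expect("valid encoding")
assert decoded.messages[0].topic == "example-topic"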
@@ -7,15 +7,15 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}

import std/[tables]
{.push raises: [].}

import std/[hashes, sets]
import chronos/timer, stew/results

import ../../utility

export results

const Timeout* = 10.seconds # default timeout in ms

type
@@ -27,20 +27,38 @@ type

TimedCache*[K] = object of RootObj
head, tail: TimedEntry[K] # nim linked list doesn't allow inserting at pos
entries: Table[K, TimedEntry[K]]
entries: HashSet[TimedEntry[K]]
timeout: Duration

func `==`*[E](a, b: TimedEntry[E]): bool =
if isNil(a) == isNil(b):
isNil(a) or a.key == b.key
else:
false

func hash*(a: TimedEntry): Hash =
if isNil(a):
default(Hash)
else:
hash(a[].key)

func expire*(t: var TimedCache, now: Moment = Moment.now()) =
while t.head != nil and t.head.expiresAt < now:
t.entries.del(t.head.key)
t.entries.excl(t.head)
t.head.prev = nil
t.head = t.head.next
if t.head == nil: t.tail = nil

func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
# Removes existing key from cache, returning the previous value if present
var item: TimedEntry[K]
if t.entries.pop(key, item):
let tmp = TimedEntry[K](key: key)
if tmp in t.entries:
let item = try:
t.entries[tmp] # use the shared instance in the set
except KeyError:
raiseAssert "just checked"
t.entries.excl(item)

if t.head == item: t.head = item.next
if t.tail == item: t.tail = item.prev

@@ -56,14 +74,14 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# refreshed.
t.expire(now)

var previous = t.del(k) # Refresh existing item

let addedAt =
if previous.isSome: previous.get().addedAt
else: now
let
previous = t.del(k) # Refresh existing item
addedAt = if previous.isSome():
previous[].addedAt
else:
now

let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)

if t.head == nil:
t.tail = node
t.head = t.tail
@@ -84,16 +102,24 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
if cur == t.tail:
t.tail = node

t.entries[k] = node
t.entries.incl(node)

previous.isSome()

func contains*[K](t: TimedCache[K], k: K): bool =
k in t.entries
let tmp = TimedEntry[K](key: k)
tmp in t.entries

func addedAt*[K](t: TimedCache[K], k: K): Moment =
t.entries.getOrDefault(k).addedAt
func addedAt*[K](t: var TimedCache[K], k: K): Moment =
let tmp = TimedEntry[K](key: k)
try:
if tmp in t.entries: # raising is slow
# Use shared instance from entries
return t.entries[tmp][].addedAt
except KeyError:
raiseAssert "just checked"

default(Moment)
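The trick behind the Table-to-HashSet switch above is that ref entries hash and compare by their key only, so the set can be probed with a throwaway entry carrying just the key. A self-contained sketch of that pattern (plain Nim, not the library code):

import std/[hashes, sets]

type Entry = ref object
  key: string
  value: int

func `==`(a, b: Entry): bool =
  if isNil(a) == isNil(b): isNil(a) or a.key == b.key
  else: false

func hash(a: Entry): Hash =
  if isNil(a): default(Hash) else: hash(a.key)

var cache = initHashSet[Entry]()
cache.incl Entry(key: "x", value: 42)
echo Entry(key: "x") in cache   # true: only the key participates in lookup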
func init*[K](T: type TimedCache[K], timeout: Duration = Timeout): T =
T(

@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -7,16 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import tables, sequtils, sugar, sets, options
import tables, sequtils, sugar, sets
import metrics except collect
import chronos,
chronicles,
bearssl/rand,
stew/[byteutils, objects]
stew/[byteutils, objects, results]
import ./protocol,
../switch,
../routing_record,
@@ -30,6 +28,11 @@ export chronicles
logScope:
topics = "libp2p discovery rendezvous"

declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")

const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
@@ -65,34 +68,34 @@ type
Register = object
ns : string
signedPeerRecord: seq[byte]
ttl: Option[uint64] # in seconds
ttl: Opt[uint64] # in seconds

RegisterResponse = object
status: ResponseStatus
text: Option[string]
ttl: Option[uint64] # in seconds
text: Opt[string]
ttl: Opt[uint64] # in seconds

Unregister = object
ns: string

Discover = object
ns: string
limit: Option[uint64]
cookie: Option[seq[byte]]
limit: Opt[uint64]
cookie: Opt[seq[byte]]

DiscoverResponse = object
registrations: seq[Register]
cookie: Option[seq[byte]]
cookie: Opt[seq[byte]]
status: ResponseStatus
text: Option[string]
text: Opt[string]

Message = object
msgType: MessageType
register: Option[Register]
registerResponse: Option[RegisterResponse]
unregister: Option[Unregister]
discover: Option[Discover]
discoverResponse: Option[DiscoverResponse]
register: Opt[Register]
registerResponse: Opt[RegisterResponse]
unregister: Opt[Unregister]
discover: Opt[Discover]
discoverResponse: Opt[DiscoverResponse]

proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
@@ -104,17 +107,17 @@ proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
if r.ttl.isSome():
result.write(3, r.ttl.get())
r.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()

proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
if rr.text.isSome():
result.write(2, rr.text.get())
if rr.ttl.isSome():
result.write(3, rr.ttl.get())
rr.text.withValue(text):
result.write(2, text)
rr.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()

proc encode(u: Unregister): ProtoBuffer =
@@ -125,48 +128,48 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.limit.isSome():
result.write(2, d.limit.get())
if d.cookie.isSome():
result.write(3, d.cookie.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
result.write(3, cookie)
result.finish()

proc encode(d: DiscoverResponse): ProtoBuffer =
proc encode(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in d.registrations:
for reg in dr.registrations:
result.write(1, reg.encode())
if d.cookie.isSome():
result.write(2, d.cookie.get())
result.write(3, d.status.uint)
if d.text.isSome():
result.write(4, d.text.get())
dr.cookie.withValue(cookie):
result.write(2, cookie)
result.write(3, dr.status.uint)
dr.text.withValue(text):
result.write(4, text)
result.finish()

proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
if msg.register.isSome():
result.write(2, msg.register.get().encode())
if msg.registerResponse.isSome():
result.write(3, msg.registerResponse.get().encode())
if msg.unregister.isSome():
result.write(4, msg.unregister.get().encode())
if msg.discover.isSome():
result.write(5, msg.discover.get().encode())
if msg.discoverResponse.isSome():
result.write(6, msg.discoverResponse.get().encode())
msg.register.withValue(register):
result.write(2, register.encode())
msg.registerResponse.withValue(registerResponse):
result.write(3, registerResponse.encode())
msg.unregister.withValue(unregister):
result.write(4, unregister.encode())
msg.discover.withValue(discover):
result.write(5, discover.encode())
msg.discoverResponse.withValue(discoverResponse):
result.write(6, discoverResponse.encode())
result.finish()

proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
if r1.isErr() or r2.isErr(): return none(Cookie)
some(c)
if r1.isErr() or r2.isErr(): return Opt.none(Cookie)
Opt.some(c)
|
||||
|
||||
proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
|
||||
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
|
||||
var
|
||||
r: Register
|
||||
ttl: uint64
|
||||
@@ -175,11 +178,11 @@ proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
|
||||
r1 = pb.getRequiredField(1, r.ns)
|
||||
r2 = pb.getRequiredField(2, r.signedPeerRecord)
|
||||
r3 = pb.getField(3, ttl)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
|
||||
if r3.get(): r.ttl = some(ttl)
|
||||
some(r)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr(): return Opt.none(Register)
|
||||
if r3.get(false): r.ttl = Opt.some(ttl)
|
||||
Opt.some(r)
|
||||
|
||||
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
|
||||
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
|
||||
var
|
||||
rr: RegisterResponse
|
||||
statusOrd: uint
|
||||
@@ -191,20 +194,20 @@ proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterRespo
|
||||
r2 = pb.getField(2, text)
|
||||
r3 = pb.getField(3, ttl)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr() or
|
||||
not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
|
||||
if r2.get(): rr.text = some(text)
|
||||
if r3.get(): rr.ttl = some(ttl)
|
||||
some(rr)
|
||||
not checkedEnumAssign(rr.status, statusOrd): return Opt.none(RegisterResponse)
|
||||
if r2.get(false): rr.text = Opt.some(text)
|
||||
if r3.get(false): rr.ttl = Opt.some(ttl)
|
||||
Opt.some(rr)
|
||||
|
||||
proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
|
||||
proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
|
||||
var u: Unregister
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getRequiredField(1, u.ns)
|
||||
if r1.isErr(): return none(Unregister)
|
||||
some(u)
|
||||
if r1.isErr(): return Opt.none(Unregister)
|
||||
Opt.some(u)
|
||||
|
||||
proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
|
||||
proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
|
||||
var
|
||||
d: Discover
|
||||
limit: uint64
|
||||
@@ -214,12 +217,12 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
|
||||
r1 = pb.getRequiredField(1, d.ns)
|
||||
r2 = pb.getField(2, limit)
|
||||
r3 = pb.getField(3, cookie)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
|
||||
if r2.get(): d.limit = some(limit)
|
||||
if r3.get(): d.cookie = some(cookie)
|
||||
some(d)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr: return Opt.none(Discover)
|
||||
if r2.get(false): d.limit = Opt.some(limit)
|
||||
if r3.get(false): d.cookie = Opt.some(cookie)
|
||||
Opt.some(d)
|
||||
|
||||
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
|
||||
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
|
||||
var
|
||||
dr: DiscoverResponse
|
||||
registrations: seq[seq[byte]]
|
||||
@@ -233,48 +236,47 @@ proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverRespo
|
||||
r3 = pb.getRequiredField(3, statusOrd)
|
||||
r4 = pb.getField(4, text)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
|
||||
not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
|
||||
not checkedEnumAssign(dr.status, statusOrd): return Opt.none(DiscoverResponse)
|
||||
for reg in registrations:
|
||||
var r: Register
|
||||
let regOpt = Register.decode(reg)
|
||||
if regOpt.isNone(): return none(DiscoverResponse)
|
||||
dr.registrations.add(regOpt.get())
|
||||
if r2.get(): dr.cookie = some(cookie)
|
||||
if r4.get(): dr.text = some(text)
|
||||
some(dr)
|
||||
let regOpt = Register.decode(reg).valueOr:
|
||||
return
|
||||
dr.registrations.add(regOpt)
|
||||
if r2.get(false): dr.cookie = Opt.some(cookie)
|
||||
if r4.get(false): dr.text = Opt.some(text)
|
||||
Opt.some(dr)
|
||||
|
||||
proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
|
||||
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
|
||||
var
|
||||
msg: Message
|
||||
statusOrd: uint
|
||||
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
|
||||
let
|
||||
pb = initProtoBuffer(buf)
|
||||
r1 = pb.getRequiredField(1, statusOrd)
|
||||
r2 = pb.getField(2, pbr)
|
||||
r3 = pb.getField(3, pbrr)
|
||||
r4 = pb.getField(4, pbu)
|
||||
r5 = pb.getField(5, pbd)
|
||||
r6 = pb.getField(6, pbdr)
|
||||
if r1.isErr() or r2.isErr() or r3.isErr() or
|
||||
r4.isErr() or r5.isErr() or r6.isErr() or
|
||||
not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
|
||||
if r2.get():
|
||||
let pb = initProtoBuffer(buf)
|
||||
|
||||
? pb.getRequiredField(1, statusOrd).toOpt
|
||||
if not checkedEnumAssign(msg.msgType, statusOrd): return Opt.none(Message)
|
||||
|
||||
if ? pb.getField(2, pbr).optValue:
|
||||
msg.register = Register.decode(pbr.buffer)
|
||||
if msg.register.isNone(): return none(Message)
|
||||
if r3.get():
|
||||
if msg.register.isNone(): return Opt.none(Message)
|
||||
|
||||
if ? pb.getField(3, pbrr).optValue:
|
||||
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
|
||||
if msg.registerResponse.isNone(): return none(Message)
|
||||
if r4.get():
|
||||
if msg.registerResponse.isNone(): return Opt.none(Message)
|
||||
|
||||
if ? pb.getField(4, pbu).optValue:
|
||||
msg.unregister = Unregister.decode(pbu.buffer)
|
||||
if msg.unregister.isNone(): return none(Message)
|
||||
if r5.get():
|
||||
if msg.unregister.isNone(): return Opt.none(Message)
|
||||
|
||||
if ? pb.getField(5, pbd).optValue:
|
||||
msg.discover = Discover.decode(pbd.buffer)
|
||||
if msg.discover.isNone(): return none(Message)
|
||||
if r6.get():
|
||||
if msg.discover.isNone(): return Opt.none(Message)
|
||||
|
||||
if ? pb.getField(6, pbdr).optValue:
|
||||
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
|
||||
if msg.discoverResponse.isNone(): return none(Message)
|
||||
some(msg)
|
||||
if msg.discoverResponse.isNone(): return Opt.none(Message)
|
||||
|
||||
Opt.some(msg)
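The rewritten Message.decode above threads protobuf fields through nim-results' ? operator: ? unwraps a non-empty Opt and otherwise returns an empty Opt from the enclosing proc. A small standalone illustration of that early-return behaviour, using hypothetical parsers that are not part of this codebase:

import std/strutils
import results

proc parsePort(s: string): Opt[int] =
  try:
    Opt.some(parseInt(s))
  except ValueError:
    Opt.none(int)

proc parseEndpoint(hostport: string): Opt[(string, int)] =
  let parts = hostport.rsplit(':', maxsplit = 1)
  if parts.len != 2:
    return Opt.none((string, int))
  let port = ? parsePort(parts[1])   # an empty Opt here aborts parseEndpoint
  Opt.some((parts[0], port))

when isMainModule:
  doAssert parseEndpoint("example.org:443").get() == ("example.org", 443)
  doAssert parseEndpoint("example.org:http").isNone()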
type
|
||||
@@ -314,7 +316,7 @@ proc sendRegisterResponse(conn: Connection,
|
||||
ttl: uint64) {.async.} =
|
||||
let msg = encode(Message(
|
||||
msgType: MessageType.RegisterResponse,
|
||||
registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
|
||||
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl)))))
|
||||
await conn.writeLp(msg.buffer)
|
||||
|
||||
proc sendRegisterResponseError(conn: Connection,
|
||||
@@ -322,7 +324,7 @@ proc sendRegisterResponseError(conn: Connection,
|
||||
text: string = "") {.async.} =
|
||||
let msg = encode(Message(
|
||||
msgType: MessageType.RegisterResponse,
|
||||
registerResponse: some(RegisterResponse(status: status, text: some(text)))))
|
||||
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text)))))
|
||||
await conn.writeLp(msg.buffer)
|
||||
|
||||
proc sendDiscoverResponse(conn: Connection,
|
||||
@@ -330,10 +332,10 @@ proc sendDiscoverResponse(conn: Connection,
|
||||
cookie: Cookie) {.async.} =
|
||||
let msg = encode(Message(
|
||||
msgType: MessageType.DiscoverResponse,
|
||||
discoverResponse: some(DiscoverResponse(
|
||||
discoverResponse: Opt.some(DiscoverResponse(
|
||||
status: Ok,
|
||||
registrations: s,
|
||||
cookie: some(cookie.encode().buffer)
|
||||
cookie: Opt.some(cookie.encode().buffer)
|
||||
))
|
||||
))
|
||||
await conn.writeLp(msg.buffer)
|
||||
@@ -343,7 +345,7 @@ proc sendDiscoverResponseError(conn: Connection,
|
||||
text: string = "") {.async.} =
|
||||
let msg = encode(Message(
|
||||
msgType: MessageType.DiscoverResponse,
|
||||
discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
|
||||
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text)))))
|
||||
await conn.writeLp(msg.buffer)
|
||||
|
||||
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
|
||||
@@ -378,6 +380,7 @@ proc save(rdv: RendezVous,
|
||||
|
||||
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||
trace "Received Register", peerId = conn.peerId, ns = r.ns
|
||||
libp2p_rendezvous_register.inc()
|
||||
if r.ns.len notin 1..255:
|
||||
return conn.sendRegisterResponseError(InvalidNamespace)
|
||||
let ttl = r.ttl.get(MinimumTTL)
|
||||
@@ -389,6 +392,8 @@ proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
|
||||
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
|
||||
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
|
||||
rdv.save(r.ns, conn.peerId, r)
|
||||
libp2p_rendezvous_registered.inc()
|
||||
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
|
||||
conn.sendRegisterResponse(ttl)
|
||||
|
||||
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
|
||||
@@ -398,11 +403,13 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
|
||||
for index in rdv.namespaces[nsSalted]:
|
||||
if rdv.registered[index].peerId == conn.peerId:
|
||||
rdv.registered[index].expiration = rdv.defaultDT
|
||||
libp2p_rendezvous_registered.dec()
|
||||
except KeyError:
|
||||
return
|
||||
|
||||
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
|
||||
trace "Received Discover", peerId = conn.peerId, ns = d.ns
|
||||
libp2p_rendezvous_discover.inc()
|
||||
if d.ns.len notin 0..255:
|
||||
await conn.sendDiscoverResponseError(InvalidNamespace)
|
||||
return
|
||||
@@ -411,7 +418,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
|
||||
cookie =
|
||||
if d.cookie.isSome():
|
||||
try:
|
||||
Cookie.decode(d.cookie.get()).get()
|
||||
Cookie.decode(d.cookie.tryGet()).tryGet()
|
||||
except CatchableError:
|
||||
await conn.sendDiscoverResponseError(InvalidCookie)
|
||||
return
|
||||
@@ -442,7 +449,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
|
||||
break
|
||||
if reg.expiration < n or index.uint64 <= cookie.offset: continue
|
||||
limit.dec()
|
||||
reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
|
||||
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
|
||||
reg.data
|
||||
rdv.rng.shuffle(s)
|
||||
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
|
||||
@@ -457,12 +464,13 @@ proc advertisePeer(rdv: RendezVous,
|
||||
await conn.writeLp(msg)
|
||||
let
|
||||
buf = await conn.readLp(4096)
|
||||
msgRecv = Message.decode(buf).get()
|
||||
msgRecv = Message.decode(buf).tryGet()
|
||||
if msgRecv.msgType != MessageType.RegisterResponse:
|
||||
trace "Unexpected register response", peer, msgType = msgRecv.msgType
|
||||
elif msgRecv.registerResponse.isNone() or
|
||||
msgRecv.registerResponse.get().status != ResponseStatus.Ok:
|
||||
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
|
||||
trace "Refuse to register", peer, response = msgRecv.registerResponse
|
||||
else:
|
||||
trace "Successfully registered", peer, response = msgRecv.registerResponse
|
||||
except CatchableError as exc:
|
||||
trace "exception in the advertise", error = exc.msg
|
||||
finally:
|
||||
@@ -470,19 +478,18 @@ proc advertisePeer(rdv: RendezVous,
|
||||
await rdv.sema.acquire()
|
||||
discard await advertiseWrap().withTimeout(5.seconds)
|
||||
|
||||
proc advertise*(rdv: RendezVous,
|
||||
method advertise*(rdv: RendezVous,
|
||||
ns: string,
|
||||
ttl: Duration = MinimumDuration) {.async.} =
|
||||
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
|
||||
if sprBuff.isErr():
|
||||
ttl: Duration = MinimumDuration) {.async, base.} =
|
||||
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
|
||||
raise newException(RendezVousError, "Wrong Signed Peer Record")
|
||||
if ns.len notin 1..255:
|
||||
raise newException(RendezVousError, "Invalid namespace")
|
||||
if ttl notin MinimumDuration..MaximumDuration:
|
||||
raise newException(RendezVousError, "Invalid time to live")
|
||||
let
|
||||
r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
|
||||
msg = encode(Message(msgType: MessageType.Register, register: some(r)))
|
||||
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
|
||||
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
|
||||
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
|
||||
let fut = collect(newSeq()):
|
||||
for peer in rdv.peers:
|
||||
@@ -498,7 +505,9 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
|
||||
collect(newSeq()):
|
||||
for index in rdv.namespaces[nsSalted]:
|
||||
if rdv.registered[index].expiration > n:
|
||||
SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
|
||||
let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
|
||||
continue
|
||||
res.data
|
||||
except KeyError as exc:
|
||||
@[]
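For orientation, a hedged usage sketch of the two entry points touched above, advertise and requestLocally. It assumes a Switch with the RendezVous protocol already mounted, a namespace of valid length and a TTL inside the accepted range; the import path is the one used elsewhere in this repository, and setup plus error handling are omitted:

import chronos
import libp2p/protocols/rendezvous

proc exampleAdvertise(rdv: RendezVous) {.async.} =
  # Advertise this node under an application namespace for three hours,
  # then list the peers already known locally for that namespace.
  await rdv.advertise("my-app/1.0.0", ttl = 3.hours)
  let known = rdv.requestLocally("my-app/1.0.0")
  echo "locally known peers: ", known.len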
@@ -519,38 +528,42 @@ proc request*(rdv: RendezVous,
|
||||
proc requestPeer(peer: PeerId) {.async.} =
|
||||
let conn = await rdv.switch.dial(peer, RendezVousCodec)
|
||||
defer: await conn.close()
|
||||
d.limit = some(limit)
|
||||
d.limit = Opt.some(limit)
|
||||
d.cookie =
|
||||
try:
|
||||
some(rdv.cookiesSaved[peer][ns])
|
||||
Opt.some(rdv.cookiesSaved[peer][ns])
|
||||
except KeyError as exc:
|
||||
none(seq[byte])
|
||||
Opt.none(seq[byte])
|
||||
await conn.writeLp(encode(Message(
|
||||
msgType: MessageType.Discover,
|
||||
discover: some(d))).buffer)
|
||||
discover: Opt.some(d))).buffer)
|
||||
let
|
||||
buf = await conn.readLp(65536)
|
||||
msgRcv = Message.decode(buf).get()
|
||||
if msgRcv.msgType != MessageType.DiscoverResponse or
|
||||
msgRcv.discoverResponse.isNone():
|
||||
msgRcv = Message.decode(buf).valueOr:
|
||||
debug "Message undecodable"
|
||||
return
|
||||
if msgRcv.msgType != MessageType.DiscoverResponse:
|
||||
debug "Unexpected discover response", msgType = msgRcv.msgType
|
||||
return
|
||||
let resp = msgRcv.discoverResponse.get()
|
||||
let resp = msgRcv.discoverResponse.valueOr:
|
||||
debug "Discover response is empty"
|
||||
return
|
||||
if resp.status != ResponseStatus.Ok:
|
||||
trace "Cannot discover", ns, status = resp.status, text = resp.text
|
||||
return
|
||||
if resp.cookie.isSome() and resp.cookie.get().len < 1000:
|
||||
if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
|
||||
rdv.cookiesSaved[peer][ns] = resp.cookie.get()
|
||||
resp.cookie.withValue(cookie):
|
||||
if cookie.len() < 1000 and rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
|
||||
rdv.cookiesSaved[peer][ns] = cookie
|
||||
for r in resp.registrations:
|
||||
if limit == 0: return
|
||||
if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
|
||||
let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
|
||||
if sprRes.isErr(): continue
|
||||
let pr = sprRes.get().data
|
||||
let ttl = r.ttl.get(MaximumTTL + 1)
|
||||
if ttl > MaximumTTL: continue
|
||||
let
|
||||
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr: continue
|
||||
pr = spr.data
|
||||
if s.hasKey(pr.peerId):
|
||||
let (prSaved, rSaved) = s[pr.peerId]
|
||||
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
|
||||
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
|
||||
prSaved.seqNo < pr.seqNo:
|
||||
s[pr.peerId] = (pr, r)
|
||||
else:
|
||||
@@ -589,7 +602,7 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
|
||||
rdv.unsubscribeLocally(ns)
|
||||
let msg = encode(Message(
|
||||
msgType: MessageType.Unregister,
|
||||
unregister: some(Unregister(ns: ns))))
|
||||
unregister: Opt.some(Unregister(ns: ns))))
|
||||
|
||||
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
|
||||
try:
|
||||
@@ -623,17 +636,17 @@ proc new*(T: typedesc[RendezVous],
|
||||
sema: newAsyncSemaphore(SemaphoreDefaultSize)
|
||||
)
|
||||
logScope: topics = "libp2p discovery rendezvous"
|
||||
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
|
||||
proc handleStream(conn: Connection, proto: string) {.async.} =
|
||||
try:
|
||||
let
|
||||
buf = await conn.readLp(4096)
|
||||
msg = Message.decode(buf).get()
|
||||
msg = Message.decode(buf).tryGet()
|
||||
case msg.msgType:
|
||||
of MessageType.Register: await rdv.register(conn, msg.register.get())
|
||||
of MessageType.Register: await rdv.register(conn, msg.register.tryGet())
|
||||
of MessageType.RegisterResponse:
|
||||
trace "Got an unexpected Register Response", response = msg.registerResponse
|
||||
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
|
||||
of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
|
||||
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.tryGet())
|
||||
of MessageType.Discover: await rdv.discover(conn, msg.discover.tryGet())
|
||||
of MessageType.DiscoverResponse:
|
||||
trace "Got an unexpected Discover Response", response = msg.discoverResponse
|
||||
except CancelledError as exc:
|
||||
@@ -657,21 +670,33 @@ proc new*(T: typedesc[RendezVous],
|
||||
proc deletesRegister(rdv: RendezVous) {.async.} =
|
||||
heartbeat "Register timeout", 1.minutes:
|
||||
let n = Moment.now()
|
||||
var total = 0
|
||||
rdv.registered.flushIfIt(it.expiration < n)
|
||||
for data in rdv.namespaces.mvalues():
|
||||
data.keepItIf(it >= rdv.registered.offset)
|
||||
total += data.len
|
||||
libp2p_rendezvous_registered.set(int64(total))
|
||||
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
|
||||
|
||||
method start*(rdv: RendezVous) {.async.} =
|
||||
method start*(
|
||||
rdv: RendezVous
|
||||
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if not rdv.registerDeletionLoop.isNil:
|
||||
warn "Starting rendezvous twice"
|
||||
return
|
||||
return fut
|
||||
rdv.registerDeletionLoop = rdv.deletesRegister()
|
||||
rdv.started = true
|
||||
fut
|
||||
|
||||
method stop*(rdv: RendezVous) {.async.} =
|
||||
method stop*(rdv: RendezVous): Future[void] {.async: (raises: [], raw: true).} =
|
||||
let fut = newFuture[void]()
|
||||
fut.complete()
|
||||
if rdv.registerDeletionLoop.isNil:
|
||||
warn "Stopping rendezvous without starting it"
|
||||
return
|
||||
return fut
|
||||
rdv.started = false
|
||||
rdv.registerDeletionLoop.cancel()
|
||||
rdv.registerDeletionLoop = nil
|
||||
fut
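start and stop above become chronos raw async procs: the body is not transformed, so it must build and complete a Future[void] by hand, which preserves the declared raises list without the overhead of the async transformation. A hedged standalone sketch of the same pattern for a hypothetical looping service:

import chronos

type Ticker = ref object
  loopFut: Future[void]

proc tickLoop(t: Ticker) {.async.} =
  while true:
    await sleepAsync(1.seconds)

proc start(t: Ticker): Future[void] {.async: (raises: [CancelledError], raw: true).} =
  # raw: hand back an already-completed future instead of awaiting anything.
  let fut = newFuture[void]("ticker.start")
  fut.complete()
  if t.loopFut.isNil:
    t.loopFut = t.tickLoop()
  fut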
@@ -1,5 +1,5 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/strformat
|
||||
import chronos
|
||||
@@ -92,7 +89,7 @@ type
|
||||
readCs: CipherState
|
||||
writeCs: CipherState
|
||||
|
||||
NoiseError* = object of LPError
|
||||
NoiseError* = object of LPStreamError
|
||||
NoiseHandshakeError* = object of NoiseError
|
||||
NoiseDecryptTagError* = object of NoiseError
|
||||
NoiseOversizedPayloadError* = object of NoiseError
|
||||
@@ -102,10 +99,10 @@ type
|
||||
|
||||
func shortLog*(conn: NoiseConnection): auto =
|
||||
try:
|
||||
if conn.isNil: "NoiseConnection(nil)"
|
||||
if conn == nil: "NoiseConnection(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
raiseAssert(exc.msg)
|
||||
|
||||
chronicles.formatIt(NoiseConnection): shortLog(it)
|
||||
|
||||
@@ -115,7 +112,7 @@ proc genKeyPair(rng: var HmacDrbgContext): KeyPair =
|
||||
|
||||
proc hashProtocol(name: string): MDigest[256] =
|
||||
# If protocol_name is less than or equal to HASHLEN bytes in length,
|
||||
# sets h equal to protocol_name with zero bytes appended to make HASHLEN bytes.
|
||||
# sets h to protocol_name with zero bytes appended to make HASHLEN bytes.
|
||||
# Otherwise sets h = HASH(protocol_name).
|
||||
|
||||
if name.len <= 32:
|
||||
@@ -136,7 +133,7 @@ proc encrypt(
|
||||
state: var CipherState,
|
||||
data: var openArray[byte],
|
||||
ad: openArray[byte]): ChaChaPolyTag
|
||||
{.noinit, raises: [Defect, NoiseNonceMaxError].} =
|
||||
{.noinit, raises: [NoiseNonceMaxError].} =
|
||||
|
||||
var nonce: ChaChaPolyNonce
|
||||
nonce[4..<12] = toBytesLE(state.n)
|
||||
@@ -145,10 +142,10 @@ proc encrypt(
|
||||
|
||||
inc state.n
|
||||
if state.n > NonceMax:
|
||||
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
|
||||
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
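The ChaCha20-Poly1305 nonce assembled in encrypt above is 12 bytes: the first four bytes stay zero and the 64-bit message counter (state.n) is written little-endian into bytes 4..11, which is also why the counter must never exceed NonceMax. A tiny sketch of that layout:

import stew/endians2

var nonce: array[12, byte]         # all zero initially
let counter = 7'u64                # per-message counter (state.n above)
nonce[4..<12] = toBytesLE(counter)
doAssert nonce[3] == 0'u8          # leading bytes untouched
doAssert nonce[4] == 7'u8 and nonce[11] == 0'u8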
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
{.raises: [Defect, NoiseNonceMaxError].} =
|
||||
{.raises: [NoiseNonceMaxError].} =
|
||||
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
|
||||
result.add(data)
|
||||
|
||||
@@ -160,7 +157,7 @@ proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
tag = byteutils.toHex(tag), data = result.shortLog, nonce = state.n - 1
|
||||
|
||||
proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
|
||||
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
|
||||
var
|
||||
tagIn = data.toOpenArray(data.len - ChaChaPolyTag.len, data.high).intoChaChaPolyTag
|
||||
tagOut: ChaChaPolyTag
|
||||
@@ -171,10 +168,11 @@ proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
|
||||
trace "decryptWithAd", tagIn = tagIn.shortLog, tagOut = tagOut.shortLog, nonce = state.n
|
||||
if tagIn != tagOut:
|
||||
debug "decryptWithAd failed", data = shortLog(data)
|
||||
raise newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.")
|
||||
raise (ref NoiseDecryptTagError)(msg:
|
||||
"decryptWithAd failed tag authentication.")
|
||||
inc state.n
|
||||
if state.n > NonceMax:
|
||||
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
|
||||
raise (ref NoiseNonceMaxError)(msg: "Noise max nonce value reached")
|
||||
|
||||
# Symmetricstate
|
||||
|
||||
@@ -184,8 +182,7 @@ proc init(_: type[SymmetricState]): SymmetricState =
|
||||
result.cs = CipherState(k: EmptyKey)
|
||||
|
||||
proc mixKey(ss: var SymmetricState, ikm: ChaChaPolyKey) =
|
||||
var
|
||||
temp_keys: array[2, ChaChaPolyKey]
|
||||
var temp_keys: array[2, ChaChaPolyKey]
|
||||
sha256.hkdf(ss.ck, ikm, [], temp_keys)
|
||||
ss.ck = temp_keys[0]
|
||||
ss.cs = CipherState(k: temp_keys[1])
|
||||
@@ -201,15 +198,14 @@ proc mixHash(ss: var SymmetricState, data: openArray[byte]) =
|
||||
|
||||
# We might use this for other handshake patterns/tokens
|
||||
proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
|
||||
var
|
||||
temp_keys: array[3, ChaChaPolyKey]
|
||||
var temp_keys: array[3, ChaChaPolyKey]
|
||||
sha256.hkdf(ss.ck, ikm, [], temp_keys)
|
||||
ss.ck = temp_keys[0]
|
||||
ss.mixHash(temp_keys[1])
|
||||
ss.cs = CipherState(k: temp_keys[2])
|
||||
|
||||
proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
|
||||
{.raises: [Defect, NoiseNonceMaxError].} =
|
||||
{.raises: [NoiseNonceMaxError].} =
|
||||
# according to spec if key is empty leave plaintext
|
||||
if ss.cs.hasKey:
|
||||
result = ss.cs.encryptWithAd(ss.h.data, data)
|
||||
@@ -218,7 +214,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
|
||||
ss.mixHash(result)
|
||||
|
||||
proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
|
||||
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
|
||||
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
|
||||
# according to spec if key is empty leave plaintext
|
||||
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
|
||||
result = ss.cs.decryptWithAd(ss.h.data, data)
|
||||
@@ -237,7 +233,8 @@ proc init(_: type[HandshakeState]): HandshakeState =
|
||||
|
||||
template write_e: untyped =
|
||||
trace "noise write e"
|
||||
# Sets e (which must be empty) to GENERATE_KEYPAIR(). Appends e.public_key to the buffer. Calls MixHash(e.public_key).
|
||||
# Sets e (which must be empty) to GENERATE_KEYPAIR().
|
||||
# Appends e.public_key to the buffer. Calls MixHash(e.public_key).
|
||||
hs.e = genKeyPair(p.rng[])
|
||||
msg.add hs.e.publicKey
|
||||
hs.ss.mixHash(hs.e.publicKey)
|
||||
@@ -278,33 +275,37 @@ template read_e: untyped =
|
||||
trace "noise read e", size = msg.len
|
||||
|
||||
if msg.len < Curve25519Key.len:
|
||||
raise newException(NoiseHandshakeError, "Noise E, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise E, expected more data")
|
||||
|
||||
# Sets re (which must be empty) to the next DHLEN bytes from the message. Calls MixHash(re.public_key).
|
||||
# Sets re (which must be empty) to the next DHLEN bytes from the message.
|
||||
# Calls MixHash(re.public_key).
|
||||
hs.re[0..Curve25519Key.high] = msg.toOpenArray(0, Curve25519Key.high)
|
||||
msg.consume(Curve25519Key.len)
|
||||
hs.ss.mixHash(hs.re)
|
||||
|
||||
template read_s: untyped =
|
||||
trace "noise read s", size = msg.len
|
||||
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True, or to the next DHLEN bytes otherwise.
|
||||
# Sets temp to the next DHLEN + 16 bytes of the message if HasKey() == True,
|
||||
# or to the next DHLEN bytes otherwise.
|
||||
# Sets rs (which must be empty) to DecryptAndHash(temp).
|
||||
let
|
||||
rsLen =
|
||||
if hs.ss.cs.hasKey:
|
||||
if msg.len < Curve25519Key.len + ChaChaPolyTag.len:
|
||||
raise newException(NoiseHandshakeError, "Noise S, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
|
||||
Curve25519Key.len + ChaChaPolyTag.len
|
||||
else:
|
||||
if msg.len < Curve25519Key.len:
|
||||
raise newException(NoiseHandshakeError, "Noise S, expected more data")
|
||||
raise (ref NoiseHandshakeError)(msg: "Noise S, expected more data")
|
||||
Curve25519Key.len
|
||||
hs.rs[0..Curve25519Key.high] =
|
||||
hs.ss.decryptAndHash(msg.toOpenArray(0, rsLen - 1))
|
||||
|
||||
msg.consume(rsLen)
|
||||
|
||||
proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
|
||||
proc readFrame(
|
||||
sconn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
var besize {.noinit.}: array[2, byte]
|
||||
await sconn.readExactly(addr besize[0], besize.len)
|
||||
let size = uint16.fromBytesBE(besize).int
|
||||
@@ -316,7 +317,11 @@ proc readFrame(sconn: Connection): Future[seq[byte]] {.async.} =
|
||||
await sconn.readExactly(addr buffer[0], buffer.len)
|
||||
return buffer
|
||||
|
||||
proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
proc writeFrame(
|
||||
sconn: Connection,
|
||||
buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
doAssert buf.len <= uint16.high.int
|
||||
var
|
||||
lesize = buf.len.uint16
|
||||
@@ -327,13 +332,24 @@ proc writeFrame(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
outbuf &= buf
|
||||
sconn.write(outbuf)
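readFrame and writeFrame above implement the Noise transport framing: every frame on the wire is a two-byte big-endian length followed by that many payload bytes, so a single frame can never exceed 65535 bytes. A standalone sketch of the framing, assuming only stew/endians2:

import stew/endians2

proc frame(payload: seq[byte]): seq[byte] =
  # 2-byte big-endian length prefix, then the payload itself.
  doAssert payload.len <= int(uint16.high)
  result = @(toBytesBE(uint16(payload.len)))
  result.add payload

proc unframe(buf: seq[byte]): seq[byte] =
  doAssert buf.len >= 2
  let size = int(uint16.fromBytesBE([buf[0], buf[1]]))
  doAssert buf.len == 2 + size
  buf[2 ..< 2 + size]

when isMainModule:
  let msg = @[byte 1, 2, 3]
  doAssert unframe(frame(msg)) == msg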
proc receiveHSMessage(sconn: Connection): Future[seq[byte]] = readFrame(sconn)
|
||||
proc sendHSMessage(sconn: Connection, buf: openArray[byte]): Future[void] =
|
||||
proc receiveHSMessage(
|
||||
sconn: Connection
|
||||
): Future[seq[byte]] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
readFrame(sconn)
|
||||
|
||||
proc sendHSMessage(
|
||||
sconn: Connection,
|
||||
buf: openArray[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
writeFrame(sconn, buf)
|
||||
|
||||
proc handshakeXXOutbound(
|
||||
p: Noise, conn: Connection,
|
||||
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
|
||||
p2pSecret: seq[byte]
|
||||
): Future[HandshakeResult] {.async: (raises: [
|
||||
CancelledError, LPStreamError]).} =
|
||||
const initiator = true
|
||||
var
|
||||
hs = HandshakeState.init()
|
||||
@@ -375,13 +391,16 @@ proc handshakeXXOutbound(
|
||||
await conn.sendHSMessage(msg.data)
|
||||
|
||||
let (cs1, cs2) = hs.ss.split()
|
||||
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
return HandshakeResult(
|
||||
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
finally:
|
||||
burnMem(hs)
|
||||
|
||||
proc handshakeXXInbound(
|
||||
p: Noise, conn: Connection,
|
||||
p2pSecret: seq[byte]): Future[HandshakeResult] {.async.} =
|
||||
p2pSecret: seq[byte]
|
||||
): Future[HandshakeResult] {.async: (raises: [
|
||||
CancelledError, LPStreamError]).} =
|
||||
const initiator = false
|
||||
|
||||
var
|
||||
@@ -425,11 +444,14 @@ proc handshakeXXInbound(
|
||||
let
|
||||
remoteP2psecret = hs.ss.decryptAndHash(msg.data)
|
||||
(cs1, cs2) = hs.ss.split()
|
||||
return HandshakeResult(cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
return HandshakeResult(
|
||||
cs1: cs1, cs2: cs2, remoteP2psecret: remoteP2psecret, rs: hs.rs)
|
||||
finally:
|
||||
burnMem(hs)
|
||||
|
||||
method readMessage*(sconn: NoiseConnection): Future[seq[byte]] {.async.} =
|
||||
method readMessage*(
|
||||
sconn: NoiseConnection
|
||||
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
while true: # Discard 0-length payloads
|
||||
let frame = await sconn.stream.readFrame()
|
||||
sconn.activity = true
|
||||
@@ -448,7 +470,7 @@ proc encryptFrame(
|
||||
sconn: NoiseConnection,
|
||||
cipherFrame: var openArray[byte],
|
||||
src: openArray[byte])
|
||||
{.raises: [Defect, NoiseNonceMaxError].} =
|
||||
{.raises: [NoiseNonceMaxError].} =
|
||||
# Frame consists of length + cipher data + tag
|
||||
doAssert src.len <= MaxPlainSize
|
||||
doAssert cipherFrame.len == 2 + src.len + sizeof(ChaChaPolyTag)
|
||||
@@ -461,7 +483,11 @@ proc encryptFrame(
|
||||
|
||||
cipherFrame[2 + src.len()..<cipherFrame.len] = tag
|
||||
|
||||
method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
|
||||
method write*(
|
||||
sconn: NoiseConnection,
|
||||
message: seq[byte]
|
||||
): Future[void] {.async: (raises: [
|
||||
CancelledError, LPStreamError], raw: true).} =
|
||||
# Fast path: `{.async.}` would introduce a copy of `message`
|
||||
const FramingSize = 2 + sizeof(ChaChaPolyTag)
|
||||
|
||||
@@ -481,7 +507,8 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
|
||||
try:
|
||||
encryptFrame(
|
||||
sconn,
|
||||
cipherFrames.toOpenArray(woffset, woffset + chunkSize + FramingSize - 1),
|
||||
cipherFrames.toOpenArray(
|
||||
woffset, woffset + chunkSize + FramingSize - 1),
|
||||
message.toOpenArray(offset, offset + chunkSize - 1))
|
||||
except NoiseNonceMaxError as exc:
|
||||
debug "Noise nonce exceeded"
|
||||
@@ -504,21 +531,28 @@ method write*(sconn: NoiseConnection, message: seq[byte]): Future[void] =
|
||||
# sequencing issues
|
||||
sconn.stream.write(cipherFrames)
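write above splits the plaintext into chunks of at most MaxPlainSize bytes and wraps each chunk in two length bytes plus a 16-byte Poly1305 tag (FramingSize). A back-of-the-envelope helper for the resulting wire size; the MaxPlainSize value here is only an illustrative bound consistent with the two-byte length field, not the module's exact constant:

const
  TagSize = 16                               # sizeof(ChaChaPolyTag)
  FramingSize = 2 + TagSize
  MaxPlainSize = int(uint16.high) - TagSize  # illustrative upper bound

proc wireSize(messageLen: Positive): int =
  # number of frames needed, then add the per-frame overhead
  let frames = (messageLen + MaxPlainSize - 1) div MaxPlainSize
  messageLen + frames * FramingSize

doAssert wireSize(100) == 100 + FramingSize
doAssert wireSize(2 * MaxPlainSize) == 2 * MaxPlainSize + 2 * FramingSize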
method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
|
||||
method handshake*(
|
||||
p: Noise,
|
||||
conn: Connection,
|
||||
initiator: bool,
|
||||
peerId: Opt[PeerId]
|
||||
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
|
||||
trace "Starting Noise handshake", conn, initiator
|
||||
|
||||
let timeout = conn.timeout
|
||||
conn.timeout = HandshakeTimeout
|
||||
|
||||
# https://github.com/libp2p/specs/tree/master/noise#libp2p-data-in-handshake-messages
|
||||
let
|
||||
signedPayload = p.localPrivateKey.sign(
|
||||
PayloadString & p.noiseKeys.publicKey.getBytes).tryGet()
|
||||
let signedPayload = p.localPrivateKey.sign(
|
||||
PayloadString & p.noiseKeys.publicKey.getBytes)
|
||||
if signedPayload.isErr():
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to sign public key: " & $signedPayload.error())
|
||||
|
||||
var
|
||||
libp2pProof = initProtoBuffer()
|
||||
libp2pProof.write(1, p.localPublicKey)
|
||||
libp2pProof.write(2, signedPayload.getBytes())
|
||||
libp2pProof.write(2, signedPayload.get().getBytes())
|
||||
# data field also there but not used!
|
||||
libp2pProof.finish()
|
||||
|
||||
@@ -537,30 +571,38 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
remoteSigBytes: seq[byte]
|
||||
|
||||
if not remoteProof.getField(1, remotePubKeyBytes).valueOr(false):
|
||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote public key bytes. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to deserialize remote public key bytes. (initiator: " &
|
||||
$initiator & ")")
|
||||
if not remoteProof.getField(2, remoteSigBytes).valueOr(false):
|
||||
raise newException(NoiseHandshakeError, "Failed to deserialize remote signature bytes. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to deserialize remote signature bytes. (initiator: " &
|
||||
$initiator & ")")
|
||||
|
||||
if not remotePubKey.init(remotePubKeyBytes):
|
||||
raise newException(NoiseHandshakeError, "Failed to decode remote public key. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to decode remote public key. (initiator: " & $initiator & ")")
|
||||
if not remoteSig.init(remoteSigBytes):
|
||||
raise newException(NoiseHandshakeError, "Failed to decode remote signature. (initiator: " & $initiator & ")")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to decode remote signature. (initiator: " & $initiator & ")")
|
||||
|
||||
let verifyPayload = PayloadString & handshakeRes.rs.getBytes
|
||||
if not remoteSig.verify(verifyPayload, remotePubKey):
|
||||
raise newException(NoiseHandshakeError, "Noise handshake signature verify failed.")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Noise handshake signature verify failed.")
|
||||
else:
|
||||
trace "Remote signature verified", conn
|
||||
|
||||
let pid = PeerId.init(remotePubKey).valueOr:
|
||||
raise newException(NoiseHandshakeError, "Invalid remote peer id: " & $error)
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Invalid remote peer id: " & $error)
|
||||
|
||||
trace "Remote peer id", pid = $pid
|
||||
|
||||
if peerId.isSome():
|
||||
let targetPid = peerId.get()
|
||||
peerId.withValue(targetPid):
|
||||
if not targetPid.validate():
|
||||
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Failed to validate expected peerId.")
|
||||
|
||||
if pid != targetPid:
|
||||
var
|
||||
@@ -570,7 +612,8 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
initiator, dealt_peer = conn,
|
||||
dealt_key = $failedKey, received_peer = $pid,
|
||||
received_key = $remotePubKey
|
||||
raise newException(NoiseHandshakeError, "Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
|
||||
raise (ref NoiseHandshakeError)(msg:
|
||||
"Noise handshake, peer id don't match! " & $pid & " != " & $targetPid)
|
||||
conn.peerId = pid
|
||||
|
||||
var tmp = NoiseConnection.new(conn, conn.peerId, conn.observedAddr)
|
||||
@@ -590,7 +633,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
|
||||
|
||||
return secure
|
||||
|
||||
method closeImpl*(s: NoiseConnection) {.async.} =
|
||||
method closeImpl*(s: NoiseConnection) {.async: (raises: []).} =
|
||||
await procCall SecureConn(s).closeImpl()
|
||||
|
||||
burnMem(s.readCs)
|
||||
@@ -601,15 +644,14 @@ method init*(p: Noise) {.gcsafe.} =
|
||||
p.codec = NoiseCodec
|
||||
|
||||
proc new*(
|
||||
T: typedesc[Noise],
|
||||
rng: ref HmacDrbgContext,
|
||||
privateKey: PrivateKey,
|
||||
outgoing: bool = true,
|
||||
commonPrologue: seq[byte] = @[]): T =
|
||||
|
||||
T: typedesc[Noise],
|
||||
rng: ref HmacDrbgContext,
|
||||
privateKey: PrivateKey,
|
||||
outgoing: bool = true,
|
||||
commonPrologue: seq[byte] = @[]): T =
|
||||
let pkBytes = privateKey.getPublicKey()
|
||||
.expect("Expected valid Private Key")
|
||||
.getBytes().expect("Couldn't get public Key bytes")
|
||||
.expect("Expected valid Private Key")
|
||||
.getBytes().expect("Couldn't get public Key bytes")
|
||||
|
||||
var noise = Noise(
|
||||
rng: rng,
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import chronos
|
||||
import secure, ../../stream/connection
|
||||
@@ -22,7 +19,7 @@ type
|
||||
|
||||
method init(p: PlainText) {.gcsafe.} =
|
||||
proc handle(conn: Connection, proto: string)
|
||||
{.async, gcsafe.} = discard
|
||||
{.async.} = discard
|
||||
## plain text doesn't do anything
|
||||
|
||||
p.codec = PlainTextCodec
|
||||
|
||||
@@ -1,452 +0,0 @@
|
||||
# Nim-LibP2P
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
|
||||
# at your option.
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[oids, strformat]
|
||||
import bearssl/rand
|
||||
import chronos, chronicles, stew/endians2
|
||||
import nimcrypto/[hmac, sha2, sha, hash, rijndael, twofish, bcmode]
|
||||
import secure,
|
||||
../../stream/connection,
|
||||
../../peerinfo,
|
||||
../../crypto/crypto,
|
||||
../../crypto/ecnist,
|
||||
../../peerid,
|
||||
../../utility,
|
||||
../../errors
|
||||
|
||||
export hmac, sha2, sha, hash, rijndael, bcmode
|
||||
|
||||
logScope:
|
||||
topics = "libp2p secio"
|
||||
|
||||
const
|
||||
SecioCodec* = "/secio/1.0.0"
|
||||
SecioMaxMessageSize = 8 * 1024 * 1024 ## 8mb
|
||||
SecioMaxMacSize = sha512.sizeDigest
|
||||
SecioNonceSize = 16
|
||||
SecioExchanges = "P-256,P-384,P-521"
|
||||
SecioCiphers = "TwofishCTR,AES-256,AES-128"
|
||||
SecioHashes = "SHA256,SHA512"
|
||||
|
||||
type
|
||||
Secio* = ref object of Secure
|
||||
rng: ref HmacDrbgContext
|
||||
localPrivateKey: PrivateKey
|
||||
localPublicKey: PublicKey
|
||||
remotePublicKey: PublicKey
|
||||
|
||||
SecureCipherType {.pure.} = enum
|
||||
Aes128, Aes256, Twofish
|
||||
|
||||
SecureMacType {.pure.} = enum
|
||||
Sha1, Sha256, Sha512
|
||||
|
||||
SecureCipher = object
|
||||
case kind: SecureCipherType
|
||||
of SecureCipherType.Aes128:
|
||||
ctxaes128: CTR[aes128]
|
||||
of SecureCipherType.Aes256:
|
||||
ctxaes256: CTR[aes256]
|
||||
of SecureCipherType.Twofish:
|
||||
ctxtwofish256: CTR[twofish256]
|
||||
|
||||
SecureMac = object
|
||||
case kind: SecureMacType
|
||||
of SecureMacType.Sha256:
|
||||
ctxsha256: HMAC[sha256]
|
||||
of SecureMacType.Sha512:
|
||||
ctxsha512: HMAC[sha512]
|
||||
of SecureMacType.Sha1:
|
||||
ctxsha1: HMAC[sha1]
|
||||
|
||||
SecioConn = ref object of SecureConn
|
||||
writerMac: SecureMac
|
||||
readerMac: SecureMac
|
||||
writerCoder: SecureCipher
|
||||
readerCoder: SecureCipher
|
||||
|
||||
SecioError* = object of LPError
|
||||
|
||||
func shortLog*(conn: SecioConn): auto =
|
||||
try:
|
||||
if conn.isNil: "SecioConn(nil)"
|
||||
else: &"{shortLog(conn.peerId)}:{conn.oid}"
|
||||
except ValueError as exc:
|
||||
raise newException(Defect, exc.msg)
|
||||
|
||||
chronicles.formatIt(SecioConn): shortLog(it)
|
||||
|
||||
proc init(mac: var SecureMac, hash: string, key: openArray[byte]) =
|
||||
if hash == "SHA256":
|
||||
mac = SecureMac(kind: SecureMacType.Sha256)
|
||||
mac.ctxsha256.init(key)
|
||||
elif hash == "SHA512":
|
||||
mac = SecureMac(kind: SecureMacType.Sha512)
|
||||
mac.ctxsha512.init(key)
|
||||
elif hash == "SHA1":
|
||||
mac = SecureMac(kind: SecureMacType.Sha1)
|
||||
mac.ctxsha1.init(key)
|
||||
|
||||
proc update(mac: var SecureMac, data: openArray[byte]) =
|
||||
case mac.kind
|
||||
of SecureMacType.Sha256:
|
||||
update(mac.ctxsha256, data)
|
||||
of SecureMacType.Sha512:
|
||||
update(mac.ctxsha512, data)
|
||||
of SecureMacType.Sha1:
|
||||
update(mac.ctxsha1, data)
|
||||
|
||||
proc sizeDigest(mac: SecureMac): int {.inline.} =
|
||||
case mac.kind
|
||||
of SecureMacType.Sha256:
|
||||
result = int(mac.ctxsha256.sizeDigest())
|
||||
of SecureMacType.Sha512:
|
||||
result = int(mac.ctxsha512.sizeDigest())
|
||||
of SecureMacType.Sha1:
|
||||
result = int(mac.ctxsha1.sizeDigest())
|
||||
|
||||
proc finish(mac: var SecureMac, data: var openArray[byte]) =
|
||||
case mac.kind
|
||||
of SecureMacType.Sha256:
|
||||
discard finish(mac.ctxsha256, data)
|
||||
of SecureMacType.Sha512:
|
||||
discard finish(mac.ctxsha512, data)
|
||||
of SecureMacType.Sha1:
|
||||
discard finish(mac.ctxsha1, data)
|
||||
|
||||
proc reset(mac: var SecureMac) =
|
||||
case mac.kind
|
||||
of SecureMacType.Sha256:
|
||||
reset(mac.ctxsha256)
|
||||
of SecureMacType.Sha512:
|
||||
reset(mac.ctxsha512)
|
||||
of SecureMacType.Sha1:
|
||||
reset(mac.ctxsha1)
|
||||
|
||||
proc init(sc: var SecureCipher, cipher: string, key: openArray[byte],
|
||||
iv: openArray[byte]) {.inline.} =
|
||||
if cipher == "AES-128":
|
||||
sc = SecureCipher(kind: SecureCipherType.Aes128)
|
||||
sc.ctxaes128.init(key, iv)
|
||||
elif cipher == "AES-256":
|
||||
sc = SecureCipher(kind: SecureCipherType.Aes256)
|
||||
sc.ctxaes256.init(key, iv)
|
||||
elif cipher == "TwofishCTR":
|
||||
sc = SecureCipher(kind: SecureCipherType.Twofish)
|
||||
sc.ctxtwofish256.init(key, iv)
|
||||
|
||||
proc encrypt(cipher: var SecureCipher, input: openArray[byte],
|
||||
output: var openArray[byte]) {.inline.} =
|
||||
case cipher.kind
|
||||
of SecureCipherType.Aes128:
|
||||
cipher.ctxaes128.encrypt(input, output)
|
||||
of SecureCipherType.Aes256:
|
||||
cipher.ctxaes256.encrypt(input, output)
|
||||
of SecureCipherType.Twofish:
|
||||
cipher.ctxtwofish256.encrypt(input, output)
|
||||
|
||||
proc decrypt(cipher: var SecureCipher, input: openArray[byte],
|
||||
output: var openArray[byte]) {.inline.} =
|
||||
case cipher.kind
|
||||
of SecureCipherType.Aes128:
|
||||
cipher.ctxaes128.decrypt(input, output)
|
||||
of SecureCipherType.Aes256:
|
||||
cipher.ctxaes256.decrypt(input, output)
|
||||
of SecureCipherType.Twofish:
|
||||
cipher.ctxtwofish256.decrypt(input, output)
|
||||
|
||||
proc macCheckAndDecode(sconn: SecioConn, data: var seq[byte]): bool =
|
||||
## This procedure checks the MAC of the received message ``data`` and, if the
## message is authenticated, decrypts it.
|
||||
##
|
||||
## Procedure returns ``false`` if message is too short or MAC verification
|
||||
## failed.
|
||||
var macData: array[SecioMaxMacSize, byte]
|
||||
let macsize = sconn.readerMac.sizeDigest()
|
||||
if len(data) < macsize:
|
||||
trace "Message is shorter then MAC size", message_length = len(data),
|
||||
mac_size = macsize
|
||||
return false
|
||||
let mark = len(data) - macsize
|
||||
sconn.readerMac.update(data.toOpenArray(0, mark - 1))
|
||||
sconn.readerMac.finish(macData)
|
||||
sconn.readerMac.reset()
|
||||
if not equalMem(addr data[mark], addr macData[0], macsize):
|
||||
trace "Invalid MAC",
|
||||
calculated = toHex(macData.toOpenArray(0, macsize - 1)),
|
||||
stored = toHex(data.toOpenArray(mark, data.high))
|
||||
return false
|
||||
|
||||
sconn.readerCoder.decrypt(data.toOpenArray(0, mark - 1),
|
||||
data.toOpenArray(0, mark - 1))
|
||||
data.setLen(mark)
|
||||
result = true
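macCheckAndDecode above documents the secio frame layout: ciphertext followed by an HMAC over that ciphertext, verified before decryption (encrypt-then-MAC). A rough verification-only sketch, assuming nimcrypto's HMAC finish() overload that returns the digest; note the original compares MACs with equalMem over the raw buffers:

import nimcrypto/[hmac, sha2]

proc splitAndVerify(frame: seq[byte], macKey: openArray[byte]): bool =
  # frame = ciphertext || mac; recompute the HMAC over the ciphertext part.
  var ctx: HMAC[sha256]
  ctx.init(macKey)
  let macSize = int(ctx.sizeDigest())
  if frame.len < macSize:
    return false
  let mark = frame.len - macSize
  ctx.update(frame.toOpenArray(0, mark - 1))
  let digest = ctx.finish()
  digest.data[0 ..< macSize] == frame[mark .. ^1]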
proc readRawMessage(conn: Connection): Future[seq[byte]] {.async.} =
|
||||
while true: # Discard 0-length payloads
|
||||
var lengthBuf: array[4, byte]
|
||||
await conn.readExactly(addr lengthBuf[0], lengthBuf.len)
|
||||
let length = uint32.fromBytesBE(lengthBuf)
|
||||
|
||||
trace "Recieved message header", header = lengthBuf.shortLog, length = length
|
||||
|
||||
if length > SecioMaxMessageSize: # Verify length before casting!
|
||||
trace "Received size of message exceed limits", conn, length = length
|
||||
raise (ref SecioError)(msg: "Message exceeds maximum length")
|
||||
|
||||
if length > 0:
|
||||
var buf = newSeq[byte](int(length))
|
||||
await conn.readExactly(addr buf[0], buf.len)
|
||||
trace "Received message body",
|
||||
conn, length = buf.len, buff = buf.shortLog
|
||||
return buf
|
||||
|
||||
trace "Discarding 0-length payload", conn
|
||||
|
||||
method readMessage*(sconn: SecioConn): Future[seq[byte]] {.async.} =
|
||||
## Read message from channel secure connection ``sconn``.
|
||||
when chronicles.enabledLogLevel == LogLevel.TRACE:
|
||||
logScope:
|
||||
stream_oid = $sconn.stream.oid
|
||||
var buf = await sconn.stream.readRawMessage()
|
||||
if sconn.macCheckAndDecode(buf):
|
||||
result = buf
|
||||
else:
|
||||
trace "Message MAC verification failed", buf = buf.shortLog
|
||||
raise (ref SecioError)(msg: "message failed MAC verification")
|
||||
|
||||
method write*(sconn: SecioConn, message: seq[byte]) {.async.} =
|
||||
## Write message ``message`` to secure connection ``sconn``.
|
||||
if message.len == 0:
|
||||
return
|
||||
|
||||
var
|
||||
left = message.len
|
||||
offset = 0
|
||||
while left > 0:
|
||||
let
|
||||
chunkSize = if left > SecioMaxMessageSize - 64: SecioMaxMessageSize - 64 else: left
|
||||
macsize = sconn.writerMac.sizeDigest()
|
||||
length = chunkSize + macsize
|
||||
|
||||
var msg = newSeq[byte](chunkSize + 4 + macsize)
|
||||
msg[0..<4] = uint32(length).toBytesBE()
|
||||
|
||||
sconn.writerCoder.encrypt(message.toOpenArray(offset, offset + chunkSize - 1),
|
||||
msg.toOpenArray(4, 4 + chunkSize - 1))
|
||||
left = left - chunkSize
|
||||
offset = offset + chunkSize
|
||||
let mo = 4 + chunkSize
|
||||
sconn.writerMac.update(msg.toOpenArray(4, 4 + chunkSize - 1))
|
||||
sconn.writerMac.finish(msg.toOpenArray(mo, mo + macsize - 1))
|
||||
sconn.writerMac.reset()
|
||||
|
||||
trace "Writing message", message = msg.shortLog, left, offset
|
||||
await sconn.stream.write(msg)
|
||||
sconn.activity = true
|
||||
|
||||
proc newSecioConn(conn: Connection,
|
||||
hash: string,
|
||||
cipher: string,
|
||||
secrets: Secret,
|
||||
order: int,
|
||||
remotePubKey: PublicKey): SecioConn
|
||||
{.raises: [Defect, LPError].} =
|
||||
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
|
||||
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
|
||||
## ``order``.
|
||||
|
||||
result = SecioConn.new(conn, conn.peerId, conn.observedAddr)
|
||||
|
||||
let i0 = if order < 0: 1 else: 0
|
||||
let i1 = if order < 0: 0 else: 1
|
||||
|
||||
trace "Writer credentials", mackey = secrets.macOpenArray(i0).shortLog,
|
||||
enckey = secrets.keyOpenArray(i0).shortLog,
|
||||
iv = secrets.ivOpenArray(i0).shortLog
|
||||
trace "Reader credentials", mackey = secrets.macOpenArray(i1).shortLog,
|
||||
enckey = secrets.keyOpenArray(i1).shortLog,
|
||||
iv = secrets.ivOpenArray(i1).shortLog
|
||||
result.writerMac.init(hash, secrets.macOpenArray(i0))
|
||||
result.readerMac.init(hash, secrets.macOpenArray(i1))
|
||||
result.writerCoder.init(cipher, secrets.keyOpenArray(i0),
|
||||
secrets.ivOpenArray(i0))
|
||||
result.readerCoder.init(cipher, secrets.keyOpenArray(i1),
|
||||
secrets.ivOpenArray(i1))
|
||||
|
||||
proc transactMessage(conn: Connection,
|
||||
msg: seq[byte]): Future[seq[byte]] {.async.} =
|
||||
trace "Sending message", message = msg.shortLog, length = len(msg)
|
||||
await conn.write(msg)
|
||||
return await conn.readRawMessage()
|
||||
|
||||
method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerId]): Future[SecureConn] {.async.} =
|
||||
var
|
||||
localNonce: array[SecioNonceSize, byte]
|
||||
remoteNonce: seq[byte]
|
||||
remoteBytesPubkey: seq[byte]
|
||||
remoteEBytesPubkey: seq[byte]
|
||||
remoteEBytesSig: seq[byte]
|
||||
remotePubkey: PublicKey
|
||||
remoteEPubkey: ecnist.EcPublicKey
|
||||
remoteESignature: Signature
|
||||
remoteExchanges: string
|
||||
remoteCiphers: string
|
||||
remoteHashes: string
|
||||
remotePeerId: PeerId
|
||||
localPeerId: PeerId
|
||||
localBytesPubkey = s.localPublicKey.getBytes().tryGet()
|
||||
|
||||
hmacDrbgGenerate(s.rng[], localNonce)
|
||||
|
||||
var request = createProposal(localNonce,
|
||||
localBytesPubkey,
|
||||
SecioExchanges,
|
||||
SecioCiphers,
|
||||
SecioHashes)
|
||||
|
||||
localPeerId = PeerId.init(s.localPublicKey).tryGet()
|
||||
|
||||
trace "Local proposal", schemes = SecioExchanges,
|
||||
ciphers = SecioCiphers,
|
||||
hashes = SecioHashes,
|
||||
pubkey = localBytesPubkey.shortLog,
|
||||
peer = localPeerId
|
||||
|
||||
var answer = await transactMessage(conn, request)
|
||||
|
||||
if len(answer) == 0:
|
||||
trace "Proposal exchange failed", conn
|
||||
raise (ref SecioError)(msg: "Proposal exchange failed")
|
||||
|
||||
if not decodeProposal(answer, remoteNonce, remoteBytesPubkey, remoteExchanges,
|
||||
remoteCiphers, remoteHashes):
|
||||
trace "Remote proposal decoding failed", conn
|
||||
raise (ref SecioError)(msg: "Remote proposal decoding failed")
|
||||
|
||||
if not remotePubkey.init(remoteBytesPubkey):
|
||||
trace "Remote public key incorrect or corrupted",
|
||||
pubkey = remoteBytesPubkey.shortLog
|
||||
raise (ref SecioError)(msg: "Remote public key incorrect or corrupted")
|
||||
|
||||
remotePeerId = PeerId.init(remotePubkey).tryGet()
|
||||
|
||||
if peerId.isSome():
|
||||
let targetPid = peerId.get()
|
||||
if not targetPid.validate():
|
||||
raise newException(SecioError, "Failed to validate expected peerId.")
|
||||
|
||||
if remotePeerId != targetPid:
|
||||
raise newException(SecioError, "Peer ids don't match!")
|
||||
conn.peerId = remotePeerId
|
||||
let order = getOrder(remoteBytesPubkey, localNonce, localBytesPubkey,
|
||||
remoteNonce).tryGet()
|
||||
trace "Remote proposal", schemes = remoteExchanges, ciphers = remoteCiphers,
|
||||
hashes = remoteHashes,
|
||||
pubkey = remoteBytesPubkey.shortLog, order = order,
|
||||
peer = remotePeerId
|
||||
|
||||
let scheme = selectBest(order, SecioExchanges, remoteExchanges)
|
||||
let cipher = selectBest(order, SecioCiphers, remoteCiphers)
|
||||
let hash = selectBest(order, SecioHashes, remoteHashes)
|
||||
if len(scheme) == 0 or len(cipher) == 0 or len(hash) == 0:
|
||||
trace "No algorithms in common", peer = remotePeerId
|
||||
raise (ref SecioError)(msg: "No algorithms in common")
|
||||
|
||||
trace "Encryption scheme selected", scheme = scheme, cipher = cipher,
|
||||
hash = hash
|
||||
|
||||
var ekeypair = ephemeral(scheme, s.rng[]).tryGet()
|
||||
# We need EC public key in raw binary form
|
||||
var epubkey = ekeypair.pubkey.getRawBytes().tryGet()
|
||||
var localCorpus = request[4..^1] & answer & epubkey
|
||||
var signature = s.localPrivateKey.sign(localCorpus).tryGet()
|
||||
|
||||
var localExchange = createExchange(epubkey, signature.getBytes())
|
||||
var remoteExchange = await transactMessage(conn, localExchange)
|
||||
if len(remoteExchange) == 0:
|
||||
trace "Corpus exchange failed", conn
|
||||
raise (ref SecioError)(msg: "Corpus exchange failed")
|
||||
|
||||
if not decodeExchange(remoteExchange, remoteEBytesPubkey, remoteEBytesSig):
|
||||
trace "Remote exchange decoding failed", conn
|
||||
raise (ref SecioError)(msg: "Remote exchange decoding failed")
|
||||
|
||||
if not remoteESignature.init(remoteEBytesSig):
|
||||
trace "Remote signature incorrect or corrupted", signature = remoteEBytesSig.shortLog
|
||||
raise (ref SecioError)(msg: "Remote signature incorrect or corrupted")
|
||||
|
||||
var remoteCorpus = answer & request[4..^1] & remoteEBytesPubkey
|
||||
if not remoteESignature.verify(remoteCorpus, remotePubkey):
|
||||
trace "Signature verification failed", scheme = $remotePubkey.scheme,
|
||||
signature = $remoteESignature,
|
||||
pubkey = $remotePubkey,
|
||||
corpus = $remoteCorpus
|
||||
raise (ref SecioError)(msg: "Signature verification failed")
|
||||
|
||||
trace "Signature verified", scheme = remotePubkey.scheme
|
||||
|
||||
if not remoteEPubkey.initRaw(remoteEBytesPubkey):
|
||||
trace "Remote ephemeral public key incorrect or corrupted",
|
||||
pubkey = toHex(remoteEBytesPubkey)
|
||||
raise (ref SecioError)(msg: "Remote ephemeral public key incorrect or corrupted")
|
||||
|
||||
var secret = getSecret(remoteEPubkey, ekeypair.seckey)
|
||||
if len(secret) == 0:
|
||||
trace "Shared secret could not be created"
|
||||
raise (ref SecioError)(msg: "Shared secret could not be created")
|
||||
|
||||
trace "Shared secret calculated", secret = secret.shortLog
|
||||
|
||||
var keys = stretchKeys(cipher, hash, secret)
|
||||
|
||||
trace "Authenticated encryption parameters",
|
||||
iv0 = toHex(keys.ivOpenArray(0)), key0 = keys.keyOpenArray(0).shortLog,
|
||||
mac0 = keys.macOpenArray(0).shortLog,
|
||||
iv1 = keys.ivOpenArray(1).shortLog, key1 = keys.keyOpenArray(1).shortLog,
|
||||
mac1 = keys.macOpenArray(1).shortLog
|
||||
|
||||
# Perform Nonce exchange over encrypted channel.
|
||||
|
||||
var secioConn = newSecioConn(conn, hash, cipher, keys, order, remotePubkey)
|
||||
result = secioConn
|
||||
await secioConn.write(remoteNonce)
|
||||
var res = await secioConn.readMessage()
|
||||
|
||||
if res != @localNonce:
|
||||
trace "Nonce verification failed", receivedNonce = res.shortLog,
|
||||
localNonce = localNonce.shortLog
|
||||
raise (ref SecioError)(msg: "Nonce verification failed")
|
||||
else:
|
||||
trace "Secure handshake succeeded"
|
||||
|
||||
method init(s: Secio) {.gcsafe.} =
|
||||
procCall Secure(s).init()
|
||||
s.codec = SecioCodec
|
||||
|
||||
proc new*(
|
||||
T: typedesc[Secio],
|
||||
rng: ref HmacDrbgContext,
|
||||
localPrivateKey: PrivateKey): T =
|
||||
let pkRes = localPrivateKey.getPublicKey()
|
||||
if pkRes.isErr:
|
||||
raise newException(Defect, "Invalid private key")
|
||||
|
||||
let secio = Secio(
|
||||
rng: rng,
|
||||
localPrivateKey: localPrivateKey,
|
||||
localPublicKey: pkRes.get(),
|
||||
)
|
||||
secio.init()
|
||||
secio
|
||||
@@ -1,5 +1,5 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -8,10 +8,7 @@
# those terms.

{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[strformat]
import stew/results
@@ -40,18 +37,19 @@ type

func shortLog*(conn: SecureConn): auto =
  try:
    if conn.isNil: "SecureConn(nil)"
    if conn == nil: "SecureConn(nil)"
    else: &"{shortLog(conn.peerId)}:{conn.oid}"
  except ValueError as exc:
    raise newException(Defect, exc.msg)
    raiseAssert(exc.msg)

chronicles.formatIt(SecureConn): shortLog(it)

proc new*(T: type SecureConn,
          conn: Connection,
          peerId: PeerId,
          observedAddr: Opt[MultiAddress],
          timeout: Duration = DefaultConnectionTimeout): T =
proc new*(
    T: type SecureConn,
    conn: Connection,
    peerId: PeerId,
    observedAddr: Opt[MultiAddress],
    timeout: Duration = DefaultConnectionTimeout): T =
  result = T(stream: conn,
             peerId: peerId,
             observedAddr: observedAddr,
@@ -66,55 +64,72 @@ method initStream*(s: SecureConn) =

  procCall Connection(s).initStream()

method closeImpl*(s: SecureConn) {.async.} =
method closeImpl*(s: SecureConn) {.async: (raises: []).} =
  trace "Closing secure conn", s, dir = s.dir
  if not(isNil(s.stream)):
  if s.stream != nil:
    await s.stream.close()

  await procCall Connection(s).closeImpl()

method readMessage*(c: SecureConn): Future[seq[byte]] {.async, base.} =
  doAssert(false, "Not implemented!")
method readMessage*(
    c: SecureConn
): Future[seq[byte]] {.async: (raises: [
    CancelledError, LPStreamError], raw: true), base.} =
  raiseAssert("Not implemented!")

method getWrapped*(s: SecureConn): Connection = s.stream

method handshake*(s: Secure,
                  conn: Connection,
                  initiator: bool,
                  peerId: Opt[PeerId]): Future[SecureConn] {.async, base.} =
  doAssert(false, "Not implemented!")
method handshake*(
    s: Secure,
    conn: Connection,
    initiator: bool,
    peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [
    CancelledError, LPStreamError], raw: true), base.} =
  raiseAssert("Not implemented!")
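
A hedged sketch of how a concrete implementation overrides the base handshake declared above; MySecure and its body are hypothetical, only the signature comes from the diff, and the imports of the surrounding module (chronos, stew/results, the libp2p stream types) are assumed:

type MySecure = ref object of Secure

method handshake*(
    s: MySecure,
    conn: Connection,
    initiator: bool,
    peerId: Opt[PeerId]
): Future[SecureConn] {.async: (raises: [CancelledError, LPStreamError]).} =
  # a real implementation negotiates keys here and returns a SecureConn
  # wrapping `conn`; this placeholder only demonstrates the signature
  raise (ref LPStreamError)(msg: "handshake not implemented in this sketch")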

proc handleConn(s: Secure,
                conn: Connection,
                initiator: bool,
                peerId: Opt[PeerId]): Future[Connection] {.async.} =
proc handleConn(
    s: Secure,
    conn: Connection,
    initiator: bool,
    peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [CancelledError, LPStreamError]).} =
  var sconn = await s.handshake(conn, initiator, peerId)
  # mark connection bottom level transport direction
  # this is the safest place to do this
  # we require this information in for example gossipsub
  sconn.transportDir = if initiator: Direction.Out else: Direction.In

  proc cleanup() {.async.} =
  proc cleanup() {.async: (raises: []).} =
    try:
      let futs = [conn.join(), sconn.join()]
      await futs[0] or futs[1]
      for f in futs:
        if not f.finished: await f.cancelAndWait() # cancel outstanding join()
      block:
        let
          fut1 = conn.join()
          fut2 = sconn.join()
        try: # https://github.com/status-im/nim-chronos/issues/516
          discard await race(fut1, fut2)
        except ValueError: raiseAssert("Futures list is not empty")
        # at least one join() completed, cancel pending one, if any
        if not fut1.finished: await fut1.cancelAndWait()
        if not fut2.finished: await fut2.cancelAndWait()
      block:
        let
          fut1 = sconn.close()
          fut2 = conn.close()
        await allFutures(fut1, fut2)
        static: doAssert typeof(fut1).E is void # Cannot fail
        static: doAssert typeof(fut2).E is void # Cannot fail

      await allFuturesThrowing(
        sconn.close(), conn.close())
    except CancelledError:
      # This is top-level procedure which will work as separate task, so it
      # do not need to propagate CancelledError.
      discard
    except CatchableError as exc:
      debug "error cleaning up secure connection", err = exc.msg, sconn

  if not isNil(sconn):
  if sconn != nil:
    # All the errors are handled inside `cleanup()` procedure.
    asyncSpawn cleanup()

  return sconn
  sconn
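
A hedged aside on the raises: [] annotation on cleanup() above: asyncSpawn treats a failed task as a programming error, so the spawned proc is written to swallow every error itself. A minimal illustration under that assumption (the proc name is made up):

import chronos

proc fireAndForget() {.async: (raises: []).} =
  try:
    await sleepAsync(10.millis)  # stand-in for the real cleanup work
  except CancelledError:
    discard                      # no awaiting caller to propagate cancellation to

asyncSpawn fireAndForget()       # safe: the future cannot fail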

method init*(s: Secure) =
  procCall LPProtocol(s).init()
@@ -130,23 +145,25 @@ method init*(s: Secure) =
      warn "securing connection canceled", conn
      await conn.close()
      raise exc
    except CatchableError as exc:
    except LPStreamError as exc:
      warn "securing connection failed", err = exc.msg, conn
      await conn.close()

  s.handler = handle

method secure*(s: Secure,
               conn: Connection,
               initiator: bool,
               peerId: Opt[PeerId]):
               Future[Connection] {.base.} =
  s.handleConn(conn, initiator, peerId)
method secure*(
    s: Secure,
    conn: Connection,
    peerId: Opt[PeerId]
): Future[Connection] {.async: (raises: [
    CancelledError, LPStreamError], raw: true), base.} =
  s.handleConn(conn, conn.dir == Direction.Out, peerId)

method readOnce*(s: SecureConn,
                 pbytes: pointer,
                 nbytes: int):
                 Future[int] {.async.} =
method readOnce*(
    s: SecureConn,
    pbytes: pointer,
    nbytes: int
): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} =
  doAssert(nbytes > 0, "nbytes must be positive integer")

  if s.isEof:
@@ -163,7 +180,7 @@ method readOnce*(s: SecureConn,
      raise err
    except CancelledError as exc:
      raise exc
    except CatchableError as err:
    except LPStreamError as err:
      debug "Error while reading message from secure connection, closing.",
        error = err.name,
        message = err.msg,

@@ -9,10 +9,7 @@

## This module implements Routing Records.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[sequtils, times]
import pkg/stew/results
@@ -45,14 +42,12 @@ proc decode*(

  ? pb.getRequiredField(2, record.seqNo)

  var addressInfos: seq[seq[byte]]
  let pb3 = ? pb.getRepeatedField(3, addressInfos)

  if pb3:
  if ? pb.getRepeatedField(3, addressInfos):
    for address in addressInfos:
      var addressInfo = AddressInfo()
      let subProto = initProtoBuffer(address)
      let f = subProto.getField(1, addressInfo.address)
      if f.isOk() and f.get():
      if f.get(false):
        record.addresses &= addressInfo

  if record.addresses.len == 0:

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import chronos, chronicles, times, tables, sequtils
import ../switch,
@@ -20,7 +17,7 @@ logScope:
  topics = "libp2p autorelay"

type
  OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}
  OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].}

  AutoRelayService* = ref object of Service
    running: bool
@@ -35,9 +32,12 @@ type
    addressMapper: AddressMapper
    rng: ref HmacDrbgContext

proc isRunning*(self: AutoRelayService): bool =
  return self.running

proc addressMapper(
  self: AutoRelayService,
  listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
  listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
  return concat(toSeq(self.relayAddresses.values))

proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
      self.onReservation(concat(toSeq(self.relayAddresses.values)))
    await sleepAsync chronos.seconds(ttl - 30)

method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
  self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
  self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
    return await addressMapper(self, listenAddrs)

  let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
  self.backingOff.keepItIf(it != pid)
  self.peerAvailable.fire()

proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
  while true:
    # Remove relayPeers that failed
    let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
      await self.peerAvailable.wait()
    await sleepAsync(200.millis)

method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
  if self.running:
    trace "Autorelay is already running"
    return
  self.running = true
  self.runner = self.innerRun(switch)

method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
  let hasBeenStopped = await procCall Service(self).stop(switch)
  if hasBeenStopped:
    self.running = false
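
For context, a hedged sketch of a callback with the relaxed OnReservationHandler shape introduced above; the handler body and the logged field are made up, and chronicles plus the libp2p multiaddress module are assumed to be imported:

import chronicles
import libp2p/multiaddress

let onReservation = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
  info "relay reservation updated", count = addresses.len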

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[tables, sequtils]

@@ -33,13 +30,9 @@ type
    onNewStatusHandler: StatusAndConfidenceHandler
    autoRelayService: AutoRelayService
    autonatService: AutonatService
    isPublicIPAddrProc: IsPublicIPAddrProc

  IsPublicIPAddrProc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].}

proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService,
          isPublicIPAddrProc: IsPublicIPAddrProc = isGlobal): T =
  return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc)
proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService): T =
  return T(autonatService: autonatService, autoRelayService: autoRelayService)

proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
  proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
@@ -52,14 +45,8 @@ proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Fut
  for address in switch.peerStore[AddressBook][peerId]:
    try:
      let isRelayed = address.contains(multiCodec("p2p-circuit"))
      if isRelayed.isErr() or isRelayed.get():
        continue
      if DNS.matchPartial(address):
      if not isRelayed.get(false) and address.isPublicMA():
        return await tryConnect(address)
      else:
        let ta = initTAddress(address)
        if ta.isOk() and self.isPublicIPAddrProc(ta.get()):
          return await tryConnect(address)
    except CatchableError as err:
      debug "Failed to create direct connection.", err = err.msg
      continue
@@ -107,10 +94,10 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =

  switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)

  self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
    if networkReachability == NetworkReachability.NotReachable:
  self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
    if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
      discard await self.autoRelayService.setup(switch)
    elif networkReachability == NetworkReachability.Reachable:
    elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
      discard await self.autoRelayService.stop(switch)

  # We do it here instead of in the AutonatService because this is useful only when hole punching.
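
On the Option[float] to Opt[float] change above: Opt is the stew/results option type, so a confidence value is now built and queried as sketched below (the values are illustrative only):

import stew/results

let
  measured = Opt.some(0.8)     # a reported confidence
  unknown  = Opt.none(float)   # no confidence available yet
assert measured.get() == 0.8
assert unknown.isNone()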

@@ -9,10 +9,7 @@

## This module implements Signed Envelope.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/sugar
import pkg/stew/[results, byteutils]
@@ -115,19 +112,12 @@ proc getField*(pb: ProtoBuffer, field: int,
  if not(res):
    ok(false)
  else:
    let env = Envelope.decode(buffer, domain)
    if env.isOk():
      value = env.get()
      ok(true)
    else:
      err(ProtoError.IncorrectBlob)
    value = Envelope.decode(buffer, domain).valueOr: return err(ProtoError.IncorrectBlob)
    ok(true)
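
The valueOr rewrite above is the stew/results idiom for collapsing an isOk/get/else ladder: unwrap the Result, or run the supplied block (which may return from the enclosing proc). A self-contained sketch with made-up names:

import stew/results

proc halve(x: int): Result[int, string] =
  if x mod 2 == 0: ok(x div 2)
  else: err("odd input")

proc halveOrZero(x: int): int =
  let h = halve(x).valueOr:
    return 0       # error branch: bail out of the enclosing proc
  h                # success branch: the unwrapped value

assert halveOrZero(8) == 4
assert halveOrZero(7) == 0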

proc write*(pb: var ProtoBuffer, field: int, env: Envelope): Result[void, CryptoError] =
  let e = env.encode()

  if e.isErr():
    return err(e.error)
  pb.write(field, e.get())
  let e = ? env.encode()
  pb.write(field, e)
  ok()

type
@@ -145,7 +135,7 @@ proc init*[T](_: typedesc[SignedPayload[T]],
      T.payloadType(),
      data.encode(),
      T.payloadDomain)

  ok(SignedPayload[T](data: data, envelope: envelope))

proc getField*[T](pb: ProtoBuffer, field: int,

Some files were not shown because too many files have changed in this diff.