Mirror of https://github.com/vacp2p/nim-libp2p.git
Synced 2026-01-10 13:58:17 -05:00

Compare commits: v1.1.0 ... drop-old-r
88 commits
Commit SHA1s:
6484d3fce4, a04f8d2757, 5d9478b0ec, f6775d2843, 0d0ef3519f, a10b8af737,
b16ec00327, df0b98bfdd, 451637a644, 20a8e57262, a1f3940c06, 14d1787de8,
07cab432ba, 0317d589ce, 4158849521, 362c94bf34, cbe70da155, e0f70b7177,
c1dfd58772, 04af0c4323, eb0890cd6f, 9bc5ec1566, 5594bcb33e, d46bcdb6ac,
9468bb6b4d, 2725be64ba, e3c967ad19, d2c98bd87d, 3011ba4326, c6566707fa,
3be681ec4d, 2ede0fa40c, 7c195ab927, 3230407ffe, deb72c8580, ce0685c272,
1f4b090227, fb05f5ae22, e12f65f193, 4b3bc4f819, 6791f5e7bb, 08d9c84aca,
4e7eaba67a, 5f7a3ab829, ebef85c9d7, 3fc1236659, fc4e9a8bb8, 60f953629d,
18b0f726df, 459f6851e7, 575344e2e9, 75871817ee, 61929aed6c, 56599f5b9d,
b2eac7ecbd, 20b0e40f7d, ff77d52851, 545a31d4f0, b76bac752f, c6aa085e98,
e03547ea3e, f80ce3133c, d6263bf751, 56c23a286a, 7a369dd1bf, b784167805,
440461b24b, fab1340020, 1721f078c7, 74c402ed9d, c45f9705ab, 81b861b34e,
43359dd9d1, f85d0f75ea, 66f9dc9167, 1c4d0832ce, 224f92e172, 5efa089196,
9d4c4307de, 49dfa84c6f, a65b7b028f, 67711478ce, c28d8bb353, eb78292d9c,
3725f6a95b, 3640b4dd89, 32085ca88a, c76d1e18ef
.github/workflows/bumper.yml vendored (7 lines changed)

@@ -24,13 +24,14 @@ jobs:
           repository: ${{ matrix.target.repo }}
           ref: ${{ matrix.target.branch }}
           path: nbc
           submodules: true
           fetch-depth: 0
           token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}

       - name: Checkout this ref
         run: |
-          cd nbc/vendor/nim-libp2p
+          cd nbc
+          git submodule update --init vendor/nim-libp2p
+          cd vendor/nim-libp2p
           git checkout $GITHUB_SHA

       - name: Commit this bump
@@ -38,7 +39,7 @@ jobs:
           cd nbc
           git config --global user.email "${{ github.actor }}@users.noreply.github.com"
           git config --global user.name = "${{ github.actor }}"
-          git commit -a -m "auto-bump nim-libp2p"
+          git commit --allow-empty -a -m "auto-bump nim-libp2p"
           git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
           git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
           git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}
.github/workflows/ci.yml vendored (2 lines changed)

@@ -28,7 +28,7 @@ jobs:
            cpu: amd64
          #- os: windows
          #  cpu: i386
-        branch: [version-1-2, version-1-6]
+        branch: [version-1-6]
        include:
          - target:
              os: linux
.github/workflows/daily.yml vendored (new file, 12 lines)

@@ -0,0 +1,12 @@
+name: Daily
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['version-1-6','version-2-0']"
+      cpu: "['amd64']"
.github/workflows/daily_common.yml vendored (new file, 84 lines)

@@ -0,0 +1,84 @@
+name: daily-common
+
+on:
+  workflow_call:
+    inputs:
+      nim-branch:
+        description: 'Nim branch'
+        required: true
+        type: string
+      cpu:
+        description: 'CPU'
+        required: true
+        type: string
+      exclude:
+        description: 'Exclude matrix configurations'
+        required: false
+        type: string
+        default: "[]"
+
+jobs:
+  delete-cache:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: snnaplab/delete-branch-cache-action@v1
+
+  build:
+    needs: delete-cache
+    timeout-minutes: 120
+    strategy:
+      fail-fast: false
+      matrix:
+        platform:
+          - os: linux
+            builder: ubuntu-20
+            shell: bash
+          - os: macos
+            builder: macos-12
+            shell: bash
+          - os: windows
+            builder: windows-2019
+            shell: msys2 {0}
+        branch: ${{ fromJSON(inputs.nim-branch) }}
+        cpu: ${{ fromJSON(inputs.cpu) }}
+        exclude: ${{ fromJSON(inputs.exclude) }}
+
+    defaults:
+      run:
+        shell: ${{ matrix.platform.shell }}
+
+    name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
+    runs-on: ${{ matrix.platform.builder }}
+    continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Nim
+        uses: "./.github/actions/install_nim"
+        with:
+          os: ${{ matrix.platform.os }}
+          shell: ${{ matrix.platform.shell }}
+          nim_branch: ${{ matrix.branch }}
+          cpu: ${{ matrix.cpu }}
+
+      - name: Setup Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: '~1.15.5'
+          cache: false
+
+      - name: Install p2pd
+        run: |
+          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
+
+      - name: Run tests
+        run: |
+          nim --version
+          nimble --version
+          nimble install -y --depsOnly
+          NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
+          if [[ "${{ matrix.branch }}" == "devel" ]]; then
+            echo -e "\nTesting with '--mm:orc':\n"
+            NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
+          fi
.github/workflows/daily_i386.yml vendored (new file, 13 lines)

@@ -0,0 +1,13 @@
+name: Daily i386
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['version-1-6','version-2-0', 'devel']"
+      cpu: "['i386']"
+      exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
.github/workflows/daily_nim_devel.yml vendored (new file, 12 lines)

@@ -0,0 +1,12 @@
+name: Daily Nim Devel
+on:
+  schedule:
+    - cron: "30 6 * * *"
+  workflow_dispatch:
+
+jobs:
+  call-multi-nim-common:
+    uses: ./.github/workflows/daily_common.yml
+    with:
+      nim-branch: "['devel']"
+      cpu: "['amd64']"
.github/workflows/doc.yml vendored (2 lines changed)

@@ -19,7 +19,7 @@ jobs:

      - uses: jiro4989/setup-nim-action@v1
        with:
-         nim-version: 'stable'
+         nim-version: '1.6.x'

      - name: Generate doc
        run: |
.github/workflows/interop.yml vendored (20 lines changed)

@@ -23,7 +23,7 @@ jobs:

      - name: Build image
        run: >
-         cd multidim-interop/impl/nim/v1.0 &&
+         cd transport-interop/impl/nim/v1.0 &&
          make commitSha=$GITHUB_SHA image_name=nim-libp2p-head

      - name: Create ping-version.json
@@ -45,10 +45,24 @@
            ]
          }
          EOF

        ) > ${{ github.workspace }}/test_head.json

-     - uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
+     - uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
        with:
          test-filter: nim-libp2p-head
          extra-versions: ${{ github.workspace }}/test_head.json
+
+  run-hole-punching-interop:
+    name: Run hole-punching interoperability tests
+    runs-on: ubuntu-22.04
+    steps:
+      - uses: actions/checkout@v4
+      - uses: docker/setup-buildx-action@v3
+      - name: Build image
+        run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
+      - name: Run tests
+        uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
+        with:
+          test-filter: nim-libp2p-head
+          extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
.github/workflows/multi_nim.yml vendored (file deleted, 82 lines)

@@ -1,82 +0,0 @@
-name: Daily
-on:
-  schedule:
-    - cron: "30 6 * * *"
-  workflow_dispatch:
-
-jobs:
-  delete-cache:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: snnaplab/delete-branch-cache-action@v1
-
-  build:
-    needs: delete-cache
-    timeout-minutes: 120
-    strategy:
-      fail-fast: false
-      matrix:
-        target:
-          - os: linux
-            cpu: amd64
-          - os: linux
-            cpu: i386
-          - os: macos
-            cpu: amd64
-          - os: windows
-            cpu: amd64
-          #- os: windows
-          #  cpu: i386
-        branch: [version-1-2, version-1-6, devel]
-        include:
-          - target:
-              os: linux
-            builder: ubuntu-20.04
-            shell: bash
-          - target:
-              os: macos
-            builder: macos-12
-            shell: bash
-          - target:
-              os: windows
-            builder: windows-2019
-            shell: msys2 {0}
-
-    defaults:
-      run:
-        shell: ${{ matrix.shell }}
-
-    name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
-    runs-on: ${{ matrix.builder }}
-    continue-on-error: ${{ matrix.branch == 'devel' }}
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Setup Nim
-        uses: "./.github/actions/install_nim"
-        with:
-          os: ${{ matrix.target.os }}
-          shell: ${{ matrix.shell }}
-          nim_branch: ${{ matrix.branch }}
-          cpu: ${{ matrix.target.cpu }}
-
-      - name: Setup Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: '~1.15.5'
-
-      - name: Install p2pd
-        run: |
-          V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
-
-      - name: Run tests
-        run: |
-          nim --version
-          nimble --version
-          nimble install -y --depsOnly
-          NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
-          if [[ "${{ matrix.branch }}" == "devel" ]]; then
-            echo -e "\nTesting with '--gc:orc':\n"
-            NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
-          fi
.gitignore vendored (1 line changed)

@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
 examples/*.md
 nimble.develop
 nimble.paths
+go-libp2p-daemon/
.pinned (31 lines changed)

@@ -1,16 +1,17 @@
-bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
-chronicles;https://github.com/status-im/nim-chronicles@#1e6350870855541b381d77d4659688bc0d2c4227
-chronos;https://github.com/status-im/nim-chronos@#ab5a8c2e0f6941fe3debd61dff0293790079d1b0
-dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
-faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
-httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
-json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
-metrics;https://github.com/status-im/nim-metrics@#abf3acc7f06cee9ee2c287d2f31413dc3df4c04e
-nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
-secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
-serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
-stew;https://github.com/status-im/nim-stew@#003fe9f0c83c2b0b2ccbd37087e6d1ccd30a3234
+bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
+chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
+chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
+dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
+faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
+httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
+json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
+metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
+nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
+results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
+secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
+serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
+stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
 testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
-unittest2;https://github.com/status-im/nim-unittest2@#883c7a50ad3b82158e64d074c5578fe33ab3c452
-websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
-zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a
+unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
+websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
+zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b
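Each `.pinned` line has the shape `name;url@#commit`. Note the new `results` entry (nim-results), which backs the `Option` to `Opt` migration appearing later in this diff. A sketch with a hypothetical `parsePin` helper, not part of the repo:

```nim
# Hypothetical helper: splits a .pinned line "name;url@#commit" in three.
import std/strutils

proc parsePin(line: string): tuple[name, url, commit: string] =
  let
    semi = line.split(';', maxsplit = 1)        # "name" / "url@#commit"
    atHash = semi[1].split("@#", maxsplit = 1)  # "url" / "commit"
  (semi[0], atHash[0], atHash[1])

let pin = parsePin("zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b")
doAssert pin.name == "zlib" and pin.commit.len == 40
```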
@@ -105,7 +105,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
 - Some of libp2p procedures are marked as `.public.`, they will remain compatible during each `MAJOR` version
 - The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)

-We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
+We aim to be compatible at all time with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`

 ## Development
 Clone and Install dependencies:
@@ -9,11 +9,8 @@ switch("warning", "ObservableStores:off")
 switch("warning", "LockLevel:off")
 --define:chronosStrictException
 --styleCheck:usages
-if (NimMajor, NimMinor) < (1, 6):
-  --styleCheck:hint
-else:
-  switch("warningAsError", "UseBase:on")
-  --styleCheck:error
+switch("warningAsError", "UseBase:on")
+--styleCheck:error

 # Avoid some rare stack corruption while using exceptions with a SEH-enabled
 # toolchain: https://github.com/status-im/nimbus-eth2/issues/3121
@@ -13,7 +13,7 @@ type
 proc new(T: typedesc[TestProto]): T =

   # every incoming connections will be in handled in this closure
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
     await conn.writeLp("Roger p2p!")

@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
 ##
 # The actual application
 ##
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng() # Single random number source for the whole application
     # port 0 will take a random available port

@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
 ##
 ##
 ## Let's now start to create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng()
     localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol

 proc new(T: typedesc[TestProto]): T =
   # every incoming connections will in be handled in this closure
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     # Read up to 1024 bytes from this connection, and transform them into
     # a string
     echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
 ## Again, pretty straight-forward, we just send a message on the connection.
 ##
 ## We can now create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let
     rng = newRng()
     testProto = TestProto.new()
@@ -108,7 +108,7 @@ type

 proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
   var res: MetricProto
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     let
       metrics = await res.metricGetter()
       asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
   return MetricList.decode(protobuf).tryGet()

 ## We can now create our main procedure:
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let rng = newRng()
   proc randomMetricGenerator: Future[MetricList] {.async.} =
     let metricCount = rng[].generate(uint32) mod 16
@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
 const DumbCodec = "/dumb/proto/1.0.0"
 type DumbProto = ref object of LPProtocol
 proc new(T: typedesc[DumbProto], nodeNumber: int): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
     await conn.close()
   return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
 ## (rendezvous in this case) as a bootnode. For this example, we'll
 ## create a bootnode, and then every peer will advertise itself on the
 ## bootnode, and use it to find other peers
-proc main() {.async, gcsafe.} =
+proc main() {.async.} =
   let bootNode = createSwitch()
   await bootNode.start()
@@ -143,7 +143,7 @@ proc draw(g: Game) =
 ## peer know that we are available, check that he is also available,
 ## and launch the game.
 proc new(T: typedesc[GameProto], g: Game): T =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
     defer: await conn.closeWithEof()
     if g.peerFound.finished or g.hasCandidate:
       await conn.close()
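A pattern running through all of these example hunks: `{.async, gcsafe.}` becomes plain `{.async.}`. A minimal sketch of why the handlers still satisfy gcsafe-requiring callback types, assuming (as the change itself suggests) that chronos's `async` transformation already yields gcsafe procs, making the explicit annotation redundant:

```nim
# Minimal sketch, assuming chronos async procs are gcsafe without
# the explicit annotation.
import chronos

proc handle(x: int): Future[int] {.async.} =  # no {.gcsafe.} needed
  await sleepAsync(1.milliseconds)
  return x * 2

# Still assignable to a gcsafe proc type, as protocol handlers are:
let fn: proc(x: int): Future[int] {.gcsafe.} = handle
echo waitFor fn(21)  # prints 42
```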
libp2p.nimble

@@ -7,7 +7,7 @@ description = "LibP2P implementation"
 license = "MIT"
 skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]

-requires "nim >= 1.2.0",
+requires "nim >= 1.6.0",
         "nimcrypto >= 0.4.1",
         "dnsclient >= 0.3.0 & < 0.4.0",
         "bearssl >= 0.1.4",
@@ -17,14 +17,24 @@ requires "nim >= 1.2.0",
         "secp256k1",
         "stew#head",
         "websock",
-        "unittest2 >= 0.0.5 & < 0.1.0"
+        "unittest2"

+let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
+let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
+let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
+let verbose = getEnv("V", "") notin ["", "0"]
+
+let cfg =
+  " --styleCheck:usages --styleCheck:error" &
+  (if verbose: "" else: " --verbosity:0 --hints:off") &
+  " --skipParentCfg --skipUserCfg -f" &
+  " --threads:on --opt:speed"
+
-import hashes, strutils
+import hashes

 proc runTest(filename: string, verify: bool = true, sign: bool = true,
             moreoptions: string = "") =
-  var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
-  excstr.add(" " & getEnv("NIMFLAGS") & " ")
-  excstr.add(" --verbosity:0 --hints:off ")
+  var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
   excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
   excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
   excstr.add(" " & moreoptions & " ")
@@ -34,7 +44,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
   rmFile "tests/" & filename.toExe

 proc buildSample(filename: string, run = false, extraFlags = "") =
-  var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
+  var excstr = nimc & " " & lang & " " & cfg & " " & flags & " -p:. " & extraFlags
   excstr.add(" examples/" & filename)
   exec excstr
   if run:
@@ -42,7 +52,7 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
   rmFile "examples/" & filename.toExe

 proc tutorialToMd(filename: string) =
-  let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
+  let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
   writeFile(filename.replace(".nim", ".md"), markdown)

 task testnative, "Runs libp2p native tests":
@@ -104,15 +114,12 @@ task examples_build, "Build the samples":
   buildSample("circuitrelay", true)
   buildSample("tutorial_1_connect", true)
   buildSample("tutorial_2_customproto", true)
-  if (NimMajor, NimMinor) > (1, 2):
-    # These tutorials relies on post 1.4 exception tracking
-    buildSample("tutorial_3_protobuf", true)
-    buildSample("tutorial_4_gossipsub", true)
-    buildSample("tutorial_5_discovery", true)
-    # Nico doesn't work in 1.2
-    exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
-    exec "nimble install -y nico"
-    buildSample("tutorial_6_game", false, "--styleCheck:off")
+  buildSample("tutorial_3_protobuf", true)
+  buildSample("tutorial_4_gossipsub", true)
+  buildSample("tutorial_5_discovery", true)
+  exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
+  exec "nimble install -y nico"
+  buildSample("tutorial_6_game", false, "--styleCheck:off")

 # pin system
 # while nimble lockfile
@@ -123,7 +130,7 @@ task pin, "Create a lockfile":
   # pinner.nim was originally here
   # but you can't read output from
   # a command in a nimscript
-  exec "nim c -r tools/pinner.nim"
+  exec nimc & " c -r tools/pinner.nim"
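The reworked tasks replace hard-coded `nim c ...` strings with a compiler, backend, and flags taken from the environment, so CI can vary them per job. A standalone NimScript sketch of the pattern (hypothetical file, not part of the repo):

```nim
# NimScript: getEnv(name, default) falls back when the variable is unset,
# so CI can run e.g.  NIMC=nim_devel NIMFLAGS="--mm:orc" nimble test
let nimc  = getEnv("NIMC", "nim")    # which compiler binary
let lang  = getEnv("NIMLANG", "c")   # which backend: c / cpp / js
let flags = getEnv("NIMFLAGS", "")   # extra compiler flags
echo nimc, " ", lang, " ", flags, " -r tests/testnative.nim"
```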
 import sequtils
 import os

@@ -16,10 +16,7 @@ runnableExamples:
   # etc
   .build()

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import
   options, tables, chronos, chronicles, sequtils,
@@ -28,7 +25,7 @@ import
   muxers/[muxer, mplex/mplex, yamux/yamux],
   protocols/[identify, secure/secure, secure/noise, rendezvous],
   protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
-  connmanager, upgrademngrs/muxedupgrade,
+  connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
   nameresolving/nameresolver,
   errors, utility

@@ -36,7 +33,7 @@ export
   switch, peerid, peerinfo, connection, multiaddress, crypto, errors

 type
-  TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [Defect].}
+  TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}

   SecureProtocol* {.pure.} = enum
     Noise,
@@ -57,11 +54,12 @@ type
     protoVersion: string
     agentVersion: string
     nameResolver: NameResolver
-    peerStoreCapacity: Option[int]
+    peerStoreCapacity: Opt[int]
     autonat: bool
     circuitRelay: Relay
     rdv: RendezVous
     services: seq[Service]
+    observedAddrManager: ObservedAddrManager

 proc new*(T: type[SwitchBuilder]): T {.public.} =
   ## Creates a SwitchBuilder
@@ -124,8 +122,8 @@ proc withMplex*(
   b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
   b

-proc withYamux*(b: SwitchBuilder): SwitchBuilder =
-  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
+proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
+  proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)

   assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux build multiple times"
   b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
@@ -173,7 +171,7 @@ proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder
   b

 proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
-  b.peerStoreCapacity = some(capacity)
+  b.peerStoreCapacity = Opt.some(capacity)
   b

 proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
@@ -204,8 +202,12 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
   b.services = services
   b

+proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
+  b.observedAddrManager = observedAddrManager
+  b
+
 proc build*(b: SwitchBuilder): Switch
-  {.raises: [Defect, LPError], public.} =
+  {.raises: [LPError], public.} =

   if b.rng == nil: # newRng could fail
     raise newException(Defect, "Cannot initialize RNG")
@@ -226,11 +228,16 @@ proc build*(b: SwitchBuilder): Switch
     protoVersion = b.protoVersion,
     agentVersion = b.agentVersion)

+  let identify =
+    if b.observedAddrManager != nil:
+      Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
+    else:
+      Identify.new(peerInfo, b.sendSignedPeerRecord)
+
   let
-    identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
     connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
     ms = MultistreamSelect.new()
-    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
+    muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)

   let
     transports = block:
@@ -245,9 +252,9 @@ proc build*(b: SwitchBuilder): Switch
   if isNil(b.rng):
     b.rng = newRng()

-  let peerStore =
-    if isSome(b.peerStoreCapacity):
-      PeerStore.new(identify, b.peerStoreCapacity.get())
+  let peerStore = block:
+    b.peerStoreCapacity.withValue(capacity):
+      PeerStore.new(identify, capacity)
     else:
       PeerStore.new(identify)

@@ -296,7 +303,7 @@ proc newStandardSwitch*(
   nameResolver: NameResolver = nil,
   sendSignedPeerRecord = false,
   peerStoreCapacity = 1000): Switch
-    {.raises: [Defect, LPError], public.} =
+    {.raises: [LPError], public.} =
   ## Helper for common switch configurations.
   {.push warning[Deprecated]:off.}
   if SecureProtocol.Secio in secureManagers:
@@ -319,7 +326,7 @@ proc newStandardSwitch*(
     .withNameResolver(nameResolver)
     .withNoise()

-  if privKey.isSome():
-    b = b.withPrivateKey(privKey.get())
+  privKey.withValue(pkey):
+    b = b.withPrivateKey(pkey)

   b.build()
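`peerStoreCapacity` switching from `Option[int]` to `Opt[int]` is part of a migration visible throughout this diff (see also `debugutils` further down): `Opt` comes from stew/results, now also pinned as the standalone nim-results package. A minimal sketch of the parts of the API these hunks use, assuming the `results` package is installed; the `withValue(...): ... else:` form seen above appears to be a convenience template layered on top by nim-libp2p, unwrapping the value into a named variable and falling back to the `else` branch when empty:

```nim
import results  # nim-results; Opt[T] is a Result[T, void]

var capacity = Opt.none(int)       # replaces none(int)
capacity = Opt.some(1000)          # replaces some(1000)

doAssert capacity.isSome()
doAssert capacity.get(0) == 1000   # get(default) never raises
doAssert Opt.none(int).get(0) == 0
```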
@@ -9,10 +9,7 @@

 ## This module implementes CID (Content IDentifier).

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import tables, hashes
 import multibase, multicodec, multihash, vbuffer, varint
@@ -279,9 +276,6 @@ proc `$`*(cid: Cid): string =
     BTCBase58.encode(cid.data.buffer)
   elif cid.cidver == CIDv1:
     let res = MultiBase.encode("base58btc", cid.data.buffer)
-    if res.isOk():
-      res.get()
-    else:
-      ""
+    res.get("")
   else:
     ""
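The same cleanup repeats across most files below: with Nim 1.2 support dropped, the version-gated `when (NimMajor, NimMinor) < (1, 4)` block collapses into a bare `{.push raises: [].}`. Since Nim 1.4, `Defect` is no longer tracked by the effect system, so the empty list is the strictest spelling. A minimal sketch of what the pragma enforces, assuming Nim 1.6 or newer:

```nim
import std/strutils

{.push raises: [].}  # no CatchableError may escape procs below

proc safeParse(s: string): int =
  try:
    parseInt(s)        # parseInt can raise ValueError...
  except ValueError:
    0                  # ...so it must be handled (or declared) here

{.pop.}

doAssert safeParse("42") == 42
doAssert safeParse("oops") == 0
```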
@@ -7,12 +7,9 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

-import std/[options, tables, sequtils, sets]
+import std/[tables, sequtils, sets]
 import pkg/[chronos, chronicles, metrics]
 import peerinfo,
       peerstore,
@@ -51,7 +48,7 @@ type

   ConnEventHandler* =
     proc(peerId: PeerId, event: ConnEvent): Future[void]
-      {.gcsafe, raises: [Defect].}
+      {.gcsafe, raises: [].}

   PeerEventKind* {.pure.} = enum
     Left,
@@ -65,7 +62,7 @@ type
       discard

   PeerEventHandler* =
-    proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
+    proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}

   ConnManager* = ref object of RootObj
     maxConnsPerPeer: int
@@ -131,7 +128,7 @@ proc removeConnEventHandler*(c: ConnManager,

 proc triggerConnEvent*(c: ConnManager,
                        peerId: PeerId,
-                       event: ConnEvent) {.async, gcsafe.} =
+                       event: ConnEvent) {.async.} =
   try:
     trace "About to trigger connection events", peer = peerId
     if c.connEvents[event.kind].len() > 0:
@@ -163,7 +160,7 @@ proc removePeerEventHandler*(c: ConnManager,

 proc triggerPeerEvents*(c: ConnManager,
                         peerId: PeerId,
-                        event: PeerEvent) {.async, gcsafe.} =
+                        event: PeerEvent) {.async.} =

   trace "About to trigger peer events", peer = peerId
   if c.peerEvents[event.kind].len == 0:
@@ -285,7 +282,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =

 proc storeMuxer*(c: ConnManager,
                  muxer: Muxer)
-                 {.raises: [Defect, CatchableError].} =
+                 {.raises: [CatchableError].} =
   ## store the connection and muxer
   ##

@@ -338,7 +335,7 @@ proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
   await c.inSema.acquire()
   return ConnectionSlot(connManager: c, direction: In)

-proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
+proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
   if forceDial:
     c.outSema.forceAcquire()
   elif not c.outSema.tryAcquire():
@@ -382,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
   cs.trackConnection(mux.connection)

 proc getStream*(c: ConnManager,
-                muxer: Muxer): Future[Connection] {.async, gcsafe.} =
+                muxer: Muxer): Future[Connection] {.async.} =
   ## get a muxed stream for the passed muxer
   ##

@@ -390,7 +387,7 @@ proc getStream*(c: ConnManager,
   return await muxer.newStream()

 proc getStream*(c: ConnManager,
-                peerId: PeerId): Future[Connection] {.async, gcsafe.} =
+                peerId: PeerId): Future[Connection] {.async.} =
   ## get a muxed stream for the passed peer from any connection
   ##

@@ -398,7 +395,7 @@ proc getStream*(c: ConnManager,

 proc getStream*(c: ConnManager,
                 peerId: PeerId,
-                dir: Direction): Future[Connection] {.async, gcsafe.} =
+                dir: Direction): Future[Connection] {.async.} =
   ## get a muxed stream for the passed peer from a connection with `dir`
   ##
@@ -15,10 +15,7 @@

 # RFC @ https://tools.ietf.org/html/rfc7539

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/blockx
 from stew/assign2 import assign
@@ -8,10 +8,7 @@
 # those terms.

 ## This module implements Public Key and Private Key interface for libp2p.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 from strutils import split, strip, cmpIgnoreCase

@@ -68,11 +65,13 @@ when supported(PKScheme.Ed25519):
   import ed25519/ed25519
 when supported(PKScheme.Secp256k1):
   import secp
+when supported(PKScheme.ECDSA):
+  import ecnist

-# We are still importing `ecnist` because, it is used for SECIO handshake,
-# but it will be impossible to create ECNIST keys or import ECNIST keys.
-import ecnist, bearssl/rand, bearssl/hash as bhash
+# These used to be declared in `crypto` itself
+export ecnist.ephemeral, ecnist.ECDHEScheme
+
+import bearssl/rand, bearssl/hash as bhash
 import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
 import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -89,8 +88,6 @@ type
     Sha256,
     Sha512

-  ECDHEScheme* = EcCurveKind
-
   PublicKey* = object
     case scheme*: PKScheme
     of PKScheme.RSA:
@@ -458,7 +455,8 @@ proc getBytes*(sig: Signature): seq[byte] =
   ## Return signature ``sig`` in binary form.
   result = sig.data

-proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
+template initImpl[T: PrivateKey|PublicKey](
+    key: var T, data: openArray[byte]): bool =
   ## Initialize private key ``key`` from libp2p's protobuf serialized raw
   ## binary form.
   ##
@@ -471,7 +469,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
   var pb = initProtoBuffer(@data)
   let r1 = pb.getField(1, id)
   let r2 = pb.getField(2, buffer)
-  if not(r1.isOk() and r1.get() and r2.isOk() and r2.get()):
+  if not(r1.get(false) and r2.get(false)):
     false
   else:
     if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -520,6 +518,14 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
   else:
     false

+{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
+proc init*(key: var PrivateKey, data: openArray[byte]): bool =
+  initImpl(key, data)
+
+proc init*(key: var PublicKey, data: openArray[byte]): bool =
+  initImpl(key, data)
+{.pop.}
+
 proc init*(sig: var Signature, data: openArray[byte]): bool =
   ## Initialize signature ``sig`` from raw binary form.
   ##
@@ -873,34 +879,6 @@ proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
   offset += secret.ivsize + secret.keysize
   copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)

-proc ephemeral*(
-    scheme: ECDHEScheme,
-    rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
-  ## Generate ephemeral keys used to perform ECDHE.
-  var keypair: EcKeyPair
-  if scheme == Secp256r1:
-    keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
-  elif scheme == Secp384r1:
-    keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
-  elif scheme == Secp521r1:
-    keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
-  ok(keypair)
-
-proc ephemeral*(
-    scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
-  ## Generate ephemeral keys used to perform ECDHE using string encoding.
-  ##
-  ## Currently supported encoding strings are P-256, P-384, P-521, if encoding
-  ## string is not supported P-521 key will be generated.
-  if scheme == "P-256":
-    ephemeral(Secp256r1, rng)
-  elif scheme == "P-384":
-    ephemeral(Secp384r1, rng)
-  elif scheme == "P-521":
-    ephemeral(Secp521r1, rng)
-  else:
-    ephemeral(Secp521r1, rng)
-
 proc getOrder*(remotePubkey, localNonce: openArray[byte],
                localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
   ## Compare values and calculate `order` parameter.
@@ -976,9 +954,8 @@ proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
   let r4 = pb.getField(4, ciphers)
   let r5 = pb.getField(5, hashes)

-  r1.isOk() and r1.get() and r2.isOk() and r2.get() and
-  r3.isOk() and r3.get() and r4.isOk() and r4.get() and
-  r5.isOk() and r5.get()
+  r1.get(false) and r2.get(false) and r3.get(false) and
+  r4.get(false) and r5.get(false)

 proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
   ## Create SecIO exchange message using ephemeral public key ``epubkey`` and
@@ -998,32 +975,32 @@ proc decodeExchange*(message: seq[byte],
   var pb = initProtoBuffer(message)
   let r1 = pb.getField(1, pubkey)
   let r2 = pb.getField(2, signature)
-  r1.isOk() and r1.get() and r2.isOk() and r2.get()
+  r1.get(false) and r2.get(false)

 ## Serialization/Deserialization helpers

 proc write*(vb: var VBuffer, pubkey: PublicKey) {.
-    inline, raises: [Defect, ResultError[CryptoError]].} =
+    inline, raises: [ResultError[CryptoError]].} =
   ## Write PublicKey value ``pubkey`` to buffer ``vb``.
   vb.writeSeq(pubkey.getBytes().tryGet())

 proc write*(vb: var VBuffer, seckey: PrivateKey) {.
-    inline, raises: [Defect, ResultError[CryptoError]].} =
+    inline, raises: [ResultError[CryptoError]].} =
   ## Write PrivateKey value ``seckey`` to buffer ``vb``.
   vb.writeSeq(seckey.getBytes().tryGet())

 proc write*(vb: var VBuffer, sig: PrivateKey) {.
-    inline, raises: [Defect, ResultError[CryptoError]].} =
+    inline, raises: [ResultError[CryptoError]].} =
   ## Write Signature value ``sig`` to buffer ``vb``.
   vb.writeSeq(sig.getBytes().tryGet())

 proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
                                      key: T) {.
-    inline, raises: [Defect, ResultError[CryptoError]].} =
+    inline, raises: [ResultError[CryptoError]].} =
   write(pb, field, key.getBytes().tryGet())

 proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
-    inline, raises: [Defect].} =
+    inline, raises: [].} =
   write(pb, field, sig.getBytes())

 proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,
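The `r1.isOk() and r1.get()` chains collapse to `r1.get(false)` throughout this file. A minimal sketch of the idiom, assuming stew/results (or nim-results) semantics: for a `Result[bool, E]`, `get(false)` yields the decoded flag on success and `false` on error, in one call that cannot raise:

```nim
import results

proc getFlag(present: bool): Result[bool, string] =
  if present: ok(true)
  else: err("missing field")

# Old spelling: r.isOk() and r.get() - two calls, and get() would raise
# if used on an error result without the isOk() guard.
# New spelling: one call that never raises.
doAssert getFlag(true).get(false) == true
doAssert getFlag(false).get(false) == false
```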
@@ -15,10 +15,7 @@

 # RFC @ https://tools.ietf.org/html/rfc7748

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/[ec, rand]
 import stew/results
@@ -14,10 +14,7 @@
 ## BearSSL library <https://bearssl.org/>
 ## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/[ec, rand, hash]
 # We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -997,3 +994,33 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
     # Clear context with initial value
     kv.init(addr hc.vtable)
     result = (res == 1)
+
+type ECDHEScheme* = EcCurveKind
+
+proc ephemeral*(
+    scheme: ECDHEScheme,
+    rng: var HmacDrbgContext): EcResult[EcKeyPair] =
+  ## Generate ephemeral keys used to perform ECDHE.
+  var keypair: EcKeyPair
+  if scheme == Secp256r1:
+    keypair = ? EcKeyPair.random(Secp256r1, rng)
+  elif scheme == Secp384r1:
+    keypair = ? EcKeyPair.random(Secp384r1, rng)
+  elif scheme == Secp521r1:
+    keypair = ? EcKeyPair.random(Secp521r1, rng)
+  ok(keypair)
+
+proc ephemeral*(
+    scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
+  ## Generate ephemeral keys used to perform ECDHE using string encoding.
+  ##
+  ## Currently supported encoding strings are P-256, P-384, P-521, if encoding
+  ## string is not supported P-521 key will be generated.
+  if scheme == "P-256":
+    ephemeral(Secp256r1, rng)
+  elif scheme == "P-384":
+    ephemeral(Secp384r1, rng)
+  elif scheme == "P-521":
+    ephemeral(Secp521r1, rng)
+  else:
+    ephemeral(Secp521r1, rng)
@@ -11,10 +11,7 @@
 ## This code is a port of the public domain, "ref10" implementation of ed25519
 ## from SUPERCOP.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/rand
 import constants
@@ -9,10 +9,7 @@

 # https://tools.ietf.org/html/rfc5869

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import nimcrypto
 import bearssl/[kdf, hash]
@@ -9,10 +9,7 @@

 ## This module implements minimal ASN.1 encoding/decoding primitives.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import stew/[endians2, results, ctops]
 export results
@@ -13,10 +13,7 @@
 ## BearSSL library <https://bearssl.org/>
 ## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/[rsa, rand, hash]
 import minasn1
@@ -7,10 +7,7 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 import bearssl/rand
 import
@@ -7,10 +7,7 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 ## This module implementes API for `go-libp2p-daemon`.
 import std/[os, osproc, strutils, tables, strtabs, sequtils]
@@ -153,10 +150,10 @@ type
     key*: PublicKey

   P2PStreamCallback* = proc(api: DaemonAPI,
-                            stream: P2PStream): Future[void] {.gcsafe, raises: [Defect, CatchableError].}
+                            stream: P2PStream): Future[void] {.gcsafe, raises: [CatchableError].}
   P2PPubSubCallback* = proc(api: DaemonAPI,
                             ticket: PubsubTicket,
-                            message: PubSubMessage): Future[bool] {.gcsafe, raises: [Defect, CatchableError].}
+                            message: PubSubMessage): Future[bool] {.gcsafe, raises: [CatchableError].}

   DaemonError* = object of LPError
   DaemonRemoteError* = object of DaemonError
@@ -474,7 +471,7 @@ proc checkResponse(pb: ProtoBuffer): ResponseKind {.inline.} =
   else:
     result = ResponseKind.Error

-proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
+proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalError].} =
   var error: seq[byte]
   if pb.getRequiredField(ResponseType.ERROR.int, error).isOk():
     if initProtoBuffer(error).getRequiredField(1, result).isErr():
@@ -504,7 +501,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
   result = buffer

 proc newConnection*(api: DaemonAPI): Future[StreamTransport]
-    {.raises: [Defect, LPError].} =
+    {.raises: [LPError].} =
   result = connect(api.address)

 proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
@@ -515,7 +512,7 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
     var transp = await connect(address)
     await transp.closeWait()
     result = true
-  except CatchableError, Defect:
+  except CatchableError:
     result = false

 when defined(windows):
@@ -556,7 +553,7 @@ proc getSocket(pattern: string,
     closeSocket(sock)

 # This is forward declaration needed for newDaemonApi()
-proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
+proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}

 proc copyEnv(): StringTableRef =
   ## This procedure copy all environment variables into StringTable.
@@ -758,7 +755,7 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},

   # Starting daemon process
   # echo "Starting ", cmd, " ", args.join(" ")
   api.process =
+    exceptionToAssert:
       startProcess(cmd, "", args, env, {poParentStreams})
   # Waiting until daemon will not be bound to control socket.
@@ -837,7 +834,7 @@ proc transactMessage(transp: StreamTransport,
   result = initProtoBuffer(message)

 proc getPeerInfo(pb: ProtoBuffer): PeerInfo
-    {.raises: [Defect, DaemonLocalError].} =
+    {.raises: [DaemonLocalError].} =
   ## Get PeerInfo object from ``pb``.
   result.addresses = newSeq[MultiAddress]()
   if pb.getRequiredField(1, result.peer).isErr():
@@ -868,7 +865,7 @@ proc connect*(api: DaemonAPI, peer: PeerId,
                                           timeout))
     pb.withMessage() do:
       discard
-  except CatchableError, Defect:
+  except CatchableError:
     await api.closeConnection(transp)

 proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
@@ -928,7 +925,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
   asyncSpawn handler(api, stream)

 proc addHandler*(api: DaemonAPI, protocols: seq[string],
-                 handler: P2PStreamCallback) {.async, raises: [Defect, LPError].} =
+                 handler: P2PStreamCallback) {.async, raises: [LPError].} =
   ## Add stream handler ``handler`` for set of protocols ``protocols``.
   var transp = await api.newConnection()
   let maddress = await getSocket(api.pattern, addr api.ucounter)
@@ -998,7 +995,7 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
   await api.closeConnection(transp)

 proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
-    {.raises: [Defect, DaemonLocalError].} =
+    {.raises: [DaemonLocalError].} =
   var res: seq[byte]
   if pb.getRequiredField(2, res).isOk():
     result = initProtoBuffer(res).getPeerInfo()
@@ -1006,23 +1003,23 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
     raise newException(DaemonLocalError, "Missing required field `peer`!")

 proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
-    {.raises: [Defect, DaemonLocalError].} =
+    {.raises: [DaemonLocalError].} =
   result = newSeq[byte]()
   if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

 proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
-    {.raises: [Defect, DaemonLocalError].} =
+    {.raises: [DaemonLocalError].} =
   if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

 proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
-    {.raises: [Defect, DaemonLocalError].} =
+    {.raises: [DaemonLocalError].} =
   if pb.getRequiredField(3, result).isErr():
     raise newException(DaemonLocalError, "Missing field `value`!")

 proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
-    {.inline, raises: [Defect, DaemonLocalError].} =
+    {.inline, raises: [DaemonLocalError].} =
   var dhtResponse: seq[byte]
   if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
     var pbDhtResponse = initProtoBuffer(dhtResponse)
@@ -1035,13 +1032,13 @@ proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
       var value: seq[byte]
       if pbDhtResponse.getRequiredField(3, value).isErr():
         raise newException(DaemonLocalError, "Missing required DHT field `value`!")

       return initProtoBuffer(value)
   else:
     raise newException(DaemonLocalError, "Wrong message type!")

 proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
-    {.inline, raises: [Defect, DaemonLocalError].} =
+    {.inline, raises: [DaemonLocalError].} =
   var res: seq[byte]
   if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
     raise newException(DaemonLocalError, "Wrong message type!")
@@ -1049,7 +1046,7 @@ proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
   initProtoBuffer(res)

 proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
-    {.inline, raises: [Defect, DaemonLocalError].} =
+    {.inline, raises: [DaemonLocalError].} =
   var dtype: uint
   if pb.getRequiredField(1, dtype).isErr():
     raise newException(DaemonLocalError, "Missing required DHT field `type`!")
@@ -7,10 +7,7 @@
 # This file may not be copied, modified, or distributed except according to
 # those terms.

-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}

 ## This module implements Pool of StreamTransport.
 import chronos
@@ -25,7 +25,7 @@
|
||||
## 5. LocalAddress: optional bytes
|
||||
## 6. RemoteAddress: optional bytes
|
||||
## 7. Message: required bytes
|
||||
import os, options
|
||||
import os
|
||||
import nimcrypto/utils, stew/endians2
|
||||
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
|
||||
multiaddress, peerid, varint, muxers/mplex/coder
|
||||
@@ -33,7 +33,7 @@ import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
|
||||
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
|
||||
NanosecondRange, initTime
|
||||
from strutils import toHex, repeat
|
||||
export peerid, options, multiaddress
|
||||
export peerid, multiaddress
|
||||
|
||||
type
|
||||
FlowDirection* = enum
|
||||
@@ -43,10 +43,10 @@ type
|
||||
timestamp*: uint64
|
||||
direction*: FlowDirection
|
||||
message*: seq[byte]
|
||||
seqID*: Option[uint64]
|
||||
mtype*: Option[uint64]
|
||||
local*: Option[MultiAddress]
|
||||
remote*: Option[MultiAddress]
|
||||
seqID*: Opt[uint64]
|
||||
mtype*: Opt[uint64]
|
||||
local*: Opt[MultiAddress]
|
||||
remote*: Opt[MultiAddress]
|
||||
|
||||
const
|
||||
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
|
||||
@@ -72,7 +72,8 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
var pb = initProtoBuffer(options = {WithVarintLength})
|
||||
pb.write(2, getTimestamp())
|
||||
pb.write(4, uint64(direction))
|
||||
pb.write(6, conn.observedAddr)
|
||||
conn.observedAddr.withValue(oaddr):
|
||||
pb.write(6, oaddr)
|
||||
pb.write(7, data)
|
||||
pb.finish()
|
||||
|
||||
@@ -100,7 +101,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
|
||||
finally:
|
||||
close(handle)
|
||||
|
||||
proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
|
||||
## Decode protobuf's message ProtoMessage from array of bytes ``data``.
|
||||
var
|
||||
pb = initProtoBuffer(data)
|
||||
@@ -108,13 +109,12 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
ma1, ma2: MultiAddress
|
||||
pmsg: ProtoMessage
|
||||
|
||||
let res2 = pb.getField(2, pmsg.timestamp)
|
||||
if res2.isErr() or not(res2.get()):
|
||||
return none[ProtoMessage]()
|
||||
|
||||
let res4 = pb.getField(4, value)
|
||||
if res4.isErr() or not(res4.get()):
|
||||
return none[ProtoMessage]()
|
||||
let
|
||||
r2 = pb.getField(2, pmsg.timestamp)
|
||||
r4 = pb.getField(4, value)
|
||||
r7 = pb.getField(7, pmsg.message)
|
||||
if not r2.get(false) or not r4.get(false) or not r7.get(false):
|
||||
return Opt.none(ProtoMessage)
|
||||
|
||||
# `case` statement could not work here with an error "selector must be of an
|
||||
# ordinal type, float or string"
|
||||
@@ -124,30 +124,27 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
|
||||
elif value == uint64(Incoming):
|
||||
Incoming
|
||||
else:
|
||||
return none[ProtoMessage]()
|
||||
return Opt.none(ProtoMessage)
|
||||
|
||||
let res7 = pb.getField(7, pmsg.message)
|
||||
if res7.isErr() or not(res7.get()):
|
||||
return none[ProtoMessage]()
|
||||
let r1 = pb.getField(1, value)
|
||||
if r1.get(false):
|
||||
pmsg.seqID = Opt.some(value)
|
||||
|
||||
value = 0'u64
|
||||
let res1 = pb.getField(1, value)
|
||||
if res1.isOk() and res1.get():
|
||||
pmsg.seqID = some(value)
|
||||
value = 0'u64
|
||||
let res3 = pb.getField(3, value)
|
||||
if res3.isOk() and res3.get():
|
||||
pmsg.mtype = some(value)
|
||||
let res5 = pb.getField(5, ma1)
|
||||
if res5.isOk() and res5.get():
|
||||
pmsg.local = some(ma1)
|
||||
let res6 = pb.getField(6, ma2)
|
||||
if res6.isOk() and res6.get():
|
||||
pmsg.remote = some(ma2)
|
||||
let r3 = pb.getField(3, value)
|
||||
if r3.get(false):
|
||||
pmsg.mtype = Opt.some(value)
|
||||
|
||||
some(pmsg)
|
||||
let
|
||||
r5 = pb.getField(5, ma1)
|
||||
r6 = pb.getField(6, ma2)
|
||||
if r5.get(false):
|
||||
pmsg.local = Opt.some(ma1)
|
||||
if r6.get(false):
|
||||
pmsg.remote = Opt.some(ma2)
|
||||
|
||||
iterator messages*(data: seq[byte]): Option[ProtoMessage] =
|
||||
Opt.some(pmsg)
|
||||
|
||||
iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
|
||||
## Iterate over sequence of bytes and decode all the ``ProtoMessage``
|
||||
## messages we found.
|
||||
var value: uint64
|
||||
@@ -242,27 +239,19 @@ proc toString*(msg: ProtoMessage, dump = true): string =
      " >> "
  let address =
    block:
      let local =
        if msg.local.isSome():
          "[" & $(msg.local.get()) & "]"
        else:
          "[LOCAL]"
      let remote =
        if msg.remote.isSome():
          "[" & $(msg.remote.get()) & "]"
        else:
          "[REMOTE]"
      let local = block:
        msg.local.withValue(loc): "[" & $loc & "]"
        else: "[LOCAL]"
      let remote = block:
        msg.remote.withValue(rem): "[" & $rem & "]"
        else: "[REMOTE]"
      local & direction & remote
  let seqid =
    if msg.seqID.isSome():
      "seqID = " & $(msg.seqID.get()) & " "
    else:
      ""
  let mtype =
    if msg.mtype.isSome():
      "type = " & $(msg.mtype.get()) & " "
    else:
      ""
  let seqid = block:
    msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
    else: ""
  let mtype = block:
    msg.mtype.withValue(typ): "type = " & $typ & " "
    else: ""
  res.add(" ")
  res.add(address)
  res.add(" ")
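Note that withValue/else as used above is libp2p's own helper template (from its utility module), not part of stew/results: it binds the wrapped value in the first branch and falls through to else when the Opt is empty. Under that assumption, one branch above is equivalent to this plain-results sketch:

import results

let local = Opt.none(string)
let label =
  if local.isSome(): "[" & local.get() & "]"
  else: "[LOCAL]"  # same fallback, spelled out with isSome/get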
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import chronos
import stew/results
@@ -29,7 +26,7 @@ method connect*(
  addrs: seq[MultiAddress],
  forceDial = false,
  reuseConnection = true,
  upgradeDir = Direction.Out) {.async, base.} =
  dir = Direction.Out) {.async, base.} =
  ## connect remote peer without negotiating
  ## a protocol
  ##
@@ -53,21 +53,21 @@ proc dialAndUpgrade(
    peerId: Opt[PeerId],
    hostname: string,
    address: MultiAddress,
    upgradeDir = Direction.Out):
    dir = Direction.Out):
    Future[Muxer] {.async.} =

  for transport in self.transports: # for each transport
    if transport.handles(address):  # check if it can dial it
      trace "Dialing address", address, peerId, hostname
      trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
      let dialed =
        try:
          libp2p_total_dial_attempts.inc()
          await transport.dial(hostname, address, peerId)
        except CancelledError as exc:
          debug "Dialing canceled", msg = exc.msg, peerId
          debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
          raise exc
        except CatchableError as exc:
          debug "Dialing failed", msg = exc.msg, peerId
          debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
          libp2p_failed_dials.inc()
          return nil # Try the next address

@@ -75,15 +75,19 @@ proc dialAndUpgrade(

      let mux =
        try:
          dialed.transportDir = upgradeDir
          await transport.upgrade(dialed, upgradeDir, peerId)
          # This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
          # an Outbound direction at the transport level. Therefore we update the DCUtR initiator transport direction to Inbound.
          # The if below is more general and might handle other use cases in the future.
          if dialed.dir != dir:
            dialed.dir = dir
          await transport.upgrade(dialed, peerId)
        except CatchableError as exc:
          # If we failed to establish the connection through one transport,
          # we won't succeed through another - no use in trying again
          await dialed.close()
          debug "Upgrade failed", msg = exc.msg, peerId
          debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
          if exc isnot CancelledError:
            if upgradeDir == Direction.Out:
            if dialed.dir == Direction.Out:
              libp2p_failed_upgrades_outgoing.inc()
            else:
              libp2p_failed_upgrades_incoming.inc()
@@ -91,7 +95,7 @@ proc dialAndUpgrade(
          # Try other address
          return nil

      doAssert not isNil(mux), "connection died after upgrade " & $upgradeDir
      doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
      debug "Dial successful", peerId = mux.connection.peerId
      return mux
  return nil
@@ -128,10 +132,10 @@ proc dialAndUpgrade(
    self: Dialer,
    peerId: Opt[PeerId],
    addrs: seq[MultiAddress],
    upgradeDir = Direction.Out):
    dir = Direction.Out):
    Future[Muxer] {.async.} =

  debug "Dialing peer", peerId
  debug "Dialing peer", peerId = peerId.get(default(PeerId))

  for rawAddress in addrs:
    # resolve potential dnsaddr
@@ -146,11 +150,11 @@ proc dialAndUpgrade(
      else: await self.nameResolver.resolveMAddress(expandedAddress)

    for resolvedAddress in resolvedAddresses:
      result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, upgradeDir)
      result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
      if not isNil(result):
        return result

proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
  let muxer = self.connManager.selectMuxer(peerId)
  if muxer == nil:
    return Opt.none(Muxer)
@@ -164,7 +168,7 @@ proc internalConnect(
    addrs: seq[MultiAddress],
    forceDial: bool,
    reuseConnection = true,
    upgradeDir = Direction.Out):
    dir = Direction.Out):
    Future[Muxer] {.async.} =
  if Opt.some(self.localPeerId) == peerId:
    raise newException(CatchableError, "can't dial self!")
@@ -174,15 +178,15 @@ proc internalConnect(
  try:
    await lock.acquire()

    if peerId.isSome and reuseConnection:
      let muxOpt = await self.tryReusingConnection(peerId.get())
      if muxOpt.isSome:
        return muxOpt.get()
    if reuseConnection:
      peerId.withValue(peerId):
        self.tryReusingConnection(peerId).withValue(mux):
          return mux

    let slot = self.connManager.getOutgoingSlot(forceDial)
    let muxed =
      try:
        await self.dialAndUpgrade(peerId, addrs, upgradeDir)
        await self.dialAndUpgrade(peerId, addrs, dir)
      except CatchableError as exc:
        slot.release()
        raise exc
@@ -209,7 +213,7 @@ method connect*(
  addrs: seq[MultiAddress],
  forceDial = false,
  reuseConnection = true,
  upgradeDir = Direction.Out) {.async.} =
  dir = Direction.Out) {.async.} =
  ## connect remote peer without negotiating
  ## a protocol
  ##
@@ -217,7 +221,7 @@ method connect*(
  if self.connManager.connCount(peerId) > 0 and reuseConnection:
    return

  discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, upgradeDir)
  discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)

method connect*(
  self: Dialer,
@@ -225,20 +229,20 @@ method connect*(
  allowUnknownPeerId = false): Future[PeerId] {.async.} =
  ## Connects to a peer and retrieves its PeerId

  let fullAddress = parseFullAddress(address)
  if fullAddress.isOk:
  parseFullAddress(address).toOpt().withValue(fullAddress):
    return (await self.internalConnect(
      Opt.some(fullAddress.get()[0]),
      @[fullAddress.get()[1]],
      false)).connection.peerId
  else:
    if allowUnknownPeerId == false:
      raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
    return (await self.internalConnect(
      Opt.none(PeerId),
      @[address],
      Opt.some(fullAddress[0]),
      @[fullAddress[1]],
      false)).connection.peerId

  if allowUnknownPeerId == false:
    raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")

  return (await self.internalConnect(
    Opt.none(PeerId),
    @[address],
    false)).connection.peerId
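The address-dialing connect above turns a Result into an Opt with toOpt() so the success path can reuse the same withValue flow as elsewhere. A small sketch of that conversion, assuming the results package (parsePort is illustrative):

import std/strutils
import results

proc parsePort(s: string): Result[int, string] =
  try: ok(parseInt(s))
  except ValueError: err("not a number")

let port = parsePort("4001").toOpt()  # drops the error payload, keeps Opt[int]
if port.isSome():
  echo "dialing on port ", port.get()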
proc negotiateStream(
  self: Dialer,
  conn: Connection,
@@ -324,7 +328,7 @@ method dial*(
    await cleanup()
    raise exc
  except CatchableError as exc:
    debug "Error dialing", conn, msg = exc.msg
    debug "Error dialing", conn, err = exc.msg
    await cleanup()
    raise exc

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/sequtils
import chronos, chronicles, stew/results
@@ -18,7 +15,7 @@ import ../errors

type
  BaseAttr = ref object of RootObj
    comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
    comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [].}

  Attribute[T] = ref object of BaseAttr
    value: T
@@ -60,7 +57,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
      return Opt.some(f.to(T))
  Opt.none(T)

proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
  pa{T}.valueOr: raise newException(KeyError, "Attribute not found")

proc match*(pa, candidate: PeerAttributes): bool =
@@ -73,7 +70,7 @@ proc match*(pa, candidate: PeerAttributes): bool =
      return true

type
  PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
  PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [], gcsafe.}

  DiscoveryInterface* = ref object of RootObj
    onPeerFound*: PeerFoundCallback
@@ -125,20 +122,15 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
  pa.add(value)
  return dm.request(pa)

proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
proc advertise*[T](dm: DiscoveryManager, value: T) =
  for i in dm.interfaces:
    i.toAdvertise = pa
    i.toAdvertise.add(value)
    if i.advertiseLoop.isNil:
      i.advertisementUpdated = newAsyncEvent()
      i.advertiseLoop = i.advertise()
    else:
      i.advertisementUpdated.fire()

proc advertise*[T](dm: DiscoveryManager, value: T) =
  var pa: PeerAttributes
  pa.add(value)
  dm.advertise(pa)
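advertise is now generic over the advertised value and builds the PeerAttributes wrapper internally. A usage sketch, assuming a DiscoveryManager dm already wired to a discovery interface (the namespace string is illustrative; RdvNamespace is the distinct string type defined in the rendezvous interface below):

dm.advertise(RdvNamespace("/my-app/1.0.0"))  # wrapped into PeerAttributes internally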
template forEach*(query: DiscoveryQuery, code: untyped) =
  ## Will execute `code` for each discovered peer. The
  ## peer attributes are available through the variable

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import chronos
import ./discoverymngr,
@@ -22,6 +19,7 @@ type
    rdv*: RendezVous
    timeToRequest: Duration
    timeToAdvertise: Duration
    ttl: Duration

  RdvNamespace* = distinct string

@@ -65,12 +63,16 @@ method advertise*(self: RendezVousInterface) {.async.} =

    self.advertisementUpdated.clear()
    for toAdv in toAdvertise:
      await self.rdv.advertise(toAdv, self.timeToAdvertise)
      try:
        await self.rdv.advertise(toAdv, self.ttl)
      except CatchableError as error:
        debug "RendezVous advertise error: ", msg = error.msg

    await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()

proc new*(T: typedesc[RendezVousInterface],
          rdv: RendezVous,
          ttr: Duration = 1.minutes,
          tta: Duration = MinimumDuration): RendezVousInterface =
  T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
          tta: Duration = 1.minutes,
          ttl: Duration = MinimumDuration): RendezVousInterface =
  T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
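The constructor now takes ttl (how long each advertisement stays registered) separately from tta (how often it is refreshed). A usage sketch with explicit durations, values illustrative (the minutes/seconds helpers come from chronos):

let rdvInterface = RendezVousInterface.new(
  rdv,
  ttr = 30.seconds,  # how often to re-request peers
  tta = 1.minutes,   # how often to re-advertise
  ttl = 2.minutes)   # how long each advertisement remains valid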
@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro, it's based on the proc/template version
# and uses quote do so it's quite readable
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
  let nexclude = exclude.len
  case nexclude
  of 0:

@@ -9,10 +9,7 @@

## This module implements MultiAddress.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}
{.push public.}

import pkg/chronos, chronicles
@@ -401,6 +398,9 @@ const
    MAProtocol(
      mcodec: multiCodec("quic"), kind: Marker, size: 0
    ),
    MAProtocol(
      mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
    ),
    MAProtocol(
      mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
      coder: TranscoderIP6Zone
@@ -419,6 +419,9 @@ const
    MAProtocol(
      mcodec: multiCodec("wss"), kind: Marker, size: 0
    ),
    MAProtocol(
      mcodec: multiCodec("tls"), kind: Marker, size: 0
    ),
    MAProtocol(
      mcodec: multiCodec("ipfs"), kind: Length, size: 0,
      coder: TranscoderP2P
@@ -471,7 +474,7 @@ const
  IP* = mapOr(IP4, IP6)
  DNS_OR_IP* = mapOr(DNS, IP)
  TCP_DNS* = mapAnd(DNS, mapEq("tcp"))
  TCP_IP* =mapAnd(IP, mapEq("tcp"))
  TCP_IP* = mapAnd(IP, mapEq("tcp"))
  TCP* = mapOr(TCP_DNS, TCP_IP)
  UDP_DNS* = mapAnd(DNS, mapEq("udp"))
  UDP_IP* = mapAnd(IP, mapEq("udp"))
@@ -482,9 +485,10 @@ const
  WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
  WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
  WS* = mapAnd(TCP, mapEq("ws"))
  WSS_DNS* = mapAnd(TCP_DNS, mapEq("wss"))
  WSS_IP* = mapAnd(TCP_IP, mapEq("wss"))
  WSS* = mapAnd(TCP, mapEq("wss"))
  TLS_WS* = mapOr(mapEq("wss"), mapAnd(mapEq("tls"), mapEq("ws")))
  WSS_DNS* = mapAnd(TCP_DNS, TLS_WS)
  WSS_IP* = mapAnd(TCP_IP, TLS_WS)
  WSS* = mapAnd(TCP, TLS_WS)
  WebSockets_DNS* = mapOr(WS_DNS, WSS_DNS)
  WebSockets_IP* = mapOr(WS_IP, WSS_IP)
  WebSockets* = mapOr(WS, WSS)
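With the tls marker codec and the TLS_WS pattern in place, an address spelled /tls/ws now satisfies the secure-websocket patterns alongside the legacy /wss form. A quick sketch of the behaviour, assuming the patterns are exported as shown:

import libp2p/multiaddress

let modern = MultiAddress.init("/ip4/127.0.0.1/tcp/443/tls/ws").tryGet()
let legacy = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").tryGet()
doAssert WSS.match(modern)  # matched via mapAnd(mapEq("tls"), mapEq("ws"))
doAssert WSS.match(legacy)  # still matched via mapEq("wss")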
@@ -775,7 +779,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
    res = "/" & parts.join("/")
  ok(res)

proc `$`*(value: MultiAddress): string {.raises: [Defect].} =
proc `$`*(value: MultiAddress): string =
  ## Return string representation of MultiAddress ``value``.
  let s = value.toString()
  if s.isErr: s.error
@@ -954,7 +958,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
  ## Initialize empty MultiAddress.
  result.data = initVBuffer()

proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
           protocol: IpTransportProtocol, port: Port): MultiAddress =
  var res: MultiAddress
  res.data = initVBuffer()
@@ -1025,7 +1029,7 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
  ok()

proc `&`*(m1, m2: MultiAddress): MultiAddress {.
    raises: [Defect, LPError].} =
    raises: [LPError].} =
  ## Concatenates two addresses ``m1`` and ``m2``, and returns result.
  ##
  ## This procedure performs validation of concatenated result and can raise
@@ -1035,7 +1039,7 @@ proc `&`*(m1, m2: MultiAddress): MultiAddress {.
  concat(m1, m2).tryGet()

proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
    raises: [Defect, LPError].} =
    raises: [LPError].} =
  ## Concatenates two addresses ``m1`` and ``m2``.
  ##
  ## This procedure performs validation of concatenated result and can raise
@@ -1079,19 +1083,15 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
proc match*(pat: MaPattern, address: MultiAddress): bool =
  ## Match full ``address`` using pattern ``pat`` and return ``true`` if
  ## ``address`` satisfies pattern.
  let protos = address.protocols()
  if protos.isErr():
    return false
  let res = matchPart(pat, protos.get())
  let protos = address.protocols().valueOr: return false
  let res = matchPart(pat, protos)
  res.flag and (len(res.rem) == 0)

proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
  ## Match prefix part of ``address`` using pattern ``pat`` and return
  ## ``true`` if ``address`` starts with pattern.
  let protos = address.protocols()
  if protos.isErr():
    return false
  let res = matchPart(pat, protos.get())
  let protos = address.protocols().valueOr: return false
  let res = matchPart(pat, protos)
  res.flag

proc `$`*(pat: MaPattern): string =
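match and matchPartial now lean on results' valueOr, whose block runs only on the error path, so a bare return inside it reproduces the early exit the old isErr check spelled out. A self-contained sketch of that control flow (names illustrative):

import results

proc head(xs: seq[int]): Opt[int] =
  if xs.len > 0: Opt.some(xs[0]) else: Opt.none(int)

proc describe(xs: seq[int]): string =
  let first = head(xs).valueOr:
    return "empty"  # executed only when the Opt is none
  "starts with " & $first

echo describe(@[7, 8])  # starts with 7
echo describe(@[])      # empty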
@@ -1120,12 +1120,8 @@ proc getField*(pb: ProtoBuffer, field: int,
  if not(res):
    ok(false)
  else:
    let ma = MultiAddress.init(buffer)
    if ma.isOk():
      value = ma.get()
      ok(true)
    else:
      err(ProtoError.IncorrectBlob)
    value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
    ok(true)

proc getRepeatedField*(pb: ProtoBuffer, field: int,
                       value: var seq[MultiAddress]): ProtoResult[bool] {.
@@ -1141,11 +1137,11 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
    ok(false)
  else:
    for item in items:
      let ma = MultiAddress.init(item)
      if ma.isOk():
        value.add(ma.get())
      else:
        debug "Not supported MultiAddress in blob", ma = item
      let ma = MultiAddress.init(item).valueOr:
        debug "Unsupported MultiAddress in blob", ma = item
        continue

      value.add(ma)
    if value.len == 0:
      err(ProtoError.IncorrectBlob)
    else:

@@ -13,10 +13,7 @@
## 1. base32z
##

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import tables
import stew/[base32, base58, base64, results]
@@ -27,17 +24,17 @@ type

  MultiBase* = object

  MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
  MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [].}

  MBCodec = object
    code: char
    name: string
    encr: proc(inbytes: openArray[byte],
               outbytes: var openArray[char],
               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
    decr: proc(inbytes: openArray[char],
               outbytes: var openArray[byte],
               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
               outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
    encl: MBCodeSize
    decl: MBCodeSize

@@ -9,10 +9,7 @@

## This module implements MultiCodec.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import tables, hashes
import varint, vbuffer
@@ -194,9 +191,11 @@ const MultiCodecList = [
  ("p2p", 0x01A5),
  ("http", 0x01E0),
  ("https", 0x01BB),
  ("tls", 0x01C0),
  ("quic", 0x01CC),
  ("quic-v1", 0x01CD),
  ("ws", 0x01DD),
  ("wss", 0x01DE), # not in multicodec list
  ("wss", 0x01DE),
  ("p2p-websocket-star", 0x01DF), # not in multicodec list
  ("p2p-webrtc-star", 0x0113), # not in multicodec list
  ("p2p-webrtc-direct", 0x0114), # not in multicodec list

@@ -21,10 +21,7 @@
## 1. SKEIN
## 2. MURMUR

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
@@ -45,7 +42,7 @@ const

type
  MHashCoderProc* = proc(data: openArray[byte],
                         output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
                         output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
  MHash* = object
    mcodec*: MultiCodec
    size*: int

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
@@ -28,7 +25,7 @@ const
  Ls = "ls\n"

type
  Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
  Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}

  MultiStreamError* = object of LPError

@@ -134,7 +131,7 @@ proc handle*(
  protos: seq[string],
  matchers = newSeq[Matcher](),
  active: bool = false,
  ): Future[string] {.async, gcsafe.} =
  ): Future[string] {.async.} =
  trace "Starting multistream negotiation", conn, handshaked = active
  var handshaked = active
  while not conn.atEof:
@@ -175,10 +172,9 @@ proc handle*(
      trace "no handlers", conn, protocol = ms
      await conn.writeLp(Na)

proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
  trace "Starting multistream handler", conn, handshaked = active
  var
    handshaked = active
    protos: seq[string]
    matchers: seq[Matcher]
  for h in m.handlers:

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection,
@@ -45,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
  newException(InvalidMplexMsgType, "invalid message type")

proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
  let header = await conn.readVarint()
  trace "read header varint", varint = header, conn

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
@@ -76,7 +73,7 @@ func shortLog*(s: LPChannel): auto =

chronicles.formatIt(LPChannel): shortLog(it)

proc open*(s: LPChannel) {.async, gcsafe.} =
proc open*(s: LPChannel) {.async.} =
  trace "Opening channel", s, conn = s.conn
  if s.conn.isClosed:
    return
@@ -98,7 +95,7 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
  if s.closedLocal and s.atEof():
    await procCall BufferStream(s).close()

proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.async.} =
  if s.isClosed:
    trace "Already closed", s
    return
@@ -126,7 +123,7 @@ proc reset*(s: LPChannel) {.async, gcsafe.} =

  trace "Channel reset", s

method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
  ## Close channel for writing - a message will be sent to the other peer
  ## informing them that the channel is closed and that we're waiting for
  ## their acknowledgement.

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
@@ -79,7 +76,7 @@ proc newStreamInternal*(m: Mplex,
                        chanId: uint64 = 0,
                        name: string = "",
                        timeout: Duration): LPChannel
                        {.gcsafe, raises: [Defect, InvalidChannelIdError].} =
                        {.gcsafe, raises: [InvalidChannelIdError].} =
  ## create new channel/stream
  ##
  let id = if initiator:
@@ -125,7 +122,7 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
    trace "Exception in mplex stream handler", m, chann, msg = exc.msg
    await chann.reset()

method handle*(m: Mplex) {.async, gcsafe.} =
method handle*(m: Mplex) {.async.} =
  trace "Starting mplex handler", m
  try:
    while not m.connection.atEof:
@@ -214,7 +211,7 @@ proc new*(M: type Mplex,

method newStream*(m: Mplex,
                  name: string = "",
                  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
                  lazy: bool = false): Future[Connection] {.async.} =
  let channel = m.newStreamInternal(timeout = m.inChannTimeout)

  if not lazy:
@@ -222,7 +219,7 @@ method newStream*(m: Mplex,

  return Connection(channel)

method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
  if m.isClosed:
    trace "Already closed", m
    return

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import chronos, chronicles
import ../stream/connection,
@@ -26,8 +23,8 @@ type
  MuxerError* = object of LPError
  TooManyChannels* = object of MuxerError

  StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
  MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [Defect].}
  StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
  MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}

  Muxer* = ref object of RootObj
    streamHandler*: StreamHandler
@@ -35,7 +32,7 @@ type
    connection*: Connection

  # user provider proc that returns a constructed Muxer
  MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
  MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}

  # this wraps a creator proc that knows how to make muxers
  MuxerProvider* = object
@@ -49,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)

# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
  Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
  Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
  if not isNil(m.connection):
    await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard

proc new*(
  T: typedesc[MuxerProvider],

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
@@ -25,15 +22,16 @@ logScope:
const
  YamuxCodec* = "/yamux/1.0.0"
  YamuxVersion = 0.uint8
  DefaultWindowSize = 256000
  YamuxDefaultWindowSize* = 256000
  MaxSendQueueSize = 256000
  MaxChannelCount = 200

when defined(libp2p_yamux_metrics):
  declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
  declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
    buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
    buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
  declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
    buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
    buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]

type
  YamuxError* = object of CatchableError
@@ -62,7 +60,7 @@ type
    streamId: uint32
    length: uint32

proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
  var buffer: array[12, byte]
  await conn.readExactly(addr buffer[0], 12)

@@ -146,6 +144,7 @@ type
    recvWindow: int
    sendWindow: int
    maxRecvWindow: int
    maxSendQueueSize: int
    conn: Connection
    isSrc: bool
    opened: bool
@@ -172,9 +171,18 @@ proc `$`(channel: YamuxChannel): string =
  if s.len > 0:
    result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"

proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
  for (elem, sent, _) in channel.sendQueue:
    result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
  ## Returns the length of what remains to be sent
  ##
  channel.sendQueue.foldl(a + b.data.len - b.sent, 0)

proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
  ## Returns the length of what remains to be sent, but limits the size of big messages.
  ##
  # For leniency, limit big message size to a third of maxSendQueueSize.
  # This value is arbitrary, it's not in the specs, and it permits storing up to
  # 3 big messages if the peer is stalling.
  channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)

proc actuallyClose(channel: YamuxChannel) {.async.} =
  if channel.closedLocally and channel.sendQueue.len == 0 and
@@ -186,15 +194,19 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
    channel.closedRemotely.complete()
    await channel.actuallyClose()

method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
  if not channel.closedLocally:
    channel.closedLocally = true
    channel.isEof = true

    if channel.isReset == false and channel.sendQueue.len == 0:
      await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
    await channel.actuallyClose()

proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
  # If we reset locally, we want to flush up to a maximum of recvWindow
  # bytes. It's because the peer we're connected to can send us data before
  # it receives the reset.
  if channel.isReset:
    return
  trace "Reset channel"
@@ -215,11 +227,14 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
    await channel.remoteClosed()
  channel.receivedData.fire()
  if not isLocal:
    # If we reset locally, we want to flush up to a maximum of recvWindow
    # bytes. We use the recvWindow in the proc cleanupChann.
    # If the reset is remote, there's no reason to flush anything.
    channel.recvWindow = 0

proc updateRecvWindow(channel: YamuxChannel) {.async.} =
  ## Send the peer a window update when the recvWindow is empty enough
  ##
  # In order to avoid spamming a window update every time a byte is read,
  # we send it every time half of the maxRecvWindow is read.
  let inWindow = channel.recvWindow + channel.recvQueue.len
  if inWindow > channel.maxRecvWindow div 2:
    return
@@ -237,6 +252,7 @@ method readOnce*(
    pbytes: pointer,
    nbytes: int):
    Future[int] {.async.} =
  ## Read from a yamux channel

  if channel.isReset:
    raise if channel.remoteReset:
@@ -252,6 +268,7 @@ method readOnce*(
    await channel.closedRemotely or channel.receivedData.wait()
    if channel.closedRemotely.done() and channel.recvQueue.len == 0:
      channel.returnedEof = true
      channel.isEof = true
      return 0

  let toRead = min(channel.recvQueue.len, nbytes)
@@ -281,21 +298,22 @@ proc trySend(channel: YamuxChannel) {.async.} =
    return
  channel.isSending = true
  defer: channel.isSending = false

  while channel.sendQueue.len != 0:
    channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
    if channel.sendWindow == 0:
      trace "send window empty"
      if channel.sendQueueBytes(true) > channel.maxRecvWindow:
        debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
          currentQueueSize = channel.sendQueueBytes(true)
      trace "trying to send while the sendWindow is empty"
      if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
        trace "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
          currentQueueSize = channel.lengthSendQueueWithLimit()
        try:
          await channel.reset(true)
        except CatchableError as exc:
          debug "failed to reset", msg=exc.msg
          warn "failed to reset", msg=exc.msg
        break

    let
      bytesAvailable = channel.sendQueueBytes()
      bytesAvailable = channel.lengthSendQueue()
      toSend = min(channel.sendWindow, bytesAvailable)
    var
      sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -310,20 +328,24 @@ proc trySend(channel: YamuxChannel) {.async.} =

    var futures: seq[Future[void]]
    while inBuffer < toSend:
      # concatenate the different messages we try to send into one buffer
      let (data, sent, fut) = channel.sendQueue[0]
      let bufferToSend = min(data.len - sent, toSend - inBuffer)
      sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
        channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
      channel.sendQueue[0].sent.inc(bufferToSend)
      if channel.sendQueue[0].sent >= data.len:
        # if every byte of the message is in the buffer, add the write future to the
        # sequence of futures to be completed (or failed) when the buffer is sent
        futures.add(fut)
        channel.sendQueue.delete(0)
      inBuffer.inc(bufferToSend)

    trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
    trace "try to send the buffer", h = $header
    channel.sendWindow.dec(toSend)
    try: await channel.conn.write(sendBuffer)
    except CatchableError as exc:
      trace "failed to send the buffer"
      let connDown = newLPStreamConnDownError(exc)
      for fut in futures.items():
        fut.fail(connDown)
@@ -334,6 +356,8 @@ proc trySend(channel: YamuxChannel) {.async.} =
    channel.activity = true

method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
  ## Write to yamux channel
  ##
  result = newFuture[void]("Yamux Send")
  if channel.remoteReset:
    result.fail(newLPStreamResetError())
@@ -346,15 +370,20 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
    return result
  channel.sendQueue.add((msg, 0, result))
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
    libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
  asyncSpawn channel.trySend()

proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open(channel: YamuxChannel) {.async.} =
  ## Open a yamux channel by sending a window update with Syn or Ack flag
  ##
  if channel.opened:
    trace "Try to open channel twice"
    return
  channel.opened = true
  await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
  await channel.conn.write(YamuxHeader.windowUpdate(
    channel.id,
    uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
    {if channel.isSrc: Syn else: Ack}))

method getWrapped*(channel: YamuxChannel): Connection = channel.conn

@@ -365,12 +394,14 @@ type
    currentId: uint32
    isClosed: bool
    maxChannCount: int
    windowSize: int
    maxSendQueueSize: int

proc lenBySrc(m: Yamux, isSrc: bool): int =
  for v in m.channels.values():
    if v.isSrc == isSrc: result += 1

proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async.} =
  await channel.join()
  m.channels.del(channel.id)
  when defined(libp2p_yamux_metrics):
@@ -378,12 +409,19 @@ proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
  if channel.isReset and channel.recvWindow > 0:
    m.flushed[channel.id] = channel.recvWindow

proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
proc createStream(m: Yamux, id: uint32, isSrc: bool,
                  recvWindow: int, maxSendQueueSize: int): YamuxChannel =
  # As you can see, during initialization, recvWindow can be larger than maxRecvWindow.
  # This is because the peer we're connected to will always assume
  # that the initial recvWindow is 256k.
  # To solve this contradiction, no updateWindow will be sent until recvWindow is less
  # than maxRecvWindow
  result = YamuxChannel(
    id: id,
    maxRecvWindow: DefaultWindowSize,
    recvWindow: DefaultWindowSize,
    sendWindow: DefaultWindowSize,
    maxRecvWindow: recvWindow,
    recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
    sendWindow: YamuxDefaultWindowSize,
    maxSendQueueSize: maxSendQueueSize,
    isSrc: isSrc,
    conn: m.connection,
    receivedData: newAsyncEvent(),
@@ -401,7 +439,7 @@ proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
  when defined(libp2p_agents_metrics):
    result.shortAgent = m.connection.shortAgent
  m.channels[id] = result
  asyncSpawn m.cleanupChann(result)
  asyncSpawn m.cleanupChannel(result)
  trace "created channel", id, pid=m.connection.peerId
  when defined(libp2p_yamux_metrics):
    libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
@@ -422,7 +460,7 @@ method close*(m: Yamux) {.async.} =
  trace "Closed yamux"

proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
  ## call the muxer stream handler for this channel
  ## Call the muxer stream handler for this channel
  ##
  try:
    await m.streamHandler(channel)
@@ -432,7 +470,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
    trace "Exception in yamux stream handler", msg = exc.msg
    await channel.reset()

method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
  trace "Starting yamux handler", pid=m.connection.peerId
  try:
    while not m.connection.atEof:
@@ -456,9 +494,11 @@ method handle*(m: Yamux) {.async, gcsafe.} =
        else:
          if header.streamId in m.flushed:
            m.flushed.del(header.streamId)

          if header.streamId mod 2 == m.currentId mod 2:
            debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
            raise newException(YamuxError, "Peer used our reserved stream id")
          let newStream = m.createStream(header.streamId, false)
          let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
          if m.channels.len >= m.maxChannCount:
            await newStream.reset()
            continue
@@ -514,19 +554,24 @@ method getStreams*(m: Yamux): seq[Connection] =
method newStream*(
  m: Yamux,
  name: string = "",
  lazy: bool = false): Future[Connection] {.async, gcsafe.} =
  lazy: bool = false): Future[Connection] {.async.} =

  if m.channels.len > m.maxChannCount - 1:
    raise newException(TooManyChannels, "max allowed channel count exceeded")
  let stream = m.createStream(m.currentId, true)
  let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
  m.currentId += 2
  if not lazy:
    await stream.open()
  return stream

proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(T: type[Yamux], conn: Connection,
          maxChannCount: int = MaxChannelCount,
          windowSize: int = YamuxDefaultWindowSize,
          maxSendQueueSize: int = MaxSendQueueSize): T =
  T(
    connection: conn,
    currentId: if conn.dir == Out: 1 else: 2,
    maxChannCount: maxChannCount
    maxChannCount: maxChannCount,
    windowSize: windowSize,
    maxSendQueueSize: maxSendQueueSize
  )
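The extended constructor exposes the per-channel receive window and the send-queue cap directly. A usage sketch over an established Connection conn, with illustrative values (YamuxDefaultWindowSize is the 256 kB default shown above):

let mux = Yamux.new(
  conn,
  maxChannCount = 200,                      # refuse new streams beyond this
  windowSize = YamuxDefaultWindowSize * 4,  # larger initial receive window
  maxSendQueueSize = 1_000_000)             # reset channels whose queue backs up past this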
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import
  std/[streams, strutils, sets, sequtils],

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import
  std/tables,

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import std/[sugar, sets, sequtils, strutils]
import
@@ -55,7 +52,7 @@ proc resolveOneAddress(
  ma: MultiAddress,
  domain: Domain = Domain.AF_UNSPEC,
  prefix = ""): Future[seq[MultiAddress]]
  {.async, raises: [Defect, MaError, TransportAddressError].} =
  {.async.} =
  # Resolve a single address
  var pbuf: array[2, byte]

@@ -121,7 +118,7 @@ proc resolveMAddress*(
  if not DNS.matchPartial(address):
    res.incl(address)
  else:
    let code = address[0].get().protoCode().get()
    let code = address[0].tryGet().protoCode().tryGet()
    let seq = case code:
      of multiCodec("dns"):
        await self.resolveOneAddress(address)
@@ -132,7 +129,7 @@ proc resolveMAddress*(
      of multiCodec("dnsaddr"):
        await self.resolveDnsAddr(address)
      else:
        doAssert false
        assert false
        @[address]
    for ad in seq:
      res.incl(ad)

@@ -7,15 +7,11 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import
  std/[sequtils, tables],
  chronos, chronicles,
  multiaddress, multicodec
import std/[sequtils, tables, sugar]
import chronos
import multiaddress, multicodec

type
  ## Manages MultiAddresses observed by remote peers. It keeps track of the most observed IP and IP/Port.
@@ -36,14 +32,16 @@ proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], mul
  countTable.sort()
  var orderedPairs = toSeq(countTable.pairs)
  for (ma, count) in orderedPairs:
    let maFirst = ma[0].get()
    if maFirst.protoCode.get() == multiCodec and count >= self.minCount:
    let protoCode = (ma[0].flatMap(protoCode)).valueOr: continue
    if protoCode == multiCodec and count >= self.minCount:
      return Opt.some(ma)
  return Opt.none(MultiAddress)

proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
  ## Returns the most observed IP address or none if the number of observations is less than minCount.
  let observedIPs = self.observedIPsAndPorts.mapIt(it[0].get())
  let observedIPs = collect:
    for observedIp in self.observedIPsAndPorts:
      observedIp[0].valueOr: continue
  return self.getProtocol(observedIPs, multiCodec)
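The collect rewrite filters unparseable observations inline: valueOr's continue skips the element rather than aborting the loop. The same shape in a self-contained sketch (std/sugar's collect plus results):

import std/sugar
import results

let raw = @[Opt.some(1), Opt.none(int), Opt.some(3)]
let usable = collect:
  for o in raw:
    o.valueOr: continue  # drop empty entries, keep the rest
doAssert usable == @[1, 3]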
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
@@ -54,34 +52,24 @@ proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress
  ## Returns the most observed IP4/Port and IP6/Port address or an empty seq if the number of observations
  ## is less than minCount.
  var res: seq[MultiAddress]
  let ip4 = self.getMostObservedProtoAndPort(multiCodec("ip4"))
  if ip4.isSome():
    res.add(ip4.get())
  let ip6 = self.getMostObservedProtoAndPort(multiCodec("ip6"))
  if ip6.isSome():
    res.add(ip6.get())
  self.getMostObservedProtoAndPort(multiCodec("ip4")).withValue(ip4):
    res.add(ip4)
  self.getMostObservedProtoAndPort(multiCodec("ip6")).withValue(ip6):
    res.add(ip6)
  return res

proc guessDialableAddr*(
  self: ObservedAddrManager,
  ma: MultiAddress): MultiAddress =
  ## Replaces the first proto valeu of each listen address by the corresponding (matching the proto code) most observed value.
  ## Replaces the first proto value of each listen address by the corresponding (matching the proto code) most observed value.
  ## If the most observed value is not available, the original MultiAddress is returned.
  try:
    let maFirst = ma[0]
    let maRest = ma[1..^1]
    if maRest.isErr():
      return ma
  let
    maFirst = ma[0].valueOr: return ma
    maRest = ma[1..^1].valueOr: return ma
    maFirstProto = maFirst.protoCode().valueOr: return ma

    let observedIP = self.getMostObservedProtocol(maFirst.get().protoCode().get())
    return
      if observedIP.isNone() or maFirst.get() == observedIP.get():
        ma
      else:
        observedIP.get() & maRest.get()
  except CatchableError as error:
    debug "Error while handling manual port forwarding", msg = error.msg
    return ma
  let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr: return ma
  return concat(observedIP, maRest).valueOr: ma

proc `$`*(self: ObservedAddrManager): string =
  ## Returns a string representation of the ObservedAddrManager.

@@ -9,10 +9,7 @@

## This module implements API for libp2p peer.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}
{.push public.}

import
@@ -44,10 +41,7 @@ func shortLog*(pid: PeerId): string =
  if len(spid) > 10:
    spid[3] = '*'

    when (NimMajor, NimMinor) > (1, 4):
      spid.delete(4 .. spid.high - 6)
    else:
      spid.delete(4, spid.high - 6)
    spid.delete(4 .. spid.high - 6)

  spid

@@ -191,19 +185,11 @@ proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =

func match*(pid: PeerId, pubkey: PublicKey): bool =
  ## Returns ``true`` if ``pid`` matches public key ``pubkey``.
  let p = PeerId.init(pubkey)
  if p.isErr:
    false
  else:
    pid == p.get()
  PeerId.init(pubkey) == Result[PeerId, cstring].ok(pid)

func match*(pid: PeerId, seckey: PrivateKey): bool =
  ## Returns ``true`` if ``pid`` matches private key ``seckey``.
  let p = PeerId.init(seckey)
  if p.isErr:
    false
  else:
    pid == p.get()
  PeerId.init(seckey) == Result[PeerId, cstring].ok(pid)

## Serialization/Deserialization helpers

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}
{.push public.}

import std/[options, sequtils]
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility

@@ -26,7 +23,7 @@ type

  AddressMapper* =
    proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
    {.gcsafe, raises: [Defect].}
    {.gcsafe, raises: [].}

  PeerInfo* {.public.} = ref object
    peerId*: PeerId
@@ -56,15 +53,12 @@ proc update*(p: PeerInfo) {.async.} =
  for mapper in p.addressMappers:
    p.addrs = await mapper(p.addrs)

  let sprRes = SignedPeerRecord.init(
  p.signedPeerRecord = SignedPeerRecord.init(
    p.privateKey,
    PeerRecord.init(p.peerId, p.addrs)
  )
  if sprRes.isOk:
    p.signedPeerRecord = sprRes.get()
  else:
    discard
    #info "Can't update the signed peer record"
  ).valueOr():
    info "Can't update the signed peer record"
    return

proc addrs*(p: PeerInfo): seq[MultiAddress] =
  p.addrs
@@ -99,7 +93,7 @@ proc new*(
  agentVersion: string = "",
  addressMappers = newSeq[AddressMapper](),
  ): PeerInfo
  {.raises: [Defect, LPError].} =
  {.raises: [LPError].} =

  let pubkey = try:
    key.getPublicKey().tryGet()

@@ -16,15 +16,12 @@ runnableExamples:
  # Create a custom book type
  type MoodBook = ref object of PeerBook[string]

  var somePeerId = PeerId.random().get()
  var somePeerId = PeerId.random().expect("get random key")

  peerStore[MoodBook][somePeerId] = "Happy"
  doAssert peerStore[MoodBook][somePeerId] == "Happy"

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import
  std/[tables, sets, options, macros],
@@ -45,7 +42,7 @@ type
  # Handler types #
  #################

  PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [Defect].}
  PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [].}

  #########
  # Books #
  #########
@@ -161,20 +158,20 @@ proc updatePeerInfo*(
  if info.addrs.len > 0:
    peerStore[AddressBook][info.peerId] = info.addrs

  if info.pubkey.isSome:
    peerStore[KeyBook][info.peerId] = info.pubkey.get()
  info.pubkey.withValue(pubkey):
    peerStore[KeyBook][info.peerId] = pubkey

  if info.agentVersion.isSome:
    peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
  info.agentVersion.withValue(agentVersion):
    peerStore[AgentBook][info.peerId] = agentVersion.string

  if info.protoVersion.isSome:
    peerStore[ProtoVersionBook][info.peerId] = info.protoVersion.get().string
  info.protoVersion.withValue(protoVersion):
    peerStore[ProtoVersionBook][info.peerId] = protoVersion.string

  if info.protos.len > 0:
    peerStore[ProtoBook][info.peerId] = info.protos

  if info.signedPeerRecord.isSome:
    peerStore[SPRBook][info.peerId] = info.signedPeerRecord.get()
  info.signedPeerRecord.withValue(signedPeerRecord):
    peerStore[SPRBook][info.peerId] = signedPeerRecord

  let cleanupPos = peerStore.toClean.find(info.peerId)
  if cleanupPos >= 0:
let info = await peerStore.identify.identify(stream, stream.peerId)
|
||||
|
||||
when defined(libp2p_agents_metrics):
|
||||
var knownAgent = "unknown"
|
||||
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
|
||||
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
|
||||
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
|
||||
knownAgent = shortAgent.get()
|
||||
var
|
||||
knownAgent = "unknown"
|
||||
shortAgent = info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
|
||||
if KnownLibP2PAgentsSeq.contains(shortAgent):
|
||||
knownAgent = shortAgent
|
||||
muxer.connection.setShortAgent(knownAgent)
|
||||
|

  peerStore.updatePeerInfo(info)

@@ -9,10 +9,7 @@

## This module implements minimal Google's ProtoBuf primitives.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
{.push raises: [].}

import ../varint, ../utility, stew/[endians2, results]
export results, utility
@@ -579,26 +576,18 @@ proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
proc getField*(pb: ProtoBuffer, field: int,
               output: var ProtoBuffer): ProtoResult[bool] {.inline.} =
  var buffer: seq[byte]
  let res = pb.getField(field, buffer)
  if res.isOk():
    if res.get():
      output = initProtoBuffer(buffer)
      ok(true)
    else:
      ok(false)
  if ? pb.getField(field, buffer):
    output = initProtoBuffer(buffer)
    ok(true)
  else:
    err(res.error)
    ok(false)

proc getRequiredField*[T](pb: ProtoBuffer, field: int,
                          output: var T): ProtoResult[void] {.inline.} =
  let res = pb.getField(field, output)
  if res.isOk():
    if res.get():
      ok()
    else:
      err(RequiredFieldMissing)
  if ? pb.getField(field, output):
    ok()
  else:
    err(res.error)
    err(RequiredFieldMissing)

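Both helpers above now rely on results' `?` operator, which returns the error to the caller immediately and otherwise yields the wrapped value, collapsing the nested isOk/get ladder. A minimal self-contained sketch of the same pattern (names illustrative):

import results

proc lookup(present: bool): Result[bool, string] =
  ok(present)

proc requireField(present: bool): Result[void, string] =
  if ? lookup(present):  # on error, `?` propagates it; otherwise yields the bool
    ok()
  else:
    err("RequiredFieldMissing")

doAssert requireField(true).isOk()
doAssert requireField(false).error == "RequiredFieldMissing"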
proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
                                            output: var seq[T]): ProtoResult[bool] =
@@ -678,14 +667,10 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,

proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
                                  output: var seq[T]): ProtoResult[void] {.inline.} =
  let res = pb.getRepeatedField(field, output)
  if res.isOk():
    if res.get():
      ok()
    else:
      err(RequiredFieldMissing)
  if ? pb.getRepeatedField(field, output):
    ok()
  else:
    err(res.error)
    err(RequiredFieldMissing)

proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
                                             output: var seq[T]): ProtoResult[bool] =

@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/options
import stew/results
import chronos, chronicles
import ../../../switch,
@@ -27,8 +23,8 @@ type
AutonatClient* = ref object of RootObj

proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -36,15 +32,13 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =

proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [Defect, AutonatError].} =
if autonatMsg.isNone() or
autonatMsg.get().msgType != DialResponse or
autonatMsg.get().response.isNone() or
(autonatMsg.get().response.get().status == Ok and
autonatMsg.get().response.get().ma.isNone()):
raise newException(AutonatError, "Unexpected response")
else:
autonatMsg.get().response.get()
proc getResponseOrRaise(autonatMsg: Opt[AutonatMsg]): AutonatDialResponse {.raises: [AutonatError].} =
autonatMsg.withValue(msg):
if msg.msgType == DialResponse:
msg.response.withValue(res):
if not (res.status == Ok and res.ma.isNone()):
return res
raise newException(AutonatError, "Unexpected response")

let conn =
try:
@@ -69,7 +63,7 @@ method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[Mult
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
response.ma.get()
response.ma.tryGet()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
else:
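
getResponseOrRaise above is restructured around withValue, which runs its block only when the Opt carries a value and binds that value to the given name, so the single raise at the end covers every failure path. In nim-libp2p the template comes from its utility/results imports; the sketch below inlines a minimal stand-in so it runs on its own:

import stew/results

template withValue(o, name, body: untyped) =
  # minimal stand-in for the template used in this diff
  if o.isSome():
    let name = o.get()
    body

proc describe(x: Opt[int]): string =
  x.withValue(v):
    return "value: " & $v  # runs only when x holds a value
  "empty"                  # reached only when x is Opt.none

assert describe(Opt.some(3)) == "value: 3"
assert describe(Opt.none(int)) == "empty"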
@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[options]
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress,
@@ -42,29 +38,29 @@ type
InternalError = 300

AutonatPeerInfo* = object
id*: Option[PeerId]
id*: Opt[PeerId]
addrs*: seq[MultiAddress]

AutonatDial* = object
peerInfo*: Option[AutonatPeerInfo]
peerInfo*: Opt[AutonatPeerInfo]

AutonatDialResponse* = object
status*: ResponseStatus
text*: Option[string]
ma*: Option[MultiAddress]
text*: Opt[string]
ma*: Opt[MultiAddress]

AutonatMsg* = object
msgType*: MsgType
dial*: Option[AutonatDial]
response*: Option[AutonatDialResponse]
dial*: Opt[AutonatDial]
response*: Opt[AutonatDialResponse]

NetworkReachability* {.pure.} = enum
Unknown, NotReachable, Reachable

proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
if p.id.isSome():
result.write(1, p.id.get())
p.id.withValue(id):
result.write(1, id)
for ma in p.addrs:
result.write(2, ma.data.buffer)
result.finish()
@@ -73,8 +69,8 @@ proc encode*(d: AutonatDial): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.Dial.uint)
var dial = initProtoBuffer()
if d.peerInfo.isSome():
dial.write(1, encode(d.peerInfo.get()))
d.peerInfo.withValue(pinfo):
dial.write(1, encode(pinfo))
dial.finish()
result.write(2, dial.buffer)
result.finish()
@@ -84,72 +80,60 @@ proc encode*(r: AutonatDialResponse): ProtoBuffer =
result.write(1, MsgType.DialResponse.uint)
var bufferResponse = initProtoBuffer()
bufferResponse.write(1, r.status.uint)
if r.text.isSome():
bufferResponse.write(2, r.text.get())
if r.ma.isSome():
bufferResponse.write(3, r.ma.get())
r.text.withValue(text):
bufferResponse.write(2, text)
r.ma.withValue(ma):
bufferResponse.write(3, ma)
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()

proc encode*(msg: AutonatMsg): ProtoBuffer =
if msg.dial.isSome():
return encode(msg.dial.get())
if msg.response.isSome():
return encode(msg.response.get())
msg.dial.withValue(dial):
return encode(dial)
msg.response.withValue(res):
return encode(res)

proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Opt[AutonatMsg] =
var
msgTypeOrd: uint32
pbDial: ProtoBuffer
pbResponse: ProtoBuffer
msg: AutonatMsg

let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbDial)
r3 = pb.getField(3, pbResponse)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
let pb = initProtoBuffer(buf)

if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(AutonatMsg)
if r2.get():
if ? pb.getField(1, msgTypeOrd).toOpt() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return Opt.none(AutonatMsg)
if ? pb.getField(2, pbDial).toOpt():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
let
r4 = pbDial.getField(1, pbPeerInfo)
if r4.isErr(): return none(AutonatMsg)
let r4 = ? pbDial.getField(1, pbPeerInfo).toOpt()

var peerInfo: AutonatPeerInfo
if r4.get():
if r4:
var pid: PeerId
let
r5 = pbPeerInfo.getField(1, pid)
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
if r5.get(): peerInfo.id = some(pid)
dial.peerInfo = some(peerInfo)
msg.dial = some(dial)
r5 = ? pbPeerInfo.getField(1, pid).toOpt()
r6 = ? pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
if r5: peerInfo.id = Opt.some(pid)
dial.peerInfo = Opt.some(peerInfo)
msg.dial = Opt.some(dial)

if r3.get():
if ? pb.getField(3, pbResponse).toOpt():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse

let
r4 = pbResponse.getField(1, statusOrd)
r5 = pbResponse.getField(2, text)
r6 = pbResponse.getField(3, ma)

if r4.isErr() or r5.isErr() or r6.isErr() or
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
return none(AutonatMsg)
if r5.get(): response.text = some(text)
if r6.get(): response.ma = some(ma)
msg.response = some(response)

return some(msg)
if ? pbResponse.getField(1, statusOrd).optValue():
if not checkedEnumAssign(response.status, statusOrd):
return Opt.none(AutonatMsg)
if ? pbResponse.getField(2, text).optValue():
response.text = Opt.some(text)
if ? pbResponse.getField(3, ma).optValue():
response.ma = Opt.some(ma)
msg.response = Opt.some(response)
return Opt.some(msg)
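
The new decoder threads every field read through toOpt() (or optValue() in the last hunk), converting a ProtoResult[bool] into an Opt[bool] so the ? operator aborts the whole decode on the first malformed field. A reduced sketch of that conversion chain; decodeFlag is a stand-in for pb.getField, and toOpt is assumed to behave as in recent stew/results:

import stew/results

proc decodeFlag(raw: seq[byte], i: int): Result[bool, string] =
  if i < raw.len: ok(raw[i] == 1'u8)
  else: err("field " & $i & " missing")

proc decodeBoth(raw: seq[byte]): Opt[(bool, bool)] =
  # toOpt drops the error detail, leaving Opt.none on failure, which `?`
  # then returns from decodeBoth: the first bad field aborts the decode
  let a = ? decodeFlag(raw, 0).toOpt()
  let b = ? decodeFlag(raw, 1).toOpt()
  Opt.some((a, b))

assert decodeBoth(@[1'u8, 0'u8]).get() == (true, false)
assert decodeBoth(@[1'u8]).isNone()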
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[options, sets, sequtils]
import std/[sets, sequtils]
import stew/results
import chronos, chronicles
import ../../protocol,
@@ -36,8 +33,8 @@ type
dialTimeout: Duration

proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -45,16 +42,16 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
let pb = AutonatDialResponse(
status: status,
text: if text == "": none(string) else: some(text),
ma: none(MultiAddress)
text: if text == "": Opt.none(string) else: Opt.some(text),
ma: Opt.none(MultiAddress)
).encode()
await conn.writeLp(pb.buffer)

proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
text: some("Ok"),
ma: some(ma)
text: Opt.some("Ok"),
ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)

@@ -73,8 +70,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
let ma = await fut
if ma.isSome:
await conn.sendResponseOk(ma.get())
ma.withValue(maddr):
await conn.sendResponseOk(maddr)
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
@@ -95,42 +92,40 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
f.cancel()

proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
let dial = msg.dial.valueOr:
return conn.sendResponseError(BadRequest, "Missing Dial")
let peerInfo = dial.peerInfo.valueOr:
return conn.sendResponseError(BadRequest, "Missing Peer Info")
let peerInfo = msg.dial.get().peerInfo.get()
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
peerInfo.id.withValue(id):
if id != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")

if conn.observedAddr.isNone:
let observedAddr = conn.observedAddr.valueOr:
return conn.sendResponseError(BadRequest, "Missing observed address")
let observedAddr = conn.observedAddr.get()

var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
return conn.sendResponseError(DialRefused, "Invalid observed address")
if isRelayed:
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = observedAddr[0]
if hostIp.isErr() or not IP.match(hostIp.get()):
trace "wrong observed address", address=observedAddr
let hostIp = observedAddr[0].valueOr:
return conn.sendResponseError(InternalError, "Wrong observed address")
if not IP.match(hostIp):
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(observedAddr)
trace "addrs received", addrs = peerInfo.addrs
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
continue
let maFirst = ma[0]
if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
continue
isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr: continue
let maFirst = ma[0].valueOr: continue
if not DNS_OR_IP.match(maFirst): continue

try:
addrs.incl(
if maFirst.get() == hostIp.get():
if maFirst == hostIp:
ma
else:
let maEnd = ma[1..^1]
if maEnd.isErr(): continue
hostIp.get() & maEnd.get()
let maEnd = ma[1..^1].valueOr: continue
hostIp & maEnd
)
except LPError as exc:
continue
@@ -145,12 +140,12 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi

proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
let msg = msgOpt.get()
if msg.msgType != MsgType.Dial:
raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
raise exc
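
handleDial above now chains valueOr blocks: each block runs only when the Opt is empty and must leave the scope (return a response, or continue in the address loop), so the happy path reads straight down without nesting. A minimal sketch of that control flow:

import stew/results

proc firstOdd(xs: seq[Opt[int]]): int =
  for x in xs:
    # valueOr's block runs only when x is empty; `continue` skips the
    # element, just like the relayed-address checks in the loop above
    let v = x.valueOr: continue
    if v mod 2 == 1:
      return v
  -1

assert firstOdd(@[Opt.none(int), Opt.some(2), Opt.some(3)]) == 3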
@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[options, deques, sequtils]
import std/[deques, sequtils]
import chronos, metrics
import ../../../switch
import ../../../wire
@@ -21,7 +18,7 @@ from core import NetworkReachability, AutonatUnreachableError
import ../../../utils/heartbeat
import ../../../crypto/crypto

export options, core.NetworkReachability
export core.NetworkReachability

logScope:
topics = "libp2p autonatservice"
@@ -34,12 +31,12 @@ type
addressMapper: AddressMapper
scheduleHandle: Future[void]
networkReachability*: NetworkReachability
confidence: Option[float]
confidence: Opt[float]
answers: Deque[NetworkReachability]
autonatClient: AutonatClient
statusAndConfidenceHandler: StatusAndConfidenceHandler
rng: ref HmacDrbgContext
scheduleInterval: Option[Duration]
scheduleInterval: Opt[Duration]
askNewConnectedPeers: bool
numPeersToAsk: int
maxQueueSize: int
@@ -47,13 +44,13 @@ type
dialTimeout: Duration
enableAddressMapper: bool

StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Opt[float]): Future[void] {.gcsafe, raises: [].}

proc new*(
T: typedesc[AutonatService],
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
scheduleInterval: Option[Duration] = none(Duration),
scheduleInterval: Opt[Duration] = Opt.none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
@@ -63,7 +60,7 @@ proc new*(
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
confidence: none(float),
confidence: Opt.none(float),
answers: initDeque[NetworkReachability](),
autonatClient: autonatClient,
rng: rng,
@@ -85,27 +82,33 @@ proc hasEnoughIncomingSlots(switch: Switch): bool =
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil

proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool] {.async.} =

if ans == Unknown:
return

let oldNetworkReachability = self.networkReachability
let oldConfidence = self.confidence

if self.answers.len == self.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)

self.networkReachability = Unknown
self.confidence = none(float)
self.confidence = Opt.none(float)
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
if self.confidence.isNone and confidence >= self.minConfidence:
self.networkReachability = reachability
self.confidence = some(confidence)
self.confidence = Opt.some(confidence)

debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers

# Return whether anything has changed
return self.networkReachability != oldNetworkReachability or self.confidence != oldConfidence

proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
logScope:
peerId = $peerId
@@ -132,9 +135,9 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
except CatchableError as error:
debug "dialMe unexpected error", msg = error.msg
Unknown
await self.handleAnswer(ans)
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
if hasReachabilityOrConfidenceChanged:
await self.callHandler()
await switch.peerInfo.update()
return ans

@@ -159,7 +162,7 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =

if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
@@ -168,8 +171,7 @@ proc addressMapper(
for listenAddr in listenAddrs:
var processedMA = listenAddr
try:
let hostIP = initTAddress(listenAddr).get()
if not hostIP.isGlobal() and self.networkReachability == NetworkReachability.Reachable:
if not listenAddr.isPublicMA() and self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
except CatchableError as exc:
debug "Error while handling address mapper", msg = exc.msg
@@ -177,7 +179,7 @@ proc addressMapper(
return addrs

method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)

info "Setting up AutonatService"
@@ -187,8 +189,8 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.scheduleInterval.isSome():
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
@@ -196,7 +198,6 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
method run*(self: AutonatService, switch: Switch) {.async, public.} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
await self.callHandler()

method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
info "Stopping AutonatService"
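
The service keeps a sliding window of at most maxQueueSize answers and, in handleAnswer above, recomputes confidence as the fraction of window slots agreeing on a reachability value, adopting the first value (in priority order) whose fraction reaches minConfidence. A worked example of the arithmetic with maxQueueSize = 10 as in the defaults above; the 0.3 threshold is only an assumed illustration:

import std/sequtils

const maxQueueSize = 10
let answers = @["R", "R", "N", "R", "U"]  # illustrative window contents
# the fraction is taken over the full window size, not answers.len,
# so a short history yields low confidence by construction
let reachableConfidence = answers.countIt(it == "R") / maxQueueSize
assert reachableConfidence == 0.3  # 3 of 10 slots say Reachable
let adopted = reachableConfidence >= 0.3  # assumed minConfidence
assert adopted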
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/sequtils

@@ -69,7 +66,7 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:

if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."
@@ -84,8 +81,8 @@ proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
debug "Unexpected error when trying direct conn", err = err.msg
raise newException(DcutrError, "Unexpected error when trying a direct conn", err)
debug "Unexpected error when Dcutr initiator tried to connect to the remote peer", err = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr initiator tried to connect to the remote peer", err)
finally:
if stream != nil:
await stream.close()

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/sequtils

@@ -44,7 +41,7 @@ proc encode*(msg: DcutrMsg): ProtoBuffer =
result.write(2, addr)
result.finish()

proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [Defect, DcutrError].} =
proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} =
var
msgTypeOrd: uint32
dcutrMsg: DcutrMsg
@@ -59,5 +56,10 @@ proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)

proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] =
addrs.filterIt(TCP.match(it))
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError].} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
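
The new getHolePunchableAddrs keeps TCP addresses and, via a[0..1], trims anything after the first two protocol components, so a trailing /p2p/<peerid> part is dropped. A usage sketch against the MultiAddress API this file already imports (the address itself is illustrative):

import libp2p/multiaddress

let full = MultiAddress.init(
  "/ip4/192.0.2.1/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N").tryGet()
# keep only the transport part: the first two protocols, /ip4 and /tcp
let transportOnly = full[0..1].tryGet()
assert $transportOnly == "/ip4/192.0.2.1/tcp/1234"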
@@ -7,13 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}

import std/[options, sets, sequtils]
{.push raises: [].}

import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles

@@ -33,7 +29,7 @@ logScope:

proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =

proc handleStream(stream: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
@@ -60,7 +56,7 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi

if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, upgradeDir = Direction.In))
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."
@@ -75,8 +71,8 @@ proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDi
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
warn "Unexpected error in dcutr handler", msg = err.msg
raise newException(DcutrError, "Unexpected error in dcutr handler", err)
warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)

let self = T()
self.handler = handleStream

@@ -7,15 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}

import times, options
{.push raises: [].}

import times
import chronos, chronicles

import ./relay,
./messages,
./rconn,
@@ -25,8 +20,6 @@ import ./relay,
../../../multiaddress,
../../../stream/connection

export options

logScope:
topics = "libp2p relay relay-client"

@@ -39,7 +32,7 @@ type
RelayV2DialError* = object of RelayClientError
RelayClientAddConn* = proc(conn: Connection,
duration: uint32,
data: uint64): Future[void] {.gcsafe, raises: [Defect].}
data: uint64): Future[void] {.gcsafe, raises: [].}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -47,28 +40,27 @@ type
Rsvp* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
voucher*: Option[Voucher] # optional, reservation voucher
voucher*: Opt[Voucher] # optional, reservation voucher
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes

proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)

proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {.async.} =
if msg.peer.isNone():
await sendStopError(conn, MalformedMessage)
return
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
src = msg.peer.get()
src = msg.peer.valueOr:
await sendStopError(conn, MalformedMessage)
return
limitDuration = msg.limit.duration
limitData = msg.limit.data
msg = StopMessage(
msgType: StopMessageType.Status,
status: some(Ok))
status: Opt.some(Ok))
pb = encode(msg)

trace "incoming relay connection", src
@@ -92,7 +84,7 @@ proc reserve*(cl: RelayClient,
pb = encode(HopMessage(msgType: HopMessageType.Reserve))
msg = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -103,21 +95,21 @@ proc reserve*(cl: RelayClient,
raise newException(ReservationError, "Unexpected relay response type")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(ReservationError, "Reservation failed")
if msg.reservation.isNone():
raise newException(ReservationError, "Missing reservation information")

let reservation = msg.reservation.get()
let reservation = msg.reservation.valueOr:
raise newException(ReservationError, "Missing reservation information")
if reservation.expire > int64.high().uint64 or
now().utc > reservation.expire.int64.fromUnix.utc:
raise newException(ReservationError, "Bad expiration date")
result.expire = reservation.expire
result.addrs = reservation.addrs

if reservation.svoucher.isSome():
let svoucher = SignedVoucher.decode(reservation.svoucher.get())
if svoucher.isErr() or svoucher.get().data.relayPeerId != peerId:
reservation.svoucher.withValue(sv):
let svoucher = SignedVoucher.decode(sv).valueOr:
raise newException(ReservationError, "Invalid voucher")
result.voucher = some(svoucher.get().data)
if svoucher.data.relayPeerId != peerId:
raise newException(ReservationError, "Invalid voucher PeerId")
result.voucher = Opt.some(svoucher.data)

result.limitDuration = msg.limit.duration
result.limitData = msg.limit.data
@@ -129,9 +121,9 @@ proc dialPeerV1*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
var
msg = RelayMessage(
msgType: some(RelayType.Hop),
srcPeer: some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
dstPeer: some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
msgType: Opt.some(RelayType.Hop),
srcPeer: Opt.some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
pb = encode(msg)

trace "Dial peer", msgSend=msg
@@ -154,16 +146,18 @@ proc dialPeerV1*(
raise exc

try:
if msgRcvFromRelayOpt.isNone:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
raise newException(RelayV1DialError, "Hop can't open destination stream")
let msgRcvFromRelay = msgRcvFromRelayOpt.get()
if msgRcvFromRelay.msgType.isNone or msgRcvFromRelay.msgType.get() != RelayType.Status:
if msgRcvFromRelay.msgType.tryGet() != RelayType.Status:
raise newException(RelayV1DialError, "Hop can't open destination stream: wrong message type")
if msgRcvFromRelay.status.isNone or msgRcvFromRelay.status.get() != StatusV1.Success:
if msgRcvFromRelay.status.tryGet() != StatusV1.Success:
raise newException(RelayV1DialError, "Hop can't open destination stream: status failed")
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
except ValueError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, exc.msg)
result = conn

proc dialPeerV2*(
@@ -173,13 +167,13 @@ proc dialPeerV2*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: some(p)))
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))

trace "Dial peer", p

let msgRcvFromRelay = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -189,19 +183,17 @@ proc dialPeerV2*(
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")
if msgRcvFromRelay.status.get(UnexpectedMessage) != Ok:
trace "Relay stop failed", msg = msgRcvFromRelay.status.get()
trace "Relay stop failed", msg = msgRcvFromRelay.status
raise newException(RelayV2DialError, "Relay stop failure")
conn.limitDuration = msgRcvFromRelay.limit.duration
conn.limitData = msgRcvFromRelay.limit.data
return conn

proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
let msgOpt = StopMessage.decode(await conn.readLp(RelayClientMsgSize))
if msgOpt.isNone():
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
trace "client circuit relay v2 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
trace "client circuit relay v2 handle stream", msg

if msg.msgType == StopMessageType.Connect:
await cl.handleRelayedConnect(conn, msg)
@@ -209,17 +201,15 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)

proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
if msg.srcPeer.isNone:
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
let src = msg.srcPeer.get()

if msg.dstPeer.isNone:
let dst = msg.dstPeer.valueOr:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return

let dst = msg.dstPeer.get()
if dst.peerId != cl.switch.peerInfo.peerId:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return
@@ -236,14 +226,17 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()

proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
let msgOpt = RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
if msgOpt.isNone:
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
trace "client circuit relay v1 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
case msg.msgType.get:
trace "client circuit relay v1 handle stream", msg

let typ = msg.msgType.valueOr:
trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
case typ:
of RelayType.Hop:
if cl.canHop: await cl.handleHop(conn, msg)
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
@@ -273,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options, macros
import stew/objects
import macros
import stew/[objects, results]
import ../../../peerinfo,
../../../signed_envelope

@@ -49,36 +46,36 @@ type
addrs*: seq[MultiAddress]

RelayMessage* = object
msgType*: Option[RelayType]
srcPeer*: Option[RelayPeer]
dstPeer*: Option[RelayPeer]
status*: Option[StatusV1]
msgType*: Opt[RelayType]
srcPeer*: Opt[RelayPeer]
dstPeer*: Opt[RelayPeer]
status*: Opt[StatusV1]

proc encode*(msg: RelayMessage): ProtoBuffer =
result = initProtoBuffer()

if isSome(msg.msgType):
result.write(1, msg.msgType.get().ord.uint)
if isSome(msg.srcPeer):
msg.msgType.withValue(typ):
result.write(1, typ.ord.uint)
msg.srcPeer.withValue(srcPeer):
var peer = initProtoBuffer()
peer.write(1, msg.srcPeer.get().peerId)
for ma in msg.srcPeer.get().addrs:
peer.write(1, srcPeer.peerId)
for ma in srcPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(2, peer.buffer)
if isSome(msg.dstPeer):
msg.dstPeer.withValue(dstPeer):
var peer = initProtoBuffer()
peer.write(1, msg.dstPeer.get().peerId)
for ma in msg.dstPeer.get().addrs:
peer.write(1, dstPeer.peerId)
for ma in dstPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(3, peer.buffer)
if isSome(msg.status):
result.write(4, msg.status.get().ord.uint)
msg.status.withValue(status):
result.write(4, status.ord.uint)

result.finish()

proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
var
rMsg: RelayMessage
msgTypeOrd: uint32
@@ -88,38 +85,29 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
pbSrc: ProtoBuffer
pbDst: ProtoBuffer

let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbSrc)
r3 = pb.getField(3, pbDst)
r4 = pb.getField(4, statusOrd)
let pb = initProtoBuffer(buf)

if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
return none(RelayMessage)

if r2.get() and
(pbSrc.getField(1, src.peerId).isErr() or
pbSrc.getRepeatedField(2, src.addrs).isErr()):
return none(RelayMessage)

if r3.get() and
(pbDst.getField(1, dst.peerId).isErr() or
pbDst.getRepeatedField(2, dst.addrs).isErr()):
return none(RelayMessage)

if r1.get():
if ? pb.getField(1, msgTypeOrd).toOpt():
if msgTypeOrd.int notin RelayType:
return none(RelayMessage)
rMsg.msgType = some(RelayType(msgTypeOrd))
if r2.get(): rMsg.srcPeer = some(src)
if r3.get(): rMsg.dstPeer = some(dst)
if r4.get():
return Opt.none(RelayMessage)
rMsg.msgType = Opt.some(RelayType(msgTypeOrd))

if ? pb.getField(2, pbSrc).toOpt():
discard ? pbSrc.getField(1, src.peerId).toOpt()
discard ? pbSrc.getRepeatedField(2, src.addrs).toOpt()
rMsg.srcPeer = Opt.some(src)

if ? pb.getField(3, pbDst).toOpt():
discard ? pbDst.getField(1, dst.peerId).toOpt()
discard ? pbDst.getRepeatedField(2, dst.addrs).toOpt()
rMsg.dstPeer = Opt.some(dst)

if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV1
if not checkedEnumAssign(status, statusOrd):
return none(RelayMessage)
rMsg.status = some(status)
some(rMsg)
return Opt.none(RelayMessage)
rMsg.status = Opt.some(status)
Opt.some(rMsg)

# Voucher

@@ -179,7 +167,7 @@ type
Reservation* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
svoucher*: Option[seq[byte]] # optional, reservation voucher
svoucher*: Opt[seq[byte]] # optional, reservation voucher
Limit* = object
duration*: uint32 # seconds
data*: uint64 # bytes
@@ -199,30 +187,29 @@ type
Status = 2
HopMessage* = object
msgType*: HopMessageType
peer*: Option[Peer]
reservation*: Option[Reservation]
peer*: Opt[Peer]
reservation*: Opt[Reservation]
limit*: Limit
status*: Option[StatusV2]
status*: Opt[StatusV2]

proc encode*(msg: HopMessage): ProtoBuffer =
var pb = initProtoBuffer()

pb.write(1, msg.msgType.ord.uint)
if msg.peer.isSome():
msg.peer.withValue(peer):
var ppb = initProtoBuffer()
ppb.write(1, msg.peer.get().peerId)
for ma in msg.peer.get().addrs:
ppb.write(1, peer.peerId)
for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
if msg.reservation.isSome():
let rsrv = msg.reservation.get()
msg.reservation.withValue(rsrv):
var rpb = initProtoBuffer()
rpb.write(1, rsrv.expire)
for ma in rsrv.addrs:
rpb.write(2, ma.data.buffer)
if rsrv.svoucher.isSome():
rpb.write(3, rsrv.svoucher.get())
rsrv.svoucher.withValue(vouch):
rpb.write(3, vouch)
rpb.finish()
pb.write(3, rpb.buffer)
if msg.limit.duration > 0 or msg.limit.data > 0:
@@ -231,66 +218,51 @@ proc encode*(msg: HopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(4, lpb.buffer)
if msg.status.isSome():
pb.write(5, msg.status.get().ord.uint)
msg.status.withValue(status):
pb.write(5, status.ord.uint)

pb.finish()
pb

proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Option[HopMessage] =
var
msg: HopMessage
msgTypeOrd: uint32
pbPeer: ProtoBuffer
pbReservation: ProtoBuffer
pbLimit: ProtoBuffer
statusOrd: uint32
peer: Peer
reservation: Reservation
limit: Limit
res: bool

let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, msgTypeOrd)
r2 = pb.getField(2, pbPeer)
r3 = pb.getField(3, pbReservation)
r4 = pb.getField(4, pbLimit)
r5 = pb.getField(5, statusOrd)

if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or r5.isErr():
return none(HopMessage)

if r2.get() and
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
return none(HopMessage)

if r3.get():
var svoucher: seq[byte]
let rSVoucher = pbReservation.getField(3, svoucher)
if pbReservation.getRequiredField(1, reservation.expire).isErr() or
pbReservation.getRepeatedField(2, reservation.addrs).isErr() or
rSVoucher.isErr():
return none(HopMessage)
if rSVoucher.get(): reservation.svoucher = some(svoucher)

if r4.get() and
(pbLimit.getField(1, limit.duration).isErr() or
pbLimit.getField(2, limit.data).isErr()):
return none(HopMessage)
proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Opt[HopMessage] =
var msg: HopMessage
let pb = initProtoBuffer(buf)

var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
if not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(HopMessage)
if r2.get(): msg.peer = some(peer)
if r3.get(): msg.reservation = some(reservation)
if r4.get(): msg.limit = limit
if r5.get():
return Opt.none(HopMessage)

var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)

var pbReservation: ProtoBuffer
if ? pb.getField(3, pbReservation).toOpt():
var
svoucher: seq[byte]
reservation: Reservation
if ? pbReservation.getField(3, svoucher).toOpt():
reservation.svoucher = Opt.some(svoucher)
? pbReservation.getRequiredField(1, reservation.expire).toOpt()
discard ? pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
msg.reservation = Opt.some(reservation)

var pbLimit: ProtoBuffer
if ? pb.getField(4, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()

var statusOrd: uint32
if ? pb.getField(5, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return none(HopMessage)
msg.status = some(status)
some(msg)
return Opt.none(HopMessage)
msg.status = Opt.some(status)
Opt.some(msg)

# Circuit Relay V2 Stop Message

@@ -300,19 +272,19 @@ type
Status = 1
StopMessage* = object
msgType*: StopMessageType
peer*: Option[Peer]
peer*: Opt[Peer]
limit*: Limit
status*: Option[StatusV2]
status*: Opt[StatusV2]

proc encode*(msg: StopMessage): ProtoBuffer =
var pb = initProtoBuffer()

pb.write(1, msg.msgType.ord.uint)
if msg.peer.isSome():
msg.peer.withValue(peer):
var ppb = initProtoBuffer()
ppb.write(1, msg.peer.get().peerId)
for ma in msg.peer.get().addrs:
ppb.write(1, peer.peerId)
for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
@@ -322,52 +294,40 @@ proc encode*(msg: StopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(3, lpb.buffer)
if msg.status.isSome():
pb.write(4, msg.status.get().ord.uint)
msg.status.withValue(status):
pb.write(4, status.ord.uint)

pb.finish()
pb

proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Option[StopMessage] =
var
msg: StopMessage
msgTypeOrd: uint32
pbPeer: ProtoBuffer
pbLimit: ProtoBuffer
statusOrd: uint32
peer: Peer
limit: Limit
rVoucher: ProtoResult[bool]
res: bool
proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Opt[StopMessage] =
var msg: StopMessage

let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, msgTypeOrd)
r2 = pb.getField(2, pbPeer)
r3 = pb.getField(3, pbLimit)
r4 = pb.getField(4, statusOrd)
let pb = initProtoBuffer(buf)

if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
return none(StopMessage)

if r2.get() and
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
return none(StopMessage)

if r3.get() and
(pbLimit.getField(1, limit.duration).isErr() or
pbLimit.getField(2, limit.data).isErr()):
return none(StopMessage)

if msgTypeOrd.int notin StopMessageType.low.ord .. StopMessageType.high.ord:
return none(StopMessage)
var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
if msgTypeOrd.int notin StopMessageType:
return Opt.none(StopMessage)
msg.msgType = StopMessageType(msgTypeOrd)
if r2.get(): msg.peer = some(peer)
if r3.get(): msg.limit = limit
if r4.get():

var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)

var pbLimit: ProtoBuffer
if ? pb.getField(3, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()

var statusOrd: uint32
if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return none(StopMessage)
msg.status = some(status)
some(msg)
return Opt.none(StopMessage)
msg.status = Opt.some(status)
Opt.some(msg)
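
Both decoders above gate enum conversion behind checkedEnumAssign from stew/objects, which assigns only when the ordinal names a valid member and reports the outcome as a bool, so a hostile ordinal yields Opt.none instead of a range defect. A small sketch:

import stew/objects

type Color = enum
  Red, Green, Blue

var c: Color
assert checkedEnumAssign(c, 2'u32)      # valid ordinal: c becomes Blue
assert c == Blue
assert not checkedEnumAssign(c, 7'u32)  # invalid ordinal: c is untouched
assert c == Blue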
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos

@@ -50,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =

@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options, sequtils, tables
import sequtils, tables

import chronos, chronicles

@@ -93,11 +90,11 @@ proc createReserveResponse(
rsrv = Reservation(expire: expireUnix,
addrs: r.switch.peerInfo.addrs.mapIt(
? it.concat(ma).orErr(CryptoError.KeyError)),
svoucher: some(? sv.encode))
svoucher: Opt.some(? sv.encode))
msg = HopMessage(msgType: HopMessageType.Status,
reservation: some(rsrv),
reservation: Opt.some(rsrv),
limit: r.limit,
status: some(Ok))
status: Opt.some(Ok))
return ok(msg)

proc isRelayed*(conn: Connection): bool =
@@ -108,7 +105,7 @@ proc isRelayed*(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false

proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -118,32 +115,30 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
trace "Too many reservations", pid = conn.peerId
await sendHopStatus(conn, ReservationRefused)
return
trace "reserving relay slot for", pid = conn.peerId
let
pid = conn.peerId
expire = now().utc + r.reservationTTL
msg = r.createReserveResponse(pid, expire)
msg = r.createReserveResponse(pid, expire).valueOr:
trace "error signing the voucher", pid
return

trace "reserving relay slot for", pid
if msg.isErr():
trace "error signing the voucher", error = error(msg), pid
return
r.rsvp[pid] = expire
await conn.writeLp(encode(msg.get()).buffer)
await conn.writeLp(encode(msg).buffer)

proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
return
if msg.peer.isNone():
await sendHopStatus(connSrc, MalformedMessage)
return

let
msgPeer = msg.peer.valueOr:
await sendHopStatus(connSrc, MalformedMessage)
return
src = connSrc.peerId
dst = msg.peer.get().peerId
dst = msgPeer.peerId
if dst notin r.rsvp:
trace "refusing connection, no reservation", src, dst
await sendHopStatus(connSrc, NoReservation)
@@ -176,16 +171,17 @@ proc handleConnect(r: Relay,

proc sendStopMsg() {.async.} =
let stopMsg = StopMessage(msgType: StopMessageType.Connect,
peer: some(Peer(peerId: src, addrs: @[])),
peer: Opt.some(Peer(peerId: src, addrs: @[])),
limit: r.limit)
await connDst.writeLp(encode(stopMsg).buffer)
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).get()
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).valueOr:
raise newException(SendStopError, "Malformed message")
if msg.msgType != StopMessageType.Status:
raise newException(SendStopError, "Unexpected stop response, not a status message")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(SendStopError, "Relay stop failure")
await connSrc.writeLp(encode(HopMessage(msgType: HopMessageType.Status,
status: some(Ok))).buffer)
status: Opt.some(Ok))).buffer)
try:
await sendStopMsg()
except CancelledError as exc:
@@ -204,13 +200,11 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)

proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
let msgOpt = HopMessage.decode(await conn.readLp(r.msgSize))
if msgOpt.isNone():
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
trace "relayv2 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
trace "relayv2 handle stream", msg = msg
case msg.msgType:
of HopMessageType.Reserve: await r.handleReserve(conn)
of HopMessageType.Connect: await r.handleConnect(conn, msg)
@@ -220,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =

# Relay V1

proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -228,15 +222,14 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
return

var src, dst: RelayPeer
proc checkMsg(): Result[RelayMessage, StatusV1] =
if msg.srcPeer.isNone:
src = msg.srcPeer.valueOr:
return err(StatusV1.HopSrcMultiaddrInvalid)
let src = msg.srcPeer.get()
if src.peerId != connSrc.peerId:
return err(StatusV1.HopSrcMultiaddrInvalid)
if msg.dstPeer.isNone:
dst = msg.dstPeer.valueOr:
return err(StatusV1.HopDstMultiaddrInvalid)
let dst = msg.dstPeer.get()
if dst.peerId == r.switch.peerInfo.peerId:
return err(StatusV1.HopCantRelayToSelf)
if not r.switch.isConnected(dst.peerId):
@@ -248,9 +241,6 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, check.error())
return

let
src = msg.srcPeer.get()
dst = msg.dstPeer.get()
if r.peerCount[src.peerId] >= r.maxCircuitPerPeer or
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
trace "refusing connection; too many connection from src or to dst", src, dst
@@ -274,9 +264,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await connDst.close()

let msgToSend = RelayMessage(
msgType: some(RelayType.Stop),
srcPeer: some(src),
dstPeer: some(dst))
msgType: Opt.some(RelayType.Stop),
srcPeer: Opt.some(src),
dstPeer: Opt.some(dst))

let msgRcvFromDstOpt = try:
await connDst.writeLp(encode(msgToSend).buffer)
@@ -288,12 +278,11 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return

if msgRcvFromDstOpt.isNone:
let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
trace "error reading stop response", msg = msgRcvFromDstOpt
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return

let msgRcvFromDst = msgRcvFromDstOpt.get()
if msgRcvFromDst.msgType.get(RelayType.Stop) != RelayType.Status or
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
trace "unexcepted relay stop response", msgRcvFromDst
@@ -304,14 +293,17 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)

proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
let msgOpt = RelayMessage.decode(await conn.readLp(r.msgSize))
if msgOpt.isNone:
proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
trace "relay handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
case msg.msgType.get:
trace "relay handle stream", msg

let typ = msg.msgType.valueOr:
trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
case typ:
of RelayType.Hop: await r.handleHop(conn, msg)
of RelayType.Stop: await sendStatus(conn, StatusV1.StopRelayRefused)
of RelayType.CanHop: await sendStatus(conn, StatusV1.Success)
@@ -344,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)

proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import sequtils, strutils
|
||||
|
||||
@@ -40,33 +37,33 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
|
||||
self.client.onNewConnection = proc(
|
||||
conn: Connection,
|
||||
duration: uint32 = 0,
|
||||
data: uint64 = 0) {.async, gcsafe, raises: [Defect].} =
|
||||
data: uint64 = 0) {.async.} =
|
||||
await self.queue.addLast(RelayConnection.new(conn, duration, data))
|
||||
await conn.join()
|
||||
self.selfRunning = true
|
||||
await procCall Transport(self).start(ma)
|
||||
trace "Starting Relay transport"
|
||||
|
||||
method stop*(self: RelayTransport) {.async, gcsafe.} =
|
||||
method stop*(self: RelayTransport) {.async.} =
|
||||
self.running = false
|
||||
self.selfRunning = false
|
||||
self.client.onNewConnection = nil
|
||||
while not self.queue.empty():
|
||||
await self.queue.popFirstNoWait().close()
|
||||
|
||||
method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
|
||||
method accept*(self: RelayTransport): Future[Connection] {.async.} =
|
||||
result = await self.queue.popFirst()
|
||||
|
||||
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
|
||||
proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
|
||||
let
|
||||
sma = toSeq(ma.items())
|
||||
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
|
||||
var
|
||||
relayPeerId: PeerId
|
||||
dstPeerId: PeerId
|
||||
if not relayPeerId.init(($(sma[^3].get())).split('/')[2]):
|
||||
if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
|
||||
raise newException(RelayV2DialError, "Relay doesn't exist")
|
||||
if not dstPeerId.init(($(sma[^1].get())).split('/')[2]):
|
||||
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
|
||||
raise newException(RelayV2DialError, "Destination doesn't exist")
|
||||
trace "Dial", relayPeerId, dstPeerId
|
||||
|
||||
@@ -93,14 +90,18 @@ method dial*(
|
||||
self: RelayTransport,
|
||||
hostname: string,
|
||||
ma: MultiAddress,
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
|
||||
let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
|
||||
result = await self.dial(address)
|
||||
peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
|
||||
peerId.withValue(pid):
|
||||
let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
|
||||
result = await self.dial(address)
|
||||
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
|
||||
if ma.protocols.isOk():
|
||||
let sma = toSeq(ma.items())
|
||||
result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
|
||||
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
|
||||
try:
|
||||
if ma.protocols.isOk():
|
||||
let sma = toSeq(ma.items())
|
||||
result = sma.len >= 2 and CircuitRelay.match(sma[^1].tryGet())
|
||||
except CatchableError as exc:
|
||||
result = false
|
||||
trace "Handles return", ma, result
|
||||
|
||||
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =
|
||||
|
||||
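Two different withValue helpers are in play in this transport code: std/tables' pointer-binding overload (used on g.peersInIP later in this diff) and an Opt overload like peerId.withValue(pid) above, which in nim-libp2p comes from its own utility module rather than stew/results. A self-contained stand-in for the Opt flavour, under that assumption:

import stew/results

template withValue[T](o: Opt[T], name, body: untyped) =
  # runs body with the payload bound to name only when the Opt holds a value
  if o.isSome:
    let name = o.get()
    body

let maybePort = Opt.some(4001)   # illustrative value
maybePort.withValue(port):
  echo "dialing port ", port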
@@ -7,15 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}

import options
{.push raises: [].}

import chronos, chronicles

import ./messages,
../../../stream/connection

@@ -27,24 +21,24 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"

proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
let
msg = RelayMessage(msgType: some(RelayType.Status), status: some(code))
msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)

proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
let
msg = HopMessage(msgType: HopMessageType.Status, status: some(code))
msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)

proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
let
msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)


@@ -10,10 +10,7 @@
## `Identify <https://docs.libp2p.io/concepts/protocols/#identify>`_ and
## `Push Identify <https://docs.libp2p.io/concepts/protocols/#identify-push>`_ implementation

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sequtils, options, strutils, sugar]
import stew/results
@@ -24,6 +21,7 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
../errors,
@@ -65,7 +63,7 @@ type
peer: PeerId,
newInfo: IdentifyInfo):
Future[void]
{.gcsafe, raises: [Defect], public.}
{.gcsafe, raises: [], public.}

IdentifyPush* = ref object of LPProtocol
identifyHandler: IdentifyPushHandler
@@ -74,30 +72,28 @@ chronicles.expandIt(IdentifyInfo):
pubkey = ($it.pubkey).shortLog
addresses = it.addrs.map(x => $x).join(",")
protocols = it.protos.map(x => $x).join(",")
observable_address =
if it.observedAddr.isSome(): $it.observedAddr.get()
else: "None"
observable_address = $it.observedAddr
proto_version = it.protoVersion.get("None")
agent_version = it.agentVersion.get("None")
signedPeerRecord =
# The SPR contains the same data as the identify message
# would be cumbersome to log
if iinfo.signedPeerRecord.isSome(): "Some"
if it.signedPeerRecord.isSome(): "Some"
else: "None"

proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
{.raises: [Defect].} =
{.raises: [].} =
result = initProtoBuffer()

let pkey = peerInfo.publicKey

result.write(1, pkey.getBytes().get())
result.write(1, pkey.getBytes().expect("valid key"))
for ma in peerInfo.addrs:
result.write(2, ma.data.buffer)
for proto in peerInfo.protocols:
result.write(3, proto)
if observedAddr.isSome:
result.write(4, observedAddr.get().data.buffer)
observedAddr.withValue(observed):
result.write(4, observed.data.buffer)
let protoVersion = ProtoVersion
result.write(5, protoVersion)
let agentVersion = if peerInfo.agentVersion.len <= 0:
@@ -109,13 +105,12 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: boo
## Optionally populate signedPeerRecord field.
## See https://github.com/libp2p/go-libp2p/blob/ddf96ce1cfa9e19564feb9bd3e8269958bbc0aba/p2p/protocol/identify/pb/identify.proto for reference.
if sendSpr:
let sprBuff = peerInfo.signedPeerRecord.envelope.encode()
if sprBuff.isOk():
result.write(8, sprBuff.get())
peerInfo.signedPeerRecord.envelope.encode().toOpt().withValue(sprBuff):
result.write(8, sprBuff)

result.finish()

proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
var
iinfo: IdentifyInfo
pubkey: PublicKey
@@ -125,53 +120,38 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
signedPeerRecord: SignedPeerRecord

var pb = initProtoBuffer(buf)
if ? pb.getField(1, pubkey).toOpt():
iinfo.pubkey = some(pubkey)
if ? pb.getField(8, signedPeerRecord).toOpt() and
pubkey == signedPeerRecord.envelope.publicKey:
iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
discard ? pb.getRepeatedField(2, iinfo.addrs).toOpt()
discard ? pb.getRepeatedField(3, iinfo.protos).toOpt()
if ? pb.getField(4, oaddr).toOpt():
iinfo.observedAddr = some(oaddr)
if ? pb.getField(5, protoVersion).toOpt():
iinfo.protoVersion = some(protoVersion)
if ? pb.getField(6, agentVersion).toOpt():
iinfo.agentVersion = some(agentVersion)

let r1 = pb.getField(1, pubkey)
let r2 = pb.getRepeatedField(2, iinfo.addrs)
let r3 = pb.getRepeatedField(3, iinfo.protos)
let r4 = pb.getField(4, oaddr)
let r5 = pb.getField(5, protoVersion)
let r6 = pb.getField(6, agentVersion)

let r8 = pb.getField(8, signedPeerRecord)

let res = r1.isOk() and r2.isOk() and r3.isOk() and
r4.isOk() and r5.isOk() and r6.isOk() and
r8.isOk()

if res:
if r1.get():
iinfo.pubkey = some(pubkey)
if r4.get():
iinfo.observedAddr = some(oaddr)
if r5.get():
iinfo.protoVersion = some(protoVersion)
if r6.get():
iinfo.agentVersion = some(agentVersion)
if r8.get() and r1.get():
if iinfo.pubkey.get() == signedPeerRecord.envelope.publicKey:
iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
debug "decodeMsg: decoded identify", iinfo
some(iinfo)
else:
trace "decodeMsg: failed to decode received message"
none[IdentifyInfo]()
Opt.some(iinfo)
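The rewritten decodeMsg leans on two conversions: toOpt(), which turns the Result returned by getField into an Opt (a helper from libp2p's own utility module in this codebase), and the stew/results `?` operator, which propagates an empty Opt straight out of the enclosing proc (Opt[T] is a Result[T, void], so `?` applies). A minimal sketch of the `?` part, with all names illustrative:

import stew/results

proc firstEven(xs: seq[int]): Opt[int] =
  for x in xs:
    if x mod 2 == 0:
      return Opt.some(x)
  Opt.none(int)

proc halfOfFirstEven(xs: seq[int]): Opt[int] =
  # `?` unwraps the Opt, or returns Opt.none(int) from this proc at once
  let v = ? firstEven(xs)
  Opt.some(v div 2)

assert halfOfFirstEven(@[3, 8, 5]) == Opt.some(4)
assert halfOfFirstEven(@[1, 3]).isNone()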
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
sendSignedPeerRecord = false
sendSignedPeerRecord = false,
observedAddrManager = ObservedAddrManager.new(),
): T =
let identify = T(
peerInfo: peerInfo,
sendSignedPeerRecord: sendSignedPeerRecord,
observedAddrManager: ObservedAddrManager.new(),
observedAddrManager: observedAddrManager,
)
identify.init()
identify

method init*(p: Identify) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -189,33 +169,31 @@ method init*(p: Identify) =

proc identify*(self: Identify,
conn: Connection,
remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
trace "identify: Empty message received!", conn
raise newException(IdentityInvalidMsgError, "Empty message received!")

let infoOpt = decodeMsg(message)
if infoOpt.isNone():
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
var info = decodeMsg(message).valueOr: raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify: decoded message", conn, info
let
pubkey = info.pubkey.valueOr: raise newException(IdentityInvalidMsgError, "No pubkey in identify")
peer = PeerId.init(pubkey).valueOr: raise newException(IdentityInvalidMsgError, $error)

var info = infoOpt.get()
if info.pubkey.isNone():
raise newException(IdentityInvalidMsgError, "No pubkey in identify")

let peer = PeerId.init(info.pubkey.get())
if peer.isErr:
raise newException(IdentityInvalidMsgError, $peer.error)

if peer.get() != remotePeerId:
if peer != remotePeerId:
trace "Peer ids don't match", remote = peer, local = remotePeerId
raise newException(IdentityNoMatchError, "Peer ids don't match")
info.peerId = peer.get()
info.peerId = peer

if info.observedAddr.isSome:
if not self.observedAddrManager.addObservation(info.observedAddr.get()):
debug "Observed address is not valid", observedAddr = info.observedAddr.get()
info.observedAddr.withValue(observed):
# Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
# like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
trace "Not adding address to ObservedAddrManager.", observed
elif not self.observedAddrManager.addObservation(observed):
trace "Observed address is not valid.", observedAddr = observed
return info
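The new guard can be exercised on its own: relayed (p2p-circuit) and bare /p2p observations carry no usable NAT information, so identify now skips them before consulting the ObservedAddrManager. A hedged illustration, with the concrete address made up:

import libp2p/[multiaddress, multicodec]

let observed = MultiAddress.init("/ip4/192.0.2.1/tcp/4001/p2p-circuit").tryGet()
# contains() returns a Result; get(false) treats a lookup failure as "no match"
assert observed.contains(multiCodec("p2p-circuit")).get(false)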
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
@@ -226,26 +204,24 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush

proc init*(p: IdentifyPush) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)

let infoOpt = decodeMsg(message)
if infoOpt.isNone():
var identInfo = decodeMsg(message).valueOr:
raise newException(IdentityInvalidMsgError, "Incorrect message received!")
debug "identify push: decoded message", conn, identInfo

var indentInfo = infoOpt.get()

if indentInfo.pubkey.isSome:
let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
identInfo.pubkey.withValue(pubkey):
let receivedPeerId = PeerId.init(pubkey).tryGet()
if receivedPeerId != conn.peerId:
raise newException(IdentityNoMatchError, "Peer ids don't match")
indentInfo.peerId = receivedPeerId
identInfo.peerId = receivedPeerId

trace "triggering peer event", peerInfo = conn.peerId
if not isNil(p.identifyHandler):
await p.identifyHandler(conn.peerId, indentInfo)
await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
raise exc
except CatchableError as exc:

47
libp2p/protocols/perf/client.nim
Normal file
@@ -0,0 +1,47 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

import chronos, chronicles, sequtils
import stew/endians2
import ./core, ../../stream/connection

logScope:
topics = "libp2p perf"

type PerfClient* = ref object of RootObj

proc perf*(_: typedesc[PerfClient], conn: Connection,
sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
Future[Duration] {.async, public.} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
let start = Moment.now()
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead

await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite

await conn.close()

size = sizeToRead

while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead

let duration = Moment.now() - start
trace "finishing performance benchmark", duration
return duration
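A hedged usage sketch for the client above; runBench, the 10 MiB sizes, and the dial overload are illustrative, assuming a started Switch and a remote peer that mounts the Perf handler from the companion server file:

import chronos
import libp2p
import libp2p/protocols/perf/[client, core]

proc runBench(switch: Switch, peer: PeerId, addrs: seq[MultiAddress]) {.async.} =
  # open a stream speaking /perf/1.0.0, then upload and download 10 MiB each
  let conn = await switch.dial(peer, addrs, PerfCodec)
  let dur = await PerfClient.perf(conn, 10'u64 * 1024 * 1024, 10'u64 * 1024 * 1024)
  echo "perf round trip took ", dur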
14
libp2p/protocols/perf/core.nim
Normal file
@@ -0,0 +1,14 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

const
PerfCodec* = "/perf/1.0.0"
PerfSize* = 65536

60
libp2p/protocols/perf/server.nim
Normal file
@@ -0,0 +1,60 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification

{.push raises: [].}

import chronos, chronicles
import stew/endians2
import ./core,
../protocol,
../../stream/connection,
../../utility

export chronicles, connection

logScope:
topics = "libp2p perf"

type Perf* = ref object of LPProtocol

proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
var
sizeBuffer: array[8, byte]
size: uint64
await conn.readExactly(addr sizeBuffer[0], 8)
size = uint64.fromBytesBE(sizeBuffer)

var toReadBuffer: array[PerfSize, byte]
try:
while true:
bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
except CatchableError as exc:
discard

var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in perf handler", exc = exc.msg, conn
await conn.close()

p.handler = handle
p.codec = PerfCodec
return p
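The responder side only needs mounting; a hedged wiring sketch, assuming the usual SwitchBuilder chain used elsewhere in nim-libp2p:

import chronos
import libp2p
import libp2p/protocols/perf/server

proc startPerfServer(): Future[Switch] {.async.} =
  let switch = SwitchBuilder.new()
    .withRng(newRng())
    .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
    .withTcpTransport()
    .withMplex()
    .withNoise()
    .build()
  switch.mount(Perf.new())   # serve /perf/1.0.0
  await switch.start()
  return switch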
@@ -9,10 +9,7 @@

## `Ping <https://docs.libp2p.io/concepts/protocols/#ping>`_ protocol implementation

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos, chronicles
import bearssl/rand
@@ -42,7 +39,7 @@ type
PingHandler* {.public.} = proc (
peer: PeerId):
Future[void]
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}

Ping* = ref object of LPProtocol
pingHandler*: PingHandler
@@ -54,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping

method init*(p: Ping) =
proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -74,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
): Future[Duration] {.async, gcsafe, public.} =
): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay

trace "initiating ping", conn

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos, stew/results
import ../stream/connection
@@ -25,7 +22,7 @@ type
conn: Connection,
proto: string):
Future[void]
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}

LPProtocol* = ref object of RootObj
codecs*: seq[string]
@@ -55,8 +52,8 @@ func `codec=`*(p: LPProtocol, codec: string) =
proc new*(
T: type LPProtocol,
codecs: seq[string],
handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
handler: LPProtoHandler,
maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
T(
codecs: codecs,
handler: handler,
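With Nim 1.2 support gone, the default can finally be the idiomatic Opt.none(int), and the Opt[int] | int parameter accepts either spelling. A sketch with an illustrative codec and a no-op handler:

proc handler(conn: Connection, proto: string) {.async.} =
  await conn.close()   # placeholder handler

let unlimited = LPProtocol.new(@["/x/echo/1.0.0"], handler)
let capped = LPProtocol.new(@["/x/echo/1.0.0"], handler, maxIncomingStreams = 10)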
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sets, hashes, tables]
import chronos, chronicles, metrics
@@ -18,7 +15,7 @@ import ./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
./rpc/[message, messages],
./rpc/[message, messages, protobuf],
../../crypto/crypto,
../../stream/connection,
../../peerid,
@@ -98,7 +95,16 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =

method rpcHandler*(f: FloodSub,
peer: PubSubPeer,
rpcMsg: RPCMsg) {.async.} =
data: seq[byte]) {.async.} =

var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
raise newException(CatchableError, "")

trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
# trigger hooks
peer.recvObservers(rpcMsg)

for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
f.handleSubscribe(peer, sub.topic, sub.subscribe)
@@ -223,7 +229,7 @@ method publish*(f: FloodSub,
return peers.len

method initPubSub*(f: FloodSub)
{.raises: [Defect, InitializationError].} =
{.raises: [InitializationError].} =
procCall PubSub(f).initPubSub()
f.seen = TimedCache[MessageId].init(2.minutes)
f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))

@@ -9,20 +9,18 @@

## Gossip based publishing

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sets, sequtils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./pubsub,
./floodsub,
./pubsubpeer,
./peertable,
./mcache,
./timedcache,
./rpc/[messages, message],
./rpc/[messages, message, protobuf],
../protocol,
../../stream/connection,
../../peerinfo,
@@ -43,9 +41,14 @@ logScope:
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
declareCounter(libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant")
declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizations", labels=["kind"])
declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")

when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])

proc init*(_: type[GossipSubParams]): GossipSubParams =
GossipSubParams(
explicit: true,
@@ -77,7 +80,11 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
behaviourPenaltyWeight: -1.0,
behaviourPenaltyDecay: 0.999,
disconnectBadPeers: false,
enablePX: false
enablePX: false,
bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
disconnectPeerAboveRateLimit: false,
maxDurationInNonPriorityQueue: Opt.none(Duration),
)
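A hedged configuration sketch for the new knobs (the values are purely illustrative, not recommendations), assuming chronos and stew/results are imported alongside the gossipsub module:

var params = GossipSubParams.init()
params.overheadRateLimit = Opt.some((bytes: 4096, interval: 1.seconds))  # tolerate 4 KiB/s of protobuf overhead
params.disconnectPeerAboveRateLimit = true                               # drop peers that exceed it
params.maxDurationInNonPriorityQueue = Opt.some(30.seconds)              # expire stale queued RPCs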
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -150,7 +157,7 @@ method init*(g: GossipSub) =
g.codecs &= GossipSubCodec
g.codecs &= GossipSubCodec_10

method onNewPeer(g: GossipSub, peer: PubSubPeer) =
method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
# Make sure stats and peer information match, even when reloading peer stats
# from a previous connection
@@ -158,7 +165,11 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
peer.appScore = stats.appScore
peer.behaviourPenalty = stats.behaviourPenalty

# Check if the score is below the threshold and disconnect the peer if necessary
g.disconnectIfBadScorePeer(peer, stats.score)

peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget

method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
case event.kind
@@ -187,11 +198,11 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
return

# remove from peer IPs collection too
if pubSubPeer.address.isSome():
g.peersInIP.withValue(pubSubPeer.address.get(), s):
pubSubPeer.address.withValue(address):
g.peersInIP.withValue(address, s):
s[].excl(pubSubPeer.peerId)
if s[].len == 0:
g.peersInIP.del(pubSubPeer.address.get())
g.peersInIP.del(address)

for t in toSeq(g.mesh.keys):
trace "pruning unsubscribing peer", pubSubPeer, score = pubSubPeer.score
@@ -200,8 +211,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =

for t in toSeq(g.gossipsub.keys):
g.gossipsub.removePeer(t, pubSubPeer)
# also try to remove from explicit table here
g.explicit.removePeer(t, pubSubPeer)
# also try to remove from direct peers table here
g.subscribedDirectPeers.removePeer(t, pubSubPeer)

for t in toSeq(g.fanout.keys):
g.fanout.removePeer(t, pubSubPeer)
@@ -210,6 +221,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
for topic, info in stats[].topicInfos.mpairs:
info.firstMessageDeliveries = 0

pubSubPeer.stopSendNonPriorityTask()

procCall FloodSub(g).unsubscribePeer(peer)

proc handleSubscribe*(g: GossipSub,
@@ -240,7 +253,7 @@ proc handleSubscribe*(g: GossipSub,
# subscribe remote peer to the topic
discard g.gossipsub.addPeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
discard g.explicit.addPeer(topic, peer)
discard g.subscribedDirectPeers.addPeer(topic, peer)
else:
trace "peer unsubscribed from topic"

@@ -254,7 +267,7 @@ proc handleSubscribe*(g: GossipSub,

g.fanout.removePeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
g.explicit.removePeer(topic, peer)
g.subscribedDirectPeers.removePeer(topic, peer)

trace "gossip peers", peers = g.gossipsub.peers(topic), topic

@@ -262,18 +275,35 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
g.handlePrune(peer, control.prune)

var respControl: ControlMessage
g.handleIDontWant(peer, control.idontwant)
let iwant = g.handleIHave(peer, control.ihave)
if iwant.messageIds.len > 0:
respControl.iwant.add(iwant)
respControl.prune.add(g.handleGraft(peer, control.graft))
let messages = g.handleIWant(peer, control.iwant)

if
respControl.prune.len > 0 or
respControl.iwant.len > 0 or
messages.len > 0:
# iwant and prunes from here, also messages
let
isPruneNotEmpty = respControl.prune.len > 0
isIWantNotEmpty = respControl.iwant.len > 0

if isPruneNotEmpty or isIWantNotEmpty:

if isIWantNotEmpty:
libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)

if isPruneNotEmpty:
for prune in respControl.prune:
if g.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
else:
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])

trace "sending control message", msg = shortLog(respControl), peer
g.send(
peer,
RPCMsg(control: some(respControl)), true)

if messages.len > 0:
for smsg in messages:
for topic in smsg.topicIds:
if g.knownTopics.contains(topic):
@@ -281,18 +311,11 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
else:
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])

libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)

for prune in respControl.prune:
if g.knownTopics.contains(prune.topicId):
libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
else:
libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])

trace "sending control message", msg = shortLog(respControl), peer
# iwant replies have lower priority
trace "sending iwant reply messages", peer
g.send(
peer,
RPCMsg(control: some(respControl), messages: messages))
RPCMsg(messages: messages), false)

proc validateAndRelay(g: GossipSub,
msg: Message,
@@ -304,12 +327,13 @@ proc validateAndRelay(g: GossipSub,
var seenPeers: HashSet[PubSubPeer]
discard g.validationSeen.pop(msgIdSalted, seenPeers)
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])

case validation
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -331,14 +355,34 @@ proc validateAndRelay(g: GossipSub,
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])

# add direct peers
toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(t))

# Don't send it to source peer, or peers that
# sent it during validation
toSendPeers.excl(peer)
toSendPeers.excl(seenPeers)

# IDontWant is only worth it if the message is substantially
# bigger than the messageId
if msg.data.len > msgId.len * 10:
g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
idontwant: @[ControlIWant(messageIds: @[msgId])]
))))

for peer in toSendPeers:
for heDontWant in peer.heDontWants:
if msgId in heDontWant:
seenPeers.incl(peer)
libp2p_gossipsub_idontwant_saved_messages.inc
libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
break
toSendPeers.excl(seenPeers)


# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), false)
trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
for topic in msg.topicIds:
if topic notin g.topics: continue
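An IDontWant announcement costs roughly one message id per peer, so the branch above only sends it when the payload dwarfs the id. Illustrative numbers for the msg.data.len > msgId.len * 10 guard:

let msgIdLen = 20                # e.g. a 20-byte message id
let threshold = msgIdLen * 10    # = 200 bytes
assert 512 > threshold           # a 512-byte payload: announcing is worth it
assert not (64 > threshold)      # a 64-byte payload: cheaper to just resend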
@@ -352,9 +396,65 @@ proc validateAndRelay(g: GossipSub,
except CatchableError as exc:
info "validateAndRelay failed", msg=exc.msg

proc dataAndTopicsIdSize(msgs: seq[Message]): int =
msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)

proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
# In this way we count even ignored fields by protobuf

var rmsg = rpcMsgOpt.valueOr:
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(msgSize):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")

raise newException(CatchableError, "Peer msg couldn't be decoded")

let usefulMsgBytesNum =
if g.verifySignature:
byteSize(rmsg.messages)
else:
dataAndTopicsIdSize(rmsg.messages)

var uselessAppBytesNum = msgSize - usefulMsgBytesNum
rmsg.control.withValue(control):
uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))

peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")

method rpcHandler*(g: GossipSub,
peer: PubSubPeer,
rpcMsg: RPCMsg) {.async.} =
data: seq[byte]) {.async.} =

let msgSize = data.len
var rpcMsg = decodeRpcMsg(data).valueOr:
debug "failed to decode msg from peer", peer, err = error
await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
return

when defined(libp2p_expensive_metrics):
for m in rpcMsg.messages:
for t in m.topicIds:
libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, t])

trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)

# trigger hooks
peer.recvObservers(rpcMsg)

if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
g.send(peer, RPCMsg(pong: rpcMsg.ping), true)
peer.pingBudget.dec
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
g.handleSubscribe(peer, sub.topic, sub.subscribe)
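The overhead limiter is a chronos/ratelimit TokenBucket, the same type wired into getOrCreatePeer further down. A minimal sketch of its semantics, with the 1000-byte budget chosen only for illustration:

import chronos
import chronos/ratelimit

let bucket = TokenBucket.new(1000, 1.seconds)  # budget of 1000 tokens, refilled each second
assert bucket.tryConsume(800)                  # within budget
assert not bucket.tryConsume(800)              # only ~200 tokens left until the next refill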
@@ -412,14 +512,14 @@ method rpcHandler*(g: GossipSub,
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
continue

if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
g.punishInvalidMessage(peer, msg.topicIds)
await g.punishInvalidMessage(peer, msg)
continue

# g.anonymize needs no evaluation when receiving messages
@@ -491,32 +591,38 @@ method publish*(g: GossipSub,

var peers: HashSet[PubSubPeer]

if g.parameters.floodPublish:
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
# but a peer's own messages will always be published to all known peers in the topic.
for peer in g.gossipsub.getOrDefault(topic):
if peer.score >= g.parameters.publishThreshold:
trace "publish: including flood/high score peer", peer
peers.incl(peer)

# add always direct peers
peers.incl(g.explicit.getOrDefault(topic))
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))

if topic in g.topics: # if we're subscribed use the mesh
peers.incl(g.mesh.getOrDefault(topic))

if peers.len < g.parameters.dLow and g.parameters.floodPublish == false:
# not subscribed or bad mesh, send to fanout peers
# disable for floodPublish, since we already sent to every good peer
#
if g.parameters.floodPublish:
# With flood publishing enabled, the mesh is used when propagating messages from other peers,
# but a peer's own messages will always be published to all known peers in the topic, limited
# to the amount of peers we can send it to in one heartbeat
var maxPeersToFlodOpt: Opt[int64]
if g.parameters.bandwidthEstimatebps > 0:
let
bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
msToTransmit = max(data.len div bandwidth, 1)
maxPeersToFlodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))

for peer in g.gossipsub.getOrDefault(topic):
maxPeersToFlodOpt.withValue(maxPeersToFlod):
if peers.len >= maxPeersToFlod: break
if peer.score >= g.parameters.publishThreshold:
trace "publish: including flood/high score peer", peer
peers.incl(peer)

if peers.len < g.parameters.dLow:
# not subscribed, or bad mesh, send to fanout peers
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
if fanoutPeers.len == 0:
if fanoutPeers.len < g.parameters.dLow:
g.replenishFanout(topic)
fanoutPeers = g.fanout.getOrDefault(topic).toSeq()

g.rng.shuffle(fanoutPeers)
if fanoutPeers.len + peers.len > g.parameters.d:
fanoutPeers.setLen(g.parameters.d - peers.len)

for fanPeer in fanoutPeers:
peers.incl(fanPeer)
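Worked arithmetic for the flood-publish cap above, using the new 100 Mbps default and otherwise hypothetical numbers (the computed cap is additionally floored at dLow):

let
  bandwidthEstimatebps = 100_000_000               # default introduced in this diff
  bandwidth = bandwidthEstimatebps div 8 div 1000  # = 12_500 bytes per millisecond
  dataLen = 125_000                                # a 125 kB message
  msToTransmit = max(dataLen div bandwidth, 1)     # = 10 ms per peer
  heartbeatMs = 1_000                              # assuming a 1 s heartbeat
assert heartbeatMs div msToTransmit == 100         # flood to at most 100 peers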
@@ -534,7 +640,6 @@ method publish*(g: GossipSub,
|
||||
debug "No peers for topic, skipping publish", peersOnTopic = topicPeers.len,
|
||||
connectedPeers = topicPeers.filterIt(it.connected).len,
|
||||
topic
|
||||
# skipping topic as our metrics finds that heavy
|
||||
libp2p_gossipsub_failed_publish.inc()
|
||||
return 0
|
||||
|
||||
@@ -562,7 +667,7 @@ method publish*(g: GossipSub,
|
||||
|
||||
g.mcache.put(msgId, msg)
|
||||
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]))
|
||||
g.broadcast(peers, RPCMsg(messages: @[msg]), true)
|
||||
|
||||
if g.knownTopics.contains(topic):
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
|
||||
@@ -570,15 +675,16 @@ method publish*(g: GossipSub,
|
||||
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = ["generic"])
|
||||
|
||||
trace "Published message to peers", peers=peers.len
|
||||
|
||||
return peers.len
|
||||
|
||||
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
|
||||
let peer = g.peers.getOrDefault(id)
|
||||
if isNil(peer):
|
||||
if id notin g.peers:
|
||||
trace "Attempting to dial a direct peer", peer = id
|
||||
if g.switch.isConnected(id):
|
||||
warn "We are connected to a direct peer, but it isn't a GossipSub peer!", id
|
||||
return
|
||||
try:
|
||||
await g.switch.connect(id, addrs)
|
||||
await g.switch.connect(id, addrs, forceDial = true)
|
||||
# populate the peer after it's connected
|
||||
discard g.getOrCreatePeer(id, g.codecs)
|
||||
except CancelledError as exc:
|
||||
@@ -622,7 +728,7 @@ method stop*(g: GossipSub) {.async.} =
|
||||
g.heartbeatFut = nil
|
||||
|
||||
method initPubSub*(g: GossipSub)
|
||||
{.raises: [Defect, InitializationError].} =
|
||||
{.raises: [InitializationError].} =
|
||||
procCall FloodSub(g).initPubSub()
|
||||
|
||||
if not g.parameters.explicit:
|
||||
@@ -637,3 +743,14 @@ method initPubSub*(g: GossipSub)
|
||||
|
||||
# init gossip stuff
|
||||
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
|
||||
|
||||
method getOrCreatePeer*(
|
||||
g: GossipSub,
|
||||
peerId: PeerId,
|
||||
protos: seq[string]): PubSubPeer =
|
||||
|
||||
let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
|
||||
g.parameters.overheadRateLimit.withValue(overheadRateLimit):
|
||||
peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
|
||||
peer.rpcmessagequeue.maxDurationInNonPriorityQueue = g.parameters.maxDurationInNonPriorityQueue
|
||||
return peer
|
||||
|
||||
@@ -7,10 +7,7 @@
|
||||
# This file may not be copied, modified, or distributed except according to
|
||||
# those terms.
|
||||
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[tables, sequtils, sets, algorithm, deques]
|
||||
import chronos, chronicles, metrics
|
||||
@@ -33,7 +30,7 @@ declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh wi
|
||||
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
|
||||
declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
|
||||
|
||||
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
|
||||
proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [].} =
|
||||
g.withPeerStats(p.peerId) do (stats: var PeerStats):
|
||||
var info = stats.topicInfos.getOrDefault(topic)
|
||||
info.graftTime = Moment.now()
|
||||
@@ -49,12 +46,10 @@ proc pruned*(g: GossipSub,
|
||||
p: PubSubPeer,
|
||||
topic: string,
|
||||
setBackoff: bool = true,
|
||||
backoff = none(Duration)) {.raises: [Defect].} =
|
||||
backoff = none(Duration)) {.raises: [].} =
|
||||
if setBackoff:
|
||||
let
|
||||
backoffDuration =
|
||||
if isSome(backoff): backoff.get()
|
||||
else: g.parameters.pruneBackoff
|
||||
backoffDuration = backoff.get(g.parameters.pruneBackoff)
|
||||
backoffMoment = Moment.fromNow(backoffDuration)
|
||||
|
||||
g.backingOff
|
||||
@@ -75,7 +70,7 @@ proc pruned*(g: GossipSub,
|
||||
|
||||
trace "pruned", peer=p, topic
|
||||
|
||||
proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
|
||||
proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [].} =
|
||||
let now = Moment.now()
|
||||
var expired = toSeq(t.getOrDefault(topic).pairs())
|
||||
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
|
||||
@@ -84,7 +79,7 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].}
|
||||
t.withValue(topic, v):
|
||||
v[].del(peer)
|
||||
|
||||
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [Defect].} =
|
||||
proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [].} =
|
||||
if not g.parameters.enablePX:
|
||||
return @[]
|
||||
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
|
||||
@@ -111,10 +106,11 @@ proc handleGraft*(g: GossipSub,
|
||||
let topic = graft.topicId
|
||||
trace "peer grafted topic", peer, topic
|
||||
|
||||
# It is an error to GRAFT on a explicit peer
|
||||
# It is an error to GRAFT on a direct peer
|
||||
if peer.peerId in g.parameters.directPeers:
|
||||
# receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
|
||||
warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
|
||||
# we are trusting direct peer not to abuse this
|
||||
warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
|
||||
peer, topic
|
||||
# and such an attempt should be logged and rejected with a PRUNE
|
||||
prunes.add(ControlPrune(
|
||||
@@ -194,27 +190,22 @@ proc handleGraft*(g: GossipSub,
|
||||
proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =
|
||||
var routingRecords: seq[(PeerId, Option[PeerRecord])]
|
||||
for record in prune.peers:
|
||||
let peerRecord =
|
||||
if record.signedPeerRecord.len == 0:
|
||||
none(PeerRecord)
|
||||
else:
|
||||
let signedRecord = SignedPeerRecord.decode(record.signedPeerRecord)
|
||||
if signedRecord.isErr:
|
||||
trace "peer sent invalid SPR", peer, error=signedRecord.error
|
||||
none(PeerRecord)
|
||||
var peerRecord = none(PeerRecord)
|
||||
if record.signedPeerRecord.len > 0:
|
||||
SignedPeerRecord.decode(record.signedPeerRecord).toOpt().withValue(spr):
|
||||
if record.peerId != spr.data.peerId:
|
||||
trace "peer sent envelope with wrong public key", peer
|
||||
else:
|
||||
if record.peerId != signedRecord.get().data.peerId:
|
||||
trace "peer sent envelope with wrong public key", peer
|
||||
none(PeerRecord)
|
||||
else:
|
||||
some(signedRecord.get().data)
|
||||
peerRecord = some(spr.data)
|
||||
else:
|
||||
trace "peer sent invalid SPR", peer
|
||||
|
||||
routingRecords.add((record.peerId, peerRecord))
|
||||
|
||||
routingRecords
|
||||
|
||||
|
||||
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} =
|
||||
proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [].} =
|
||||
for prune in prunes:
|
||||
let topic = prune.topicId
|
||||
|
||||
@@ -248,39 +239,42 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
|
||||
|
||||
proc handleIHave*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
ihaves: seq[ControlIHave]): ControlIWant {.raises: [Defect].} =
|
||||
ihaves: seq[ControlIHave]): ControlIWant {.raises: [].} =
|
||||
var res: ControlIWant
|
||||
if peer.score < g.parameters.gossipThreshold:
|
||||
trace "ihave: ignoring low score peer", peer, score = peer.score
|
||||
elif peer.iHaveBudget <= 0:
|
||||
trace "ihave: ignoring out of budget peer", peer, score = peer.score
|
||||
else:
|
||||
# TODO review deduplicate algorithm
|
||||
# * https://github.com/nim-lang/Nim/blob/5f46474555ee93306cce55342e81130c1da79a42/lib/pure/collections/sequtils.nim#L184
|
||||
# * it's probably not efficient and might give preference to the first dupe
|
||||
let deIhaves = ihaves.deduplicate()
|
||||
for ihave in deIhaves:
|
||||
for ihave in ihaves:
|
||||
trace "peer sent ihave",
|
||||
peer, topic = ihave.topicId, msgs = ihave.messageIds
|
||||
if ihave.topicId in g.mesh:
|
||||
# also avoid duplicates here!
|
||||
let deIhavesMsgs = ihave.messageIds.deduplicate()
|
||||
for msgId in deIhavesMsgs:
|
||||
if ihave.topicId in g.topics:
|
||||
for msgId in ihave.messageIds:
|
||||
if not g.hasSeen(msgId):
|
||||
if peer.iHaveBudget > 0:
|
||||
if peer.iHaveBudget <= 0:
|
||||
break
|
||||
elif msgId notin res.messageIds:
|
||||
res.messageIds.add(msgId)
|
||||
dec peer.iHaveBudget
|
||||
trace "requested message via ihave", messageID=msgId
|
||||
else:
|
||||
break
|
||||
# shuffling res.messageIDs before sending it out to increase the likelihood
|
||||
# of getting an answer if the peer truncates the list due to internal size restrictions.
|
||||
g.rng.shuffle(res.messageIds)
|
||||
return res
|
||||
|
||||
proc handleIDontWant*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
iDontWants: seq[ControlIWant]) =
|
||||
for dontWant in iDontWants:
|
||||
for messageId in dontWant.messageIds:
|
||||
if peer.heDontWants[^1].len > 1000: break
|
||||
if messageId.len > 100: continue
|
||||
peer.heDontWants[^1].incl(messageId)
|
||||
|
||||
proc handleIWant*(g: GossipSub,
|
||||
peer: PubSubPeer,
|
||||
iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
|
||||
iwants: seq[ControlIWant]): seq[Message] {.raises: [].} =
|
||||
var
|
||||
messages: seq[Message]
|
||||
invalidRequests = 0
|
||||
@@ -299,15 +293,14 @@ proc handleIWant*(g: GossipSub,
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
|
||||
return messages
|
||||
continue
|
||||
let msg = g.mcache.get(mid)
|
||||
if msg.isSome:
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
|
||||
messages.add(msg.get())
|
||||
else:
|
||||
let msg = g.mcache.get(mid).valueOr:
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
|
||||
continue
|
||||
libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
|
||||
messages.add(msg)
|
||||
return messages
|
||||
|
||||
proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
|
||||
proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
@@ -316,7 +309,7 @@ proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])

proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [Defect].} =
proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [].} =
logScope:
topic
mesh = g.mesh.peers(topic)
@@ -348,7 +341,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= 0.0 and
it notin currentMesh[] and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -388,7 +381,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it notin currentMesh[] and
# avoid negative score peers
it.score >= 0.0 and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -490,7 +483,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= median.score and
it notin currentMesh[] and
# don't pick explicit peers
# don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -546,7 +539,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
g.broadcast(prunes, prune)

proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
# drop peers that we haven't published to in
# GossipSubFanoutTTL seconds
let now = Moment.now()
@@ -559,13 +552,13 @@ proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
for topic in drops:
g.lastFanoutPubSub.del topic

proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
proc replenishFanout*(g: GossipSub, topic: string) {.raises: [].} =
## get fanout peers for a topic
logScope: topic
trace "about to replenish fanout"

let currentMesh = g.mesh.getOrDefault(topic)
if g.fanout.peers(topic) < g.parameters.dLow:
let currentMesh = g.mesh.getOrDefault(topic)
trace "replenishing fanout", peers = g.fanout.peers(topic)
for peer in g.gossipsub.getOrDefault(topic):
if peer in currentMesh: continue
@@ -575,7 +568,7 @@ proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =

trace "fanout replenished with peers", peers = g.fanout.peers(topic)

proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [Defect].} =
proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [].} =
## gossip iHave messages to peers
##

@@ -638,7 +631,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:

return control

proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
proc onHeartbeat(g: GossipSub) {.raises: [].} =
# reset IWANT budget
# reset IHAVE cap
block:
@@ -646,7 +639,11 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
peer.sentIHaves.addFirst(default(HashSet[MessageId]))
if peer.sentIHaves.len > g.parameters.historyLength:
discard peer.sentIHaves.popLast()
peer.heDontWants.addFirst(default(HashSet[MessageId]))
if peer.heDontWants.len > g.parameters.historyLength:
discard peer.heDontWants.popLast()
peer.iHaveBudget = IHavePeerBudget
peer.pingBudget = PingsPeerBudget

var meshMetrics = MeshMetrics()

@@ -698,7 +695,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =

g.mcache.shift() # shift the cache

# {.pop.} # raises [Defect]
# {.pop.} # raises []

proc heartbeat*(g: GossipSub) {.async.} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:

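The onHeartbeat hunk above extends the per-heartbeat rolling window from `sentIHaves` to the new `heDontWants` deque. A minimal sketch of that window pattern in isolation (`historyLength` and the element type are illustrative stand-ins, not the library types):

```nim
# Sketch: one fresh HashSet per heartbeat, oldest window dropped past historyLength.
import std/[deques, sets]

const historyLength = 5            # stand-in for g.parameters.historyLength
var windows = initDeque[HashSet[int]]()

proc onHeartbeatTick() =
  windows.addFirst(initHashSet[int]())   # new window for this heartbeat
  if windows.len > historyLength:
    discard windows.popLast()            # forget the oldest window

for _ in 0 ..< 10:
  onHeartbeatTick()
assert windows.len == historyLength
```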
@@ -7,16 +7,16 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[tables, sets, options]
import std/[tables, sets]
import chronos, chronicles, metrics
import chronos/ratelimit
import "."/[types]
import ".."/[pubsubpeer]
import ../rpc/messages
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
import ../pubsub

logScope:
topics = "libp2p gossipsub"
@@ -30,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])

proc init*(_: type[TopicParams]): TopicParams =
TopicParams(
@@ -55,7 +56,7 @@ proc init*(_: type[TopicParams]): TopicParams =
proc withPeerStats*(
g: GossipSub,
peerId: PeerId,
action: proc (stats: var PeerStats) {.gcsafe, raises: [Defect].}) =
action: proc (stats: var PeerStats) {.gcsafe, raises: [].}) =
## Add or update peer statistics for a particular peer id - the statistics
## are retained across multiple connections until they expire
g.peerStats.withValue(peerId, stats) do:
@@ -74,39 +75,32 @@ func `/`(a, b: Duration): float64 =
func byScore*(x,y: PubSubPeer): int = system.cmp(x.score, y.score)

proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
if peer.address.isNone():
0.0
let address = peer.address.valueOr: return 0.0

g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
trace "colocationFactor over threshold", peer, address, ipPeers
let over = ipPeers - g.parameters.ipColocationFactorThreshold
over * over
else:
let
address = peer.address.get()
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
trace "colocationFactor over threshold", peer, address, ipPeers
let over = ipPeers - g.parameters.ipColocationFactorThreshold
over * over
else:
0.0
0.0

{.pop.}

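The colocationFactor rewrite above replaces an `isNone`/`get` pair with stew's `valueOr`, which unwraps an `Opt` or runs an early-return block in one step, removing a whole level of nesting. A small self-contained sketch of the same idiom (the procs here are made up for illustration):

```nim
import stew/results

proc firstEven(xs: seq[int]): Opt[int] =
  for x in xs:
    if x mod 2 == 0: return Opt.some(x)
  Opt.none(int)

proc describe(xs: seq[int]): string =
  # `valueOr` runs its block (here: an early return) when the Opt is empty,
  # flattening what would otherwise be an if/else over the whole proc body.
  let ev = firstEven(xs).valueOr: return "no even number"
  "first even: " & $ev

assert describe(@[1, 3, 4]) == "first even: 4"
assert describe(@[1, 3, 5]) == "no even number"
```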
proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])

proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg

proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(g.disconnectPeer(peer))
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])

proc updateScores*(g: GossipSub) = # avoid async
## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
@@ -176,14 +170,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += topicScore * topicParams.topicWeight

# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
@@ -220,14 +207,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += colocationFactor * g.parameters.ipColocationFactorWeight

# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
@@ -247,11 +227,7 @@ proc updateScores*(g: GossipSub) = # avoid async

trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted

if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(g.disconnectPeer(peer))

g.disconnectIfBadScorePeer(peer, stats.score)
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])

for peer in evicting:
@@ -264,8 +240,18 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()

proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
for tt in topics:
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")


for tt in msg.topicIds:
let t = tt
if t notin g.topics:
continue

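The new punishInvalidMessage charges each invalid message's payload size against the peer's overhead TokenBucket from chronos/ratelimit, escalating once the budget runs out. A minimal sketch of that accounting, with made-up budget numbers:

```nim
import chronos, chronos/ratelimit

# 1 KiB of "useless bytes" allowed per second per peer (illustrative budget).
let bucket = TokenBucket.new(1024, 1.seconds)

doAssert bucket.tryConsume(512)       # first invalid message: within budget
doAssert bucket.tryConsume(512)       # second one drains the bucket
doAssert not bucket.tryConsume(1)     # over the limit until the bucket refills
```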
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos
import std/[options, tables, sets]
@@ -48,6 +45,7 @@ const

const
BackoffSlackTime* = 2 # seconds
PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
IHavePeerBudget* = 10
# the max amount of IHave to expose, not by spec, but go as example
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
@@ -144,6 +142,15 @@ type
disconnectBadPeers*: bool
enablePX*: bool

bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely

overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool

# The maximum duration a message can stay in the non-priority queue. If it exceeds this duration, it will be discarded
# as soon as it is dequeued, instead of being sent to the remote peer. The default value is none, i.e., no maximum duration.
maxDurationInNonPriorityQueue*: Opt[Duration]

BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]

@@ -152,13 +159,13 @@ type
proc(peer: PeerId,
tag: string, # For gossipsub, the topic
peers: seq[RoutingRecordsPair])
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}

GossipSub* = ref object of FloodSub
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
explicit*: PeerTable # directpeers that we keep alive explicitly
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
gossip*: Table[string, seq[ControlIHave]] # pending gossip

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sets, tables, options]
import rpc/[messages]

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[tables, sets, sequtils]
import ./pubsubpeer, ../../peerid

@@ -13,13 +13,11 @@
## `publish<#publish.e%2CPubSub%2Cstring%2Cseq%5Bbyte%5D>`_ something on it,
## and eventually `unsubscribe<#unsubscribe%2CPubSub%2Cstring%2CTopicHandler>`_ from it.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[tables, sequtils, sets, strutils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
@@ -86,18 +84,18 @@ type
InitializationError* = object of LPError

TopicHandler* {.public.} = proc(topic: string,
data: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
data: seq[byte]): Future[void] {.gcsafe, raises: [].}

ValidatorHandler* {.public.} = proc(topic: string,
message: Message): Future[ValidationResult] {.gcsafe, raises: [Defect].}
message: Message): Future[ValidationResult] {.gcsafe, raises: [].}

TopicPair* = tuple[topic: string, handler: TopicHandler]

MsgIdProvider* {.public.} =
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [], gcsafe.}

SubscriptionValidator* {.public.} =
proc(topic: string): bool {.raises: [Defect], gcsafe.}
proc(topic: string): bool {.raises: [], gcsafe.}
## Every time a peer send us a subscription (even to an unknown topic),
## we have to store it, which may be an attack vector.
## This callback can be used to reject topic we're not interested in
@@ -140,17 +138,18 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =

libp2p_pubsub_peers.set(p.peers.len.int64)

proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} =
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool = false) {.raises: [].} =
## Attempt to send `msg` to remote peer
##

trace "sending pubsub message to peer", peer, msg = shortLog(msg)
peer.send(msg, p.anonymize)
asyncSpawn peer.send(msg, p.anonymize, isHighPriority)

proc broadcast*(
p: PubSub,
sendPeers: auto, # Iteratble[PubSubPeer]
msg: RPCMsg) {.raises: [Defect].} =
msg: RPCMsg,
isHighPriority: bool = false) {.raises: [].} =
## Attempt to send `msg` to the given peers

let npeers = sendPeers.len.int64
@@ -173,10 +172,9 @@ proc broadcast*(
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])

if msg.control.isSome():
libp2p_pubsub_broadcast_iwant.inc(npeers * msg.control.get().iwant.len.int64)
msg.control.withValue(control):
libp2p_pubsub_broadcast_iwant.inc(npeers * control.iwant.len.int64)

let control = msg.control.get()
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
@@ -198,12 +196,12 @@ proc broadcast*(

if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg)
p.send(peer, msg, isHighPriority)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded, isHighPriority)

proc sendSubs*(p: PubSub,
peer: PubSubPeer,
@@ -247,9 +245,8 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])

if rpcMsg.control.isSome():
libp2p_pubsub_received_iwant.inc(rpcMsg.control.get().iwant.len.int64)
template control: untyped = rpcMsg.control.unsafeGet()
rpcMsg.control.withValue(control):
libp2p_pubsub_received_iwant.inc(control.iwant.len.int64)
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
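Several hunks above swap `isSome`/`get` pairs for `withValue`, which binds the payload to a name and runs its body only when the `Opt` holds a value. In nim-libp2p the template lives in utility.nim; the following is a minimal reimplementation just to show its shape, not the library's exact definition:

```nim
import stew/results

template withValue[T](opt: Opt[T], name, body: untyped) =
  # Note: `opt` is evaluated twice here; the real helper is more careful.
  if opt.isSome():
    let name = opt.unsafeGet()
    body

let maybePort = Opt.some(8080)
var bound = 0
maybePort.withValue(port):
  bound = port        # runs only because maybePort holds a value
assert bound == 8080
```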
@@ -268,7 +265,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =

method rpcHandler*(p: PubSub,
peer: PubSubPeer,
rpcMsg: RPCMsg): Future[void] {.base, async.} =
data: seq[byte]): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"

@@ -283,10 +280,11 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
of PubSubPeerEventKind.Disconnected:
discard

proc getOrCreatePeer*(
method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
protos: seq[string]): PubSubPeer {.base, gcsafe.} =

p.peers.withValue(peerId, peer):
return peer[]

@@ -359,9 +357,9 @@ method handleConn*(p: PubSub,
## that we're interested in
##

proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
# call pubsub rpc handler
p.rpcHandler(peer, msg)
p.rpcHandler(peer, data)

let peer = p.getOrCreatePeer(conn.peerId, @[proto])

@@ -491,7 +489,7 @@ method publish*(p: PubSub,
return 0

method initPubSub*(p: PubSub)
{.base, raises: [Defect, InitializationError].} =
{.base, raises: [InitializationError].} =
## perform pubsub initialization
p.observers = new(seq[PubSubObserver])
if p.msgIdProvider == nil:
@@ -559,7 +557,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false): P
{.raises: [Defect, InitializationError], public.} =
{.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
P(switch: switch,

@@ -7,14 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
@@ -23,21 +21,28 @@ import rpc/[messages, message, protobuf],
../../protobuf/minprotobuf,
../../utility

export peerid, connection
export peerid, connection, deques

logScope:
topics = "libp2p pubsubpeer"

when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])

declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])

declareCounter(libp2p_gossipsub_non_priority_msgs_dropped, "the number of dropped messages in the non-priority queue", labels = ["id"])


type
PeerRateLimitError* = object of CatchableError

PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}

PubSubPeerEventKind* {.pure.} = enum
Connected
@@ -46,9 +51,23 @@ type
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind

GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].}
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}

QueuedMessage* = object
msg*: seq[byte]
addedAt*: Moment

RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[QueuedMessage]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
# The max duration a message to be relayed can wait to be sent before it is dropped. The default is 500ms.
maxDurationInNonPriorityQueue*: Opt[Duration]

PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
@@ -63,13 +82,18 @@ type

score*: float64
sentIHaves*: Deque[HashSet[MessageId]]
heDontWants*: Deque[HashSet[MessageId]]
iHaveBudget*: int
pingBudget*: int
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]

RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
{.gcsafe, raises: [Defect].}
rpcmessagequeue*: RpcMessageQueue

RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}

when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
@@ -80,6 +104,16 @@ when defined(libp2p_agents_metrics):
#so we have to read the parents short agent..
p.sendConn.getWrapped().shortAgent

proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"

func hash*(p: PubSubPeer): Hash =
p.peerId.hash

@@ -108,7 +142,7 @@ func outbound*(p: PubSubPeer): bool =
else:
false

proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
@@ -135,28 +169,13 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
conn, peer = p, closed = conn.closed,
data = data.shortLog

var rmsg = decodeRpcMsg(data)
await p.handler(p, data)
data = newSeq[byte]() # Release memory

if rmsg.isErr():
notice "failed to decode msg from peer",
conn, peer = p, closed = conn.closed,
err = rmsg.error()
break

trace "decoded msg from peer",
conn, peer = p, closed = conn.closed,
msg = rmsg.get().shortLog
# trigger hooks
p.recvObservers(rmsg.get())

when defined(libp2p_expensive_metrics):
for m in rmsg.get().messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])

await p.handler(p, rmsg.get())
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
except CatchableError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
await conn.close()
except CancelledError:
@@ -174,7 +193,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn()
let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")

@@ -201,6 +220,9 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
await p.sendConn.close()
p.sendConn = nil

if not p.connectedFut.finished:
p.connectedFut.complete()

try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
@@ -233,27 +255,25 @@ proc hasSendConn*(p: PubSubPeer): bool =
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
for x in msg.messages:
for t in x.topicIDs:
for t in x.topicIds:
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])

proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
doAssert(not isNil(p), "pubsubpeer nil!")

if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return

if msg.len > p.maxMessageSize:
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
proc clearSendPriorityQueue(p: PubSubPeer) =
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[0].finished:
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.dec(labelValues = [$p.peerId])
discard p.rpcmessagequeue.sendPriorityQueue.popFirst()

proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
if p.sendConn == nil:
discard await p.connectedFut.withTimeout(1.seconds)
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
await p.connectedFut

var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection, skipping message", p, msg = shortLog(msg)
debug "No send connection", p, msg = shortLog(msg)
return

trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
@@ -270,9 +290,66 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =

await conn.close() # This will clean up the send connection

proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool = false) {.async.} =
doAssert(not isNil(p), "pubsubpeer nil!")

if msg.len <= 0:
debug "empty message, skipping", p, msg = shortLog(msg)
return

if msg.len > p.maxMessageSize:
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return

if isHighPriority:
p.clearSendPriorityQueue()
let f = p.sendMsg(msg)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
else:
await p.rpcmessagequeue.nonPriorityQueue.addLast(QueuedMessage(msg: msg, addedAt: Moment.now()))
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
trace "message queued", p, msg = shortLog(msg)

iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, the latter is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.

var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()

var currentSize = byteSize(currentRPCMsg)

for msg in rpcMsg.messages:
let msgSize = byteSize(msg)

# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message

trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
currentRPCMsg = RPCMsg()
currentSize = 0

currentRPCMsg.messages.add(msg)
currentSize += msgSize

# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
trace "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)

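splitRPCMsg above is a greedy first-fit packer: accumulate messages until the next one would overflow, flush the batch, and start a fresh one. The same logic in isolation over plain strings (illustrative only; it omits the 10% protobuf-overhead factor):

```nim
iterator chunksBySize(items: seq[string], maxSize: int): seq[string] =
  var batch: seq[string]
  var size = 0
  for it in items:
    if size + it.len > maxSize:
      if batch.len > 0:
        yield batch          # flush the batch that is full
        batch = @[]
        size = 0
      if it.len > maxSize:
        continue             # a single oversized item is skipped, as in splitRPCMsg
    batch.add(it)
    size += it.len
  if batch.len > 0:
    yield batch

var batches: seq[seq[string]]
for b in chunksBySize(@["aa", "bbb", "cc", "ddddd"], 5):
  batches.add(b)
assert batches == @[@["aa", "bbb"], @["cc"], @["ddddd"]]
```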
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool = false) {.async.} =
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -290,7 +367,13 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)

asyncSpawn p.sendEncoded(encoded)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
await p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
await p.sendEncoded(encoded, isHighPriority)

proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -299,13 +382,58 @@ proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
return true
return false

proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let queuedMsg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# this minimizes the number of times we have to wait for something (each wait = performance cost)
# we will never wait for a finished future and by waiting for the last one, all that come before it are guaranteed
# to be finished already (since sends are processed in order).
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
await p.rpcmessagequeue.sendPriorityQueue[^1]
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
p.rpcmessagequeue.maxDurationInNonPriorityQueue.withValue(maxDurationInNonPriorityQueue):
if Moment.now() - queuedMsg.addedAt >= maxDurationInNonPriorityQueue:
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_msgs_dropped.inc(labelValues = [$p.peerId])
continue
await p.sendMsg(queuedMsg.msg)

proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()

proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancel()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
p.rpcmessagequeue.nonPriorityQueue.clear()
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)

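Taken together, sendEncoded and sendNonPriorityTask form a two-tier send path: high-priority sends start immediately and are merely tracked, while relayed traffic waits until every pending priority send has finished. A condensed, self-contained model of that scheduling (the type, fakeSend, and all names here are illustrative, not the library API):

```nim
import std/deques
import chronos

type TwoTierSender = ref object
  priority: Deque[Future[void]]       # in-flight high-priority sends
  nonPriority: AsyncQueue[string]     # queued relay messages

proc fakeSend(msg: string) {.async.} =
  await sleepAsync(10.milliseconds)   # stand-in for writing to a connection

proc clearFinished(s: TwoTierSender) =
  # drop completed sends from the front, like clearSendPriorityQueue
  while s.priority.len > 0 and s.priority[0].finished:
    discard s.priority.popFirst()

proc sendHighPriority(s: TwoTierSender, msg: string) =
  s.clearFinished()
  let f = fakeSend(msg)               # starts immediately
  if not f.finished:
    s.priority.addLast(f)

proc relayLoop(s: TwoTierSender) {.async.} =
  while true:
    let msg = await s.nonPriority.popFirst()
    s.clearFinished()
    if s.priority.len > 0:
      await s.priority[^1]            # sends finish in order: last done implies all done
    await fakeSend(msg)
```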
proc new(T: typedesc[RpcMessageQueue], maxDurationInNonPriorityQueue = Opt.none(Duration)): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[QueuedMessage](),
maxDurationInNonPriorityQueue: maxDurationInNonPriorityQueue,
)

proc new*(
T: typedesc[PubSubPeer],
peerId: PeerId,
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int): T =
maxMessageSize: int,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
maxDurationInNonPriorityQueue = Opt.none(Duration)): T =

result = T(
getConn: getConn,
@@ -313,6 +441,10 @@ proc new*(
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(maxDurationInNonPriorityQueue),
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[MessageId]))
result.startSendNonPriorityTask()

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronicles, metrics, stew/[byteutils, endians2]
import ./messages,
@@ -65,22 +62,21 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true): Message
{.gcsafe, raises: [Defect, LPError].} =
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])

# order matters, we want to include seqno in the signature
if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())

if peer.isSome:
let peer = peer.get()
peer.withValue(peer):
msg.fromPeer = peer.peerId
if sign:
msg.signature = sign(msg, peer.privateKey).expect("Couldn't sign message!")
msg.key = peer.privateKey.getPublicKey().expect("Invalid private key!")
.getBytes().expect("Couldn't get public key bytes!")
elif sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
else:
if sign: raise (ref LPError)(msg: "Cannot sign message without peer info")

msg

@@ -90,10 +86,10 @@ proc init*(
data: seq[byte],
topic: string,
seqno: Option[uint64]): Message
{.gcsafe, raises: [Defect, LPError].} =
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
msg.fromPeer = peerId

if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())
msg

@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options, sequtils
import options, sequtils, sugar
import "../../.."/[
peerid,
routing_record,
@@ -21,6 +18,14 @@ import "../../.."/[

export options

proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
var fieldNames: seq[string]
for name, _ in fieldPairs(T()):
fieldNames &= name
if fieldNames != existingFieldNames:
fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)

type
PeerInfoMsg* = object
peerId*: PeerId
@@ -45,6 +50,7 @@ type
iwant*: seq[ControlIWant]
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
idontwant*: seq[ControlIWant]

ControlIHave* = object
topicId*: string
@@ -65,6 +71,8 @@ type
subscriptions*: seq[SubOpts]
messages*: seq[Message]
control*: Option[ControlMessage]
ping*: seq[byte]
pong*: seq[byte]

func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
@@ -111,15 +119,59 @@ func shortLog*(msg: Message): auto =
)

func shortLog*(m: RPCMsg): auto =
if m.control.isSome:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get().shortLog
)
else:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: ControlMessage().shortLog
)
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get(ControlMessage()).shortLog
)

static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
proc byteSize(peerInfo: PeerInfoMsg): int =
peerInfo.peerId.len + peerInfo.signedPeerRecord.len

static: expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool

static: expectedFields(Message, @["fromPeer", "data", "seqno", "topicIds", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len +
msg.signature.len + msg.key.len + msg.topicIds.foldl(a + b.len, 0)

proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIHave, @["topicId", "messageIds"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicId.len + controlIHave.messageIds.foldl(a + b.len, 0)

proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)

static: expectedFields(ControlIWant, @["messageIds"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIds.foldl(a + b.len, 0)

proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)

static: expectedFields(ControlGraft, @["topicId"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicId.len

static: expectedFields(ControlPrune, @["topicId", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicId.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64

static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)

static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
proc byteSize*(rpc: RPCMsg): int =
result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
rpc.ping.len + rpc.pong.len
rpc.control.withValue(ctrl):
result += ctrl.byteSize

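The expectedFields guard above uses compile-time fieldPairs introspection so that adding a field to an RPC type breaks the build until every hand-written byteSize proc is revisited. The core of the trick, reduced to a toy type (Point and fieldNames are made up for illustration):

```nim
type Point = object
  x, y: int

proc fieldNames[T](t: typedesc[T]): seq[string] =
  # enumerate the declared fields of an object type, as expectedFields does
  for name, _ in fieldPairs(T()):
    result.add name

# Evaluated at compile time: fails the build the moment Point gains,
# loses, or renames a field.
static: doAssert fieldNames(Point) == @["x", "y"]
```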
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import options
import stew/assign2
@@ -90,6 +87,8 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
ipb.write(3, graft)
for prune in control.prune:
ipb.write(4, prune)
for idontwant in control.idontwant:
ipb.write(5, idontwant)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
@@ -213,6 +212,7 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
var iwantpbs: seq[seq[byte]]
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
var idontwant: seq[seq[byte]]
if ? cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(? decodeIHave(initProtoBuffer(item)))
@@ -225,6 +225,9 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
if ? cpb.getRepeatedField(4, prunepbs):
for item in prunepbs:
control.prune.add(? decodePrune(initProtoBuffer(item)))
if ? cpb.getRepeatedField(5, idontwant):
for item in idontwant:
control.idontwant.add(? decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics", graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
@@ -317,8 +320,14 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
pb.write(1, item)
for item in msg.messages:
pb.write(2, item, anonymize)
if msg.control.isSome():
pb.write(3, msg.control.get())
msg.control.withValue(control):
pb.write(3, control)
# nim-libp2p extension, using fields which are unlikely to be used
# by other extensions
if msg.ping.len > 0:
pb.write(60, msg.ping)
if msg.pong.len > 0:
pb.write(61, msg.pong)
if len(pb.buffer) > 0:
pb.finish()
pb.buffer
@@ -326,8 +335,10 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var rpcMsg = ok(RPCMsg())
assign(rpcMsg.get().messages, ? pb.decodeMessages())
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.get().control, ? pb.decodeControl())
rpcMsg
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ? pb.decodeMessages())
assign(rpcMsg.subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.control, ? pb.decodeControl())
discard ? pb.getField(60, rpcMsg.ping)
discard ? pb.getField(61, rpcMsg.pong)
ok(rpcMsg)

@@ -7,15 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[tables]

import chronos/timer, stew/results

import ../../utility

const Timeout* = 10.seconds # default timeout in ms

type
@@ -58,9 +57,9 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =

var previous = t.del(k) # Refresh existing item

let addedAt =
if previous.isSome: previous.get().addedAt
else: now
var addedAt = now
previous.withValue(previous):
addedAt = previous.addedAt

let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)


@@ -7,16 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import tables, sequtils, sugar, sets, options
import tables, sequtils, sugar, sets
import metrics except collect
import chronos,
chronicles,
bearssl/rand,
stew/[byteutils, objects]
stew/[byteutils, objects, results]
import ./protocol,
../switch,
../routing_record,
@@ -30,6 +28,11 @@ export chronicles
logScope:
topics = "libp2p discovery rendezvous"

declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")

const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
@@ -65,34 +68,34 @@ type
Register = object
ns : string
signedPeerRecord: seq[byte]
ttl: Option[uint64] # in seconds
ttl: Opt[uint64] # in seconds

RegisterResponse = object
status: ResponseStatus
text: Option[string]
ttl: Option[uint64] # in seconds
text: Opt[string]
ttl: Opt[uint64] # in seconds

Unregister = object
ns: string

Discover = object
ns: string
limit: Option[uint64]
cookie: Option[seq[byte]]
limit: Opt[uint64]
cookie: Opt[seq[byte]]

DiscoverResponse = object
registrations: seq[Register]
cookie: Option[seq[byte]]
cookie: Opt[seq[byte]]
status: ResponseStatus
text: Option[string]
text: Opt[string]

Message = object
msgType: MessageType
register: Option[Register]
registerResponse: Option[RegisterResponse]
unregister: Option[Unregister]
discover: Option[Discover]
discoverResponse: Option[DiscoverResponse]
register: Opt[Register]
registerResponse: Opt[RegisterResponse]
unregister: Opt[Unregister]
discover: Opt[Discover]
discoverResponse: Opt[DiscoverResponse]

proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
@@ -104,17 +107,17 @@ proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
if r.ttl.isSome():
result.write(3, r.ttl.get())
r.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()

proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
if rr.text.isSome():
result.write(2, rr.text.get())
if rr.ttl.isSome():
result.write(3, rr.ttl.get())
rr.text.withValue(text):
result.write(2, text)
rr.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()

proc encode(u: Unregister): ProtoBuffer =
@@ -125,48 +128,48 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.limit.isSome():
result.write(2, d.limit.get())
if d.cookie.isSome():
result.write(3, d.cookie.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
result.write(3, cookie)
result.finish()

proc encode(d: DiscoverResponse): ProtoBuffer =
proc encode(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in d.registrations:
for reg in dr.registrations:
result.write(1, reg.encode())
if d.cookie.isSome():
result.write(2, d.cookie.get())
result.write(3, d.status.uint)
if d.text.isSome():
result.write(4, d.text.get())
dr.cookie.withValue(cookie):
result.write(2, cookie)
result.write(3, dr.status.uint)
dr.text.withValue(text):
result.write(4, text)
result.finish()

proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
if msg.register.isSome():
result.write(2, msg.register.get().encode())
if msg.registerResponse.isSome():
result.write(3, msg.registerResponse.get().encode())
if msg.unregister.isSome():
result.write(4, msg.unregister.get().encode())
if msg.discover.isSome():
result.write(5, msg.discover.get().encode())
if msg.discoverResponse.isSome():
result.write(6, msg.discoverResponse.get().encode())
msg.register.withValue(register):
result.write(2, register.encode())
msg.registerResponse.withValue(registerResponse):
result.write(3, registerResponse.encode())
msg.unregister.withValue(unregister):
result.write(4, unregister.encode())
msg.discover.withValue(discover):
result.write(5, discover.encode())
msg.discoverResponse.withValue(discoverResponse):
result.write(6, discoverResponse.encode())
result.finish()

proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
if r1.isErr() or r2.isErr(): return none(Cookie)
some(c)
if r1.isErr() or r2.isErr(): return Opt.none(Cookie)
Opt.some(c)

proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
var
r: Register
ttl: uint64
@@ -175,11 +178,11 @@ proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
if r3.get(): r.ttl = some(ttl)
some(r)
if r1.isErr() or r2.isErr() or r3.isErr(): return Opt.none(Register)
if r3.get(false): r.ttl = Opt.some(ttl)
Opt.some(r)

proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
@@ -191,20 +194,20 @@ proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterRespo
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
if r2.get(): rr.text = some(text)
if r3.get(): rr.ttl = some(ttl)
some(rr)
not checkedEnumAssign(rr.status, statusOrd): return Opt.none(RegisterResponse)
if r2.get(false): rr.text = Opt.some(text)
if r3.get(false): rr.ttl = Opt.some(ttl)
Opt.some(rr)

proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr(): return none(Unregister)
some(u)
if r1.isErr(): return Opt.none(Unregister)
Opt.some(u)

proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
var
d: Discover
limit: uint64
@@ -214,12 +217,12 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
r1 = pb.getRequiredField(1, d.ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
if r2.get(): d.limit = some(limit)
if r3.get(): d.cookie = some(cookie)
some(d)
if r1.isErr() or r2.isErr() or r3.isErr: return Opt.none(Discover)
if r2.get(false): d.limit = Opt.some(limit)
if r3.get(false): d.cookie = Opt.some(cookie)
Opt.some(d)

proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
@@ -233,48 +236,47 @@ proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverRespo
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
not checkedEnumAssign(dr.status, statusOrd): return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg)
if regOpt.isNone(): return none(DiscoverResponse)
dr.registrations.add(regOpt.get())
if r2.get(): dr.cookie = some(cookie)
if r4.get(): dr.text = some(text)
some(dr)
let regOpt = Register.decode(reg).valueOr:
return
dr.registrations.add(regOpt)
if r2.get(false): dr.cookie = Opt.some(cookie)
if r4.get(false): dr.text = Opt.some(text)
Opt.some(dr)

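The decode procs above all follow one shape: read each field into a Result, bail out with Opt.none on any failure, and only then assemble the value. The Message decoder that follows compresses this with `?`, which propagates an empty Opt straight out of the enclosing proc. The same propagation on a toy parser (every name here is illustrative):

```nim
import std/strutils
import stew/results

proc parseIntOpt(s: string): Opt[int] =
  try: Opt.some(parseInt(s))
  except ValueError: Opt.none(int)

proc sumPair(a, b: string): Opt[int] =
  # `?` unwraps the value, or returns Opt.none(int) from sumPair immediately
  let x = ? parseIntOpt(a)
  let y = ? parseIntOpt(b)
  Opt.some(x + y)

assert sumPair("2", "3") == Opt.some(5)
assert sumPair("2", "z").isNone()
```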
proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, pbr)
r3 = pb.getField(3, pbrr)
r4 = pb.getField(4, pbu)
r5 = pb.getField(5, pbd)
r6 = pb.getField(6, pbdr)
if r1.isErr() or r2.isErr() or r3.isErr() or
r4.isErr() or r5.isErr() or r6.isErr() or
not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
if r2.get():
let pb = initProtoBuffer(buf)

? pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd): return Opt.none(Message)

if ? pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone(): return none(Message)
if r3.get():
if msg.register.isNone(): return Opt.none(Message)

if ? pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone(): return none(Message)
if r4.get():
if msg.registerResponse.isNone(): return Opt.none(Message)

if ? pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone(): return none(Message)
if r5.get():
if msg.unregister.isNone(): return Opt.none(Message)

if ? pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone(): return none(Message)
if r6.get():
if msg.discover.isNone(): return Opt.none(Message)

if ? pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone(): return none(Message)
some(msg)
if msg.discoverResponse.isNone(): return Opt.none(Message)

Opt.some(msg)
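The rewritten Message decoder drops the bank of r1..r6 flags in favor of early returns: `toOpt` converts a Result into an Opt by discarding the error payload, and the `?` operator propagates an empty Opt out of the proc (`optValue`, used for the optional fields, appears to be a helper from libp2p's own protobuf layer rather than stew). A rough standalone sketch, with a stand-in for getRequiredField:

```nim
import stew/results

type ProtoError = enum peMissingField

proc getRequiredField(x: int): Result[int, ProtoError] =
  # stand-in for the protobuf accessor
  if x >= 0: ok(x) else: err(peMissingField)

proc decode(x: int): Opt[int] =
  # `?` returns Opt.none(int) from `decode` if the field was missing
  let v = ? getRequiredField(x).toOpt()
  Opt.some(v * 2)

echo decode(3).get(0)     # 6
echo decode(-1).isNone()  # true
```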
type
@@ -314,7 +316,7 @@ proc sendRegisterResponse(conn: Connection,
ttl: uint64) {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl)))))
await conn.writeLp(msg.buffer)

proc sendRegisterResponseError(conn: Connection,
@@ -322,7 +324,7 @@ proc sendRegisterResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: status, text: some(text)))))
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)

proc sendDiscoverResponse(conn: Connection,
@@ -330,10 +332,10 @@ proc sendDiscoverResponse(conn: Connection,
cookie: Cookie) {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(
discoverResponse: Opt.some(DiscoverResponse(
status: Ok,
registrations: s,
cookie: some(cookie.encode().buffer)
cookie: Opt.some(cookie.encode().buffer)
))
))
await conn.writeLp(msg.buffer)
@@ -343,7 +345,7 @@ proc sendDiscoverResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)
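All of these senders funnel through `conn.writeLp`, i.e. the encoded protobuf is written with its length prefixed as an unsigned varint, per the usual libp2p framing. A sketch of just the framing step (this encoder is illustrative, not libp2p's implementation):

```nim
proc putUVarint(buf: var seq[byte], x: uint64) =
  # LEB128-style unsigned varint: 7 bits per byte, MSB = continuation
  var v = x
  while v >= 0x80'u64:
    buf.add(byte(v and 0x7F) or 0x80'u8)
    v = v shr 7
  buf.add(byte(v))

var frame: seq[byte]
let payload = @[byte 0x08, 0x01]        # some encoded protobuf message
putUVarint(frame, payload.len.uint64)   # length prefix
frame.add(payload)
echo frame                              # @[2, 8, 1]
```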
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
@@ -378,6 +380,7 @@ proc save(rdv: RendezVous,

proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1..255:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
@@ -389,6 +392,8 @@ proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
libp2p_rendezvous_registered.inc()
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
@@ -398,11 +403,13 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.defaultDT
libp2p_rendezvous_registered.dec()
except KeyError:
return

proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0..255:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
@@ -411,7 +418,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.get()).get()
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
@@ -442,7 +449,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
break
if reg.expiration < n or index.uint64 <= cookie.offset: continue
limit.dec()
reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
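The cookie is what makes discovery incremental: registrations live at monotonically increasing indices, the returned cookie records the highest offset already served, and a repeated query carrying that cookie skips everything at or below it. A toy model of the selection loop (field names are illustrative):

```nim
type Registration = object
  index: uint64
  ns: string

proc page(regs: seq[Registration], cookieOffset: uint64, limit: int): seq[Registration] =
  var remaining = limit
  for reg in regs:
    if remaining == 0: break
    if reg.index <= cookieOffset: continue  # already served under this cookie
    result.add(reg)
    dec remaining

let regs = @[Registration(index: 1), Registration(index: 2), Registration(index: 3)]
echo page(regs, 1'u64, 10).len  # 2 -- only indices 2 and 3 are new
```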
@@ -457,12 +464,13 @@ proc advertisePeer(rdv: RendezVous,
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
msgRecv = Message.decode(buf).get()
msgRecv = Message.decode(buf).tryGet()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.isNone() or
msgRecv.registerResponse.get().status != ResponseStatus.Ok:
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
else:
trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", error = exc.msg
finally:
@@ -470,19 +478,18 @@ proc advertisePeer(rdv: RendezVous,
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)

proc advertise*(rdv: RendezVous,
method advertise*(rdv: RendezVous,
ns: string,
ttl: Duration = MinimumDuration) {.async.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
if sprBuff.isErr():
ttl: Duration = MinimumDuration) {.async, base.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration..MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
let
r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: some(r)))
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let fut = collect(newSeq()):
for peer in rdv.peers:
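Besides becoming a base method that subclasses can override, `advertise` collapses the isErr/raise pair into `valueOr`, whose body runs only on the error branch. A compact illustration of that error-to-exception boundary (the encoder below is a stand-in):

```nim
import stew/results

type RendezVousError = object of CatchableError

proc encodeSpr(valid: bool): Result[seq[byte], string] =
  # stand-in for signedPeerRecord.encode()
  if valid: ok(@[byte 1, 2, 3]) else: err("corrupt key")

proc advertise(valid: bool): seq[byte] {.raises: [RendezVousError].} =
  encodeSpr(valid).valueOr:
    # `error` is available in this branch if a message is needed
    raise newException(RendezVousError, "Wrong Signed Peer Record")

echo advertise(true)  # @[1, 2, 3]
```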
@@ -498,7 +505,9 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
continue
res.data
except KeyError as exc:
@[]
@@ -519,38 +528,42 @@ proc request*(rdv: RendezVous,
proc requestPeer(peer: PeerId) {.async.} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
d.limit = some(limit)
d.limit = Opt.some(limit)
d.cookie =
try:
some(rdv.cookiesSaved[peer][ns])
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
none(seq[byte])
Opt.none(seq[byte])
await conn.writeLp(encode(Message(
msgType: MessageType.Discover,
discover: some(d))).buffer)
discover: Opt.some(d))).buffer)
let
buf = await conn.readLp(65536)
msgRcv = Message.decode(buf).get()
if msgRcv.msgType != MessageType.DiscoverResponse or
msgRcv.discoverResponse.isNone():
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
if msgRcv.msgType != MessageType.DiscoverResponse:
debug "Unexpected discover response", msgType = msgRcv.msgType
return
let resp = msgRcv.discoverResponse.get()
let resp = msgRcv.discoverResponse.valueOr:
debug "Discover response is empty"
return
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
if resp.cookie.isSome() and resp.cookie.get().len < 1000:
if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
rdv.cookiesSaved[peer][ns] = resp.cookie.get()
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
rdv.cookiesSaved[peer][ns] = cookie
for r in resp.registrations:
if limit == 0: return
if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
if sprRes.isErr(): continue
let pr = sprRes.get().data
let ttl = r.ttl.get(MaximumTTL + 1)
if ttl > MaximumTTL: continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr: continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
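Two idioms carry this hunk: `withValue` (a libp2p utility for Opt) runs its body only when the cookie is present, and std/tables' `hasKeyOrPut` inserts the inner table for a first-seen peer while returning true when the peer already had one, in which case only the namespace slot is overwritten. A sketch of the cache update alone:

```nim
import std/tables

var cookiesSaved: Table[string, Table[string, seq[byte]]]

proc saveCookie(peer, ns: string, cookie: seq[byte]) =
  # first call for `peer` inserts {ns: cookie}; later calls land here with
  # hasKeyOrPut() == true and just update the namespace entry
  if cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
    cookiesSaved[peer][ns] = cookie

saveCookie("peerA", "ns1", @[byte 1])
saveCookie("peerA", "ns2", @[byte 2])
saveCookie("peerA", "ns1", @[byte 3])
echo cookiesSaved["peerA"]["ns1"]  # @[3]
```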
@@ -589,7 +602,7 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
rdv.unsubscribeLocally(ns)
let msg = encode(Message(
msgType: MessageType.Unregister,
unregister: some(Unregister(ns: ns))))
unregister: Opt.some(Unregister(ns: ns))))

proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
try:
@@ -623,17 +636,17 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).get()
msg = Message.decode(buf).tryGet()
case msg.msgType:
of MessageType.Register: await rdv.register(conn, msg.register.get())
of MessageType.Register: await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover: await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
@@ -657,9 +670,13 @@ proc new*(T: typedesc[RendezVous],
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
total += data.len
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))

method start*(rdv: RendezVous) {.async.} =
if not rdv.registerDeletionLoop.isNil:
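The sweep relies on the registration store being an offset-based sequence (libp2p's OffsettedSeq): expired entries are flushed from the front, the global offset advances past them, and each namespace's index list then drops indices below the new offset. A simplified model, assuming flush-from-the-front semantics:

```nim
import std/sequtils

type OffsettedQueue = object
  offset: uint64  # absolute index of data[0]
  data: seq[int]  # here the payload is just an expiration stamp

proc flushIfIt(q: var OffsettedQueue, cutoff: int) =
  # drop expired entries from the front, advancing the offset
  var drop = 0
  while drop < q.data.len and q.data[drop] < cutoff:
    inc drop
  q.data = q.data[drop .. ^1]
  q.offset += drop.uint64

var q = OffsettedQueue(data: @[1, 2, 5, 9])
var nsIndexes = @[0'u64, 1, 2, 3]
q.flushIfIt(4)                       # drops stamps 1 and 2
nsIndexes.keepItIf(it >= q.offset)   # namespace forgets flushed entries
echo q.offset, " ", nsIndexes        # 2 @[2, 3]
```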
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/strformat
import chronos
@@ -136,7 +133,7 @@ proc encrypt(
state: var CipherState,
data: var openArray[byte],
ad: openArray[byte]): ChaChaPolyTag
{.noinit, raises: [Defect, NoiseNonceMaxError].} =
{.noinit, raises: [NoiseNonceMaxError].} =

var nonce: ChaChaPolyNonce
nonce[4..<12] = toBytesLE(state.n)
@@ -148,7 +145,7 @@ proc encrypt(
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")

proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
result.add(data)

@@ -160,7 +157,7 @@ proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
tag = byteutils.toHex(tag), data = result.shortLog, nonce = state.n - 1

proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
var
tagIn = data.toOpenArray(data.len - ChaChaPolyTag.len, data.high).intoChaChaPolyTag
tagOut: ChaChaPolyTag
@@ -209,7 +206,7 @@ proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
ss.cs = CipherState(k: temp_keys[2])

proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey:
result = ss.cs.encryptWithAd(ss.h.data, data)
@@ -218,7 +215,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
ss.mixHash(result)

proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
result = ss.cs.decryptWithAd(ss.h.data, data)
@@ -448,7 +445,7 @@ proc encryptFrame(
sconn: NoiseConnection,
cipherFrame: var openArray[byte],
src: openArray[byte])
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
# Frame consists of length + cipher data + tag
doAssert src.len <= MaxPlainSize
doAssert cipherFrame.len == 2 + src.len + sizeof(ChaChaPolyTag)
@@ -557,8 +554,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI

trace "Remote peer id", pid = $pid

if peerId.isSome():
let targetPid = peerId.get()
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
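The Noise hunks above, and the matching ones in the files that follow, are one mechanical change: with the Nim versions the project now targets, Defect is no longer tracked by the effect system, so it disappears from every raises list and the version-gated `when` block around `{.push raises: [].}` goes away. A self-contained illustration of what the annotation now means:

```nim
{.push raises: [].}

type NoiseNonceMaxError = object of CatchableError  # name taken from the diff

proc bumpNonce(n: var uint64) {.raises: [NoiseNonceMaxError].} =
  # only CatchableError descendants must be declared; defects
  # (e.g. from doAssert) are permitted even under raises: []
  if n == high(uint64):
    raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
  inc n

{.pop.}

var nonce = 0'u64
bumpNonce(nonce)
echo nonce  # 1
```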
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos
import secure, ../../stream/connection
@@ -22,7 +19,7 @@ type

method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything

p.codec = PlainTextCodec
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[oids, strformat]
import bearssl/rand
@@ -262,7 +259,7 @@ proc newSecioConn(conn: Connection,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn
{.raises: [Defect, LPError].} =
{.raises: [LPError].} =
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
## ``order``.
@@ -342,8 +339,7 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI

remotePeerId = PeerId.init(remotePubkey).tryGet()

if peerId.isSome():
let targetPid = peerId.get()
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(SecioError, "Failed to validate expected peerId.")

@@ -439,14 +435,10 @@ proc new*(
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
let pkRes = localPrivateKey.getPublicKey()
if pkRes.isErr:
raise newException(Defect, "Invalid private key")

let secio = Secio(
rng: rng,
localPrivateKey: localPrivateKey,
localPublicKey: pkRes.get(),
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
)
secio.init()
secio
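The Secio constructor cleanup uses stew/results' `expect`, which unwraps a Result or raises a Defect carrying the given message: appropriate here because a private key that cannot yield a public key is a programming error, not a runtime condition. In miniature (the key function is a stand-in):

```nim
import stew/results

proc getPublicKey(valid: bool): Result[string, string] =
  # stand-in for crypto's PrivateKey.getPublicKey()
  if valid: ok("04deadbeef") else: err("malformed key")

let pub = getPublicKey(true).expect("Invalid private key")
echo pub  # 04deadbeef; on the err branch, expect raises a Defect instead
```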
@@ -8,10 +8,7 @@
# those terms.

{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[strformat]
import stew/results
@@ -138,10 +135,9 @@ method init*(s: Secure) =

method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)

method readOnce*(s: SecureConn,
pbytes: pointer,
@@ -9,10 +9,7 @@

## This module implements Routing Records.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[sequtils, times]
import pkg/stew/results
@@ -45,14 +42,12 @@ proc decode*(
? pb.getRequiredField(2, record.seqNo)

var addressInfos: seq[seq[byte]]
let pb3 = ? pb.getRepeatedField(3, addressInfos)

if pb3:
if ? pb.getRepeatedField(3, addressInfos):
for address in addressInfos:
var addressInfo = AddressInfo()
let subProto = initProtoBuffer(address)
let f = subProto.getField(1, addressInfo.address)
if f.isOk() and f.get():
if f.get(false):
record.addresses &= addressInfo

if record.addresses.len == 0:
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import chronos, chronicles, times, tables, sequtils
import ../switch,
@@ -20,7 +17,7 @@ logScope:
topics = "libp2p autorelay"

type
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].}

AutoRelayService* = ref object of Service
running: bool
@@ -35,9 +32,12 @@ type
addressMapper: AddressMapper
rng: ref HmacDrbgContext

proc isRunning*(self: AutoRelayService): bool =
return self.running

proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))

proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
@@ -58,8 +58,8 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch)
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
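The renewal cadence is the interesting line here: the service sleeps for the reservation TTL minus 30 seconds, so the relay slot is re-reserved just before it lapses. A stripped-down sketch of that loop using chronos (the renewal itself is stubbed out):

```nim
import chronos

proc refreshLoop(ttlSeconds: int64, rounds: int) {.async.} =
  for _ in 0 ..< rounds:
    # a real service would call reserve() on the relay here
    await sleepAsync(chronos.seconds(max(ttlSeconds - 30, 1'i64)))

waitFor refreshLoop(31, 2)  # two renewals, ~1 s apart with this toy TTL
echo "reservation refreshed"
```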
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe, async.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)

let hasBeenSetUp = await procCall Service(self).setup(switch)
@@ -83,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()

proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -116,14 +116,14 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)

method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)

method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/[tables, sequtils]

@@ -33,13 +30,9 @@ type
onNewStatusHandler: StatusAndConfidenceHandler
autoRelayService: AutoRelayService
autonatService: AutonatService
isPublicIPAddrProc: IsPublicIPAddrProc

IsPublicIPAddrProc* = proc(ta: TransportAddress): bool {.gcsafe, raises: [Defect].}

proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService,
isPublicIPAddrProc: IsPublicIPAddrProc = isGlobal): T =
return T(autonatService: autonatService, autoRelayService: autoRelayService, isPublicIPAddrProc: isPublicIPAddrProc)
proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService): T =
return T(autonatService: autonatService, autoRelayService: autoRelayService)

proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
@@ -52,14 +45,8 @@ proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Fut
for address in switch.peerStore[AddressBook][peerId]:
try:
let isRelayed = address.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
continue
if DNS.matchPartial(address):
if not isRelayed.get(false) and address.isPublicMA():
return await tryConnect(address)
else:
let ta = initTAddress(address)
if ta.isOk() and self.isPublicIPAddrProc(ta.get()):
return await tryConnect(address)
except CatchableError as err:
debug "Failed to create direct connection.", err = err.msg
continue
@@ -107,10 +94,10 @@ method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =

switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)

self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Option[float]) {.gcsafe, async.} =
if networkReachability == NetworkReachability.NotReachable:
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable:
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
discard await self.autoRelayService.stop(switch)

# We do it here instead of in the AutonatService because this is useful only when hole punching.
@@ -9,10 +9,7 @@

## This module implements Signed Envelope.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/sugar
import pkg/stew/[results, byteutils]
@@ -115,19 +112,12 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
let env = Envelope.decode(buffer, domain)
if env.isOk():
value = env.get()
ok(true)
else:
err(ProtoError.IncorrectBlob)
value = Envelope.decode(buffer, domain).valueOr: return err(ProtoError.IncorrectBlob)
ok(true)

proc write*(pb: var ProtoBuffer, field: int, env: Envelope): Result[void, CryptoError] =
let e = env.encode()

if e.isErr():
return err(e.error)
pb.write(field, e.get())
let e = ? env.encode()
pb.write(field, e)
ok()

type
@@ -145,7 +135,7 @@ proc init*[T](_: typedesc[SignedPayload[T]],
T.payloadType(),
data.encode(),
T.payloadDomain)


ok(SignedPayload[T](data: data, envelope: envelope))

proc getField*[T](pb: ProtoBuffer, field: int,
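Same simplification on the write side: `?` unwraps the encoded envelope or propagates its CryptoError, replacing the explicit isErr/return block, while `valueOr` does the corresponding job in getField. A standalone sketch of the `?` rewrite (encode below is a stand-in):

```nim
import stew/results

type CryptoError = enum ceBadSignature

proc encode(valid: bool): Result[seq[byte], CryptoError] =
  if valid: ok(@[byte 0xAA]) else: err(ceBadSignature)

proc write(valid: bool): Result[void, CryptoError] =
  let e = ? encode(valid)  # early-returns err(ceBadSignature) on failure
  discard e                # a real impl would serialize `e` into the buffer
  ok()

echo write(true).isOk()   # true
echo write(false).error   # ceBadSignature
```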
@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.

when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}

import std/strformat
import stew/byteutils