Compare commits

...

96 Commits

Author SHA1 Message Date
Jacek Sieka
a9b5f504e9 debug logging 2024-03-02 10:19:16 +01:00
diegomrsantos
fe4ff79885 feat: message prioritization with immediate peer-published dispatch and queuing for other msgs (#1015) 2024-02-16 10:54:16 +01:00
Álex Cabeza Romero
aa4ebb0b3c docs(general): Improve docs (#1021) 2024-02-15 16:14:26 +01:00
diegomrsantos
e0f70b7177 improvement: enhanced checkExpiring macro with custom timeout (#1023) 2024-02-09 11:51:27 +01:00
Ludovic Chenut
c1dfd58772 fix: yamux metrics (#1022) 2024-02-08 12:36:58 +01:00
Álex Cabeza Romero
04af0c4323 test(flaky): Log checkExpiring failure (#1018)
Add simple logging mechanism on checkExpiring failure.
2024-02-06 17:47:13 +01:00
Ludovic Chenut
eb0890cd6f docs: add comments and improve yamux readability (#1006) 2024-02-02 15:14:02 +01:00
Álex Cabeza Romero
9bc5ec1566 tests(flaky): Increase check timeouts (#995)
Increase checkExpiring timeouts to verify impact on flaky tests.
2024-01-31 23:46:12 +00:00
diegomrsantos
5594bcb33e fix: more metrics issues when libp2p_expensive_metrics is enabled (#1016) 2024-01-30 16:55:55 +01:00
diegomrsantos
d46bcdb6ac fix: compilation issue when libp2p_expensive_metrics is enabled. (#1014) 2024-01-29 11:31:11 +01:00
diegomrsantos
9468bb6b4d fix(hole-punching-interop): update nim to 1.6.16 (#1012) 2024-01-26 11:15:40 +01:00
diegomrsantos
2725be64ba fix: use a temp var in withValue (#1010) 2024-01-18 16:25:56 +01:00
diegomrsantos
e3c967ad19 improvement(ci): improve ci daily workflows (#1002) 2023-12-18 20:14:33 +01:00
Ludovic Chenut
d2c98bd87d improvement(yamux): make the window size configurable (#987)
Co-authored-by: Diego <diego@status.im>
2023-12-15 16:30:50 +01:00
Ivan FB
3011ba4326 libp2p/multiaddress.nim: use of IpAddress instead of ValidIpAddress (#1001) 2023-12-12 12:53:36 +01:00
Etan Kissling
c6566707fa include connection info when logging identify message (#991) 2023-12-05 18:44:16 +01:00
diegomrsantos
3be681ec4d feat: add hole-punching interop tests (#998) 2023-12-05 18:37:33 +01:00
Jacek Sieka
2ede0fa40c remove redundant gcsafe annotations (#999) 2023-12-05 08:05:32 +01:00
Roman Zajic
7c195ab927 fix: remove forgotten "matrix-prep" job (#997) 2023-12-02 09:56:50 +08:00
Roman Zajic
3230407ffe fix: move workflows for Nim Devel and legacy i386 from "Daily" (#968) 2023-12-01 17:47:47 +08:00
diegomrsantos
deb72c8580 fix(dcutr): update the DCUtR initiator transport direction to Inbound (#994) 2023-11-29 17:38:47 +01:00
diegomrsantos
ce0685c272 fix(identify): do not add p2p and relayed addrs to observed addr manager (#990) 2023-11-21 18:24:35 +01:00
diegomrsantos
1f4b090227 fix(yamux): doesn't work in a Relayv2 connection (#979)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-11-21 16:03:29 +01:00
diegomrsantos
fb05f5ae22 fix(dcutr): handle tcp/p2p addresses (#989) 2023-11-20 17:06:17 +01:00
diegomrsantos
e12f65f193 fix(multiaddress): add quic-v1 multiaddress support (#988) 2023-11-20 11:09:56 +01:00
diegomrsantos
4b3bc4f819 Make ObservedAddrManager injectable (#970) 2023-11-20 11:06:02 +01:00
diegomrsantos
6791f5e7bb fix(dcutr): make the dcutr client inbound and the server outbound (#983) 2023-11-17 10:46:35 +01:00
diegomrsantos
08d9c84aca Remove unittest2 range (#986) 2023-11-17 08:20:02 +01:00
Jacek Sieka
4e7eaba67a fix chronos v4 compat (#982) 2023-11-16 16:54:34 +01:00
diegomrsantos
5f7a3ab829 fix: doc workflow (#985) 2023-11-16 15:58:05 +01:00
diegomrsantos
ebef85c9d7 Rate limit fixes (#965) 2023-11-09 14:20:28 +01:00
diegomrsantos
3fc1236659 Revert "Prevent concurrent IWANT of the same message (#943)" (#977) 2023-11-03 15:24:27 +01:00
Ludovic Chenut
fc4e9a8bb8 Fix WS transport when the connection aborts (#967) 2023-10-23 17:12:20 +02:00
Tanguy
60f953629d Remove ConnManager from Upgrade (#959) 2023-10-13 12:08:17 +00:00
diegomrsantos
18b0f726df Rate Limit tests (#953) 2023-10-05 15:12:07 +00:00
diegomrsantos
459f6851e7 Add a flag if a peer should be disconnected when above rate limit (#954) 2023-10-05 14:51:27 +02:00
Tanguy
575344e2e9 Update interop CI name (#956) 2023-10-05 10:54:24 +02:00
diegomrsantos
75871817ee Split msgs in iwant response if bigger than limit (#944) 2023-10-02 11:39:28 +02:00
diegomrsantos
61929aed6c Improve rdv advertise (#951)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
2023-09-27 15:52:22 +02:00
diegomrsantos
56599f5b9d GossipSub Traffic scoring (#920) 2023-09-22 16:45:08 +02:00
Tanguy
b2eac7ecbd GS: Relay messages to direct peers (#949) 2023-09-15 17:22:02 +02:00
Tanguy
20b0e40f7d Fix doc generation CI (#948) 2023-09-08 12:21:04 +02:00
Tanguy
ff77d52851 IDontWant metrics (#946) 2023-09-06 16:05:59 +00:00
Tanguy
545a31d4f0 Bump dependencies (#947) 2023-09-06 17:52:43 +02:00
Jacek Sieka
b76bac752f avoid importing ecnist when not needed (#942) 2023-08-30 11:39:48 +02:00
diegomrsantos
c6aa085e98 Prevent concurrent IWANT of the same message (#943) 2023-08-21 16:34:24 +02:00
Ludovic Chenut
e03547ea3e Perf protocol (#925) 2023-08-14 17:25:55 +02:00
diegomrsantos
f80ce3133c Bandwidth estimate as a parameter (#941) 2023-08-14 17:03:46 +02:00
Tanguy
d6263bf751 nim-websock new version compatibility (#939) 2023-08-02 17:10:31 +02:00
Tanguy
56c23a286a Add specs crypto tests (#938) 2023-08-01 15:28:38 +02:00
Tanguy
7a369dd1bf GossipSub: Limit flood publishing (#911)
Co-authored-by: Diego <diego@status.im>
2023-07-31 11:13:51 +02:00
Tanguy
b784167805 GossipSub: IDontWant (#934) 2023-07-28 10:58:05 +02:00
Tanguy
440461b24b GS: improve handleIHave (#922) 2023-07-11 12:17:50 +02:00
Jacek Sieka
fab1340020 avoid a few zeroMem (#932) 2023-07-11 12:17:28 +02:00
Tanguy
1721f078c7 Fix crash on empty write (#930) 2023-07-10 13:52:08 +00:00
Ivan Folgueira Bande
74c402ed9d wstransport.nim: avoid re-raising 'TransportOsError' to avoid stopping switch.accept (#929) 2023-07-07 11:32:20 +02:00
diegomrsantos
c45f9705ab Gossipsub scoring improvements (#909) 2023-07-04 00:27:45 +02:00
Etan Kissling
81b861b34e avoid ProveField warning in crypto.init (#915) 2023-06-29 15:28:25 +02:00
Jacek Sieka
43359dd9d1 standard nimble env vars (#921) 2023-06-28 17:51:51 +02:00
diegomrsantos
f85d0f75ea Handling Opt[PeerId] in logging (#923) 2023-06-28 17:00:33 +02:00
Tanguy
66f9dc9167 Remove all Result.get()s & Option -> Opt (#902)
Co-authored-by: Ludovic Chenut <ludovic@status.im>
Co-authored-by: Diego <diego@status.im>
2023-06-28 16:44:58 +02:00
Tanguy
1c4d0832ce Add GossipSub ping (#912) 2023-06-21 10:40:10 +02:00
Tanguy
224f92e172 Fix #916 regression causing accept loop lockup (#919) 2023-06-20 14:18:49 +00:00
Tanguy
5efa089196 TCP transport: handle getObservedAddr errors (#918) 2023-06-20 10:25:29 +02:00
Tanguy
9d4c4307de Bumper: fix case where target is up to date (#917) 2023-06-15 18:37:01 +02:00
Tanguy
49dfa84c6f Transports: handle TransportAbortedError properly (#916) 2023-06-14 15:55:56 +00:00
Tanguy
a65b7b028f GossipSub: remove peer if we can't establish sendConn (#886) 2023-06-14 17:23:39 +02:00
diegomrsantos
67711478ce Consider dns as public address (#913) 2023-06-13 17:58:41 +02:00
Tanguy
c28d8bb353 WS Transport: handle 'tls/ws' (#914) 2023-06-12 15:45:53 +00:00
Tanguy
eb78292d9c Bump deps (#896) 2023-06-07 17:42:42 +02:00
Vaclav Pavlin
3725f6a95b chore: add basic metrics for rendezvous (#905) 2023-06-07 15:45:06 +02:00
diegomrsantos
3640b4dd89 Autonat and HP changes (#899) 2023-06-07 15:26:58 +02:00
Tanguy
32085ca88a Allow to override TCP connection timeouts (#903) 2023-06-07 14:27:32 +02:00
Tanguy
c76d1e18ef Remove nim 1.2 support (#907) 2023-06-07 11:12:49 +00:00
Tanguy
41649f0999 Version 1.1.0 (#904) 2023-06-06 11:05:49 +00:00
diegomrsantos
67102873ba Fail only if all addresses in PeerRecord are invalid (#898)
Fixes https://github.com/waku-org/nwaku/issues/1768
2023-05-31 08:59:50 +02:00
diegomrsantos
d40d324160 fix missing import (#897) 2023-05-26 10:36:39 +02:00
diegomrsantos
a677b06273 Try a direct connection only if there isn't one already (#891) 2023-05-25 15:48:22 +02:00
diegomrsantos
6050cdef7e Refinement of Hole Punching Service (#892) 2023-05-25 15:47:00 +02:00
Tanguy
fedfa8e817 Fix bumper CI (#894) 2023-05-22 17:37:42 +02:00
diegomrsantos
6887b43777 Improve utility tests (#893) 2023-05-22 11:07:22 +02:00
Tanguy
225accd11b Less warnings (#813)
Co-authored-by: Diego <diego@status.im>
2023-05-18 10:24:17 +02:00
diegomrsantos
7d6bc545e0 Handle dns addrs in HP service (#890) 2023-05-16 14:59:02 +02:00
Tanguy
a1eb53b181 Fix gossipsub dOut handling (#883) 2023-04-26 13:44:45 +02:00
Tanguy
db629dca25 Fix network protocol metrics typo (#874) 2023-04-26 09:52:06 +02:00
diegomrsantos
a5666789b0 Hole Punching (#806)
Co-authored-by: Tanguy <tanguy@status.im>
2023-04-18 12:50:21 +02:00
diegomrsantos
b7726bf68f Dcutr (#824)
Co-authored-by: Tanguy <tanguy@status.im>
2023-04-14 16:23:19 +02:00
diegomrsantos
0221affe98 Invalid MA is ignored (#881) 2023-04-14 12:05:32 +00:00
diegomrsantos
edbd35b16c Fix interop tests (#882) 2023-04-13 19:38:34 +02:00
diegomrsantos
80cca0ecac Does not allow an empty MA (#877) 2023-04-06 14:19:01 +00:00
diegomrsantos
0041ed4cf8 Transport hole punching (#873)
Co-authored-by: Tanguy <tanguy@status.im>
2023-04-06 15:23:35 +02:00
Tanguy
95e98e8c51 Fix traffic metrics (#879) 2023-04-03 12:37:23 +00:00
Tanguy
4aa615c44c GossipSub: TimedEntry & shortAgent fixes (#858) 2023-04-03 11:05:01 +02:00
Tanguy
6b61ce8c91 GossipSub: Better IWANT handling (#875) 2023-04-03 10:56:20 +02:00
Alvaro Revuelta
53b060f8f0 Add getters for conns and streams (#878) 2023-03-31 00:16:39 +02:00
diegomrsantos
af5299f26c Create an ObservedAddrManager and add an AddressMapper in AutonatService and AutoRelayService (#871)
Co-authored-by: Tanguy <tanguy@status.im>
2023-03-24 15:42:49 +00:00
190 changed files with 6402 additions and 3367 deletions


@@ -10,11 +10,12 @@ jobs:
bumpProjects:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
target: [
{ repo: status-im/nimbus-eth2, branch: unstable },
{ repo: status-im/nwaku, branch: master },
{ repo: status-im/nim-codex, branch: main }
{ repo: waku-org/nwaku, branch: master },
{ repo: codex-storage/nim-codex, branch: master }
]
steps:
- name: Clone repo
@@ -23,13 +24,14 @@ jobs:
repository: ${{ matrix.target.repo }}
ref: ${{ matrix.target.branch }}
path: nbc
submodules: true
fetch-depth: 0
token: ${{ secrets.ACTIONS_GITHUB_TOKEN }}
- name: Checkout this ref
run: |
cd nbc/vendor/nim-libp2p
cd nbc
git submodule update --init vendor/nim-libp2p
cd vendor/nim-libp2p
git checkout $GITHUB_SHA
- name: Commit this bump
@@ -37,7 +39,7 @@ jobs:
cd nbc
git config --global user.email "${{ github.actor }}@users.noreply.github.com"
git config --global user.name = "${{ github.actor }}"
git commit -a -m "auto-bump nim-libp2p"
git commit --allow-empty -a -m "auto-bump nim-libp2p"
git branch -D nim-libp2p-auto-bump-${GITHUB_REF##*/} || true
git switch -c nim-libp2p-auto-bump-${GITHUB_REF##*/}
git push -f origin nim-libp2p-auto-bump-${GITHUB_REF##*/}


@@ -28,7 +28,7 @@ jobs:
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6]
branch: [version-1-6]
include:
- target:
os: linux

.github/workflows/daily.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0']"
cpu: "['amd64']"

.github/workflows/daily_common.yml (new file, 84 lines)

@@ -0,0 +1,84 @@
name: daily-common
on:
workflow_call:
inputs:
nim-branch:
description: 'Nim branch'
required: true
type: string
cpu:
description: 'CPU'
required: true
type: string
exclude:
description: 'Exclude matrix configurations'
required: false
type: string
default: "[]"
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
platform:
- os: linux
builder: ubuntu-20
shell: bash
- os: macos
builder: macos-12
shell: bash
- os: windows
builder: windows-2019
shell: msys2 {0}
branch: ${{ fromJSON(inputs.nim-branch) }}
cpu: ${{ fromJSON(inputs.cpu) }}
exclude: ${{ fromJSON(inputs.exclude) }}
defaults:
run:
shell: ${{ matrix.platform.shell }}
name: '${{ matrix.platform.os }}-${{ matrix.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.platform.builder }}
continue-on-error: ${{ matrix.branch == 'devel' || matrix.branch == 'version-2-0' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.platform.os }}
shell: ${{ matrix.platform.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.cpu }}
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: '~1.15.5'
cache: false
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --mm:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--mm:orc':\n"
NIMFLAGS="${NIMFLAGS} --mm:orc" nimble test
fi

.github/workflows/daily_i386.yml (new file, 13 lines)

@@ -0,0 +1,13 @@
name: Daily i386
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['version-1-6','version-2-0', 'devel']"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"

.github/workflows/daily_nim_devel.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
call-multi-nim-common:
uses: ./.github/workflows/daily_common.yml
with:
nim-branch: "['devel']"
cpu: "['amd64']"


@@ -19,7 +19,7 @@ jobs:
- uses: jiro4989/setup-nim-action@v1
with:
nim-version: 'stable'
nim-version: '1.6.x'
- name: Generate doc
run: |


@@ -23,7 +23,7 @@ jobs:
- name: Build image
run: >
cd multidim-interop/nim/v1.0 &&
cd transport-interop/impl/nim/v1.0 &&
make commitSha=$GITHUB_SHA image_name=nim-libp2p-head
- name: Create ping-version.json
@@ -45,10 +45,24 @@ jobs:
]
}
EOF
) > ${{ github.workspace }}/test_head.json
- uses: libp2p/test-plans/.github/actions/run-interop-ping-test@master
- uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/test_head.json
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json


@@ -1,82 +0,0 @@
name: Daily
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
delete-cache:
runs-on: ubuntu-latest
steps:
- uses: snnaplab/delete-branch-cache-action@v1
build:
needs: delete-cache
timeout-minutes: 120
strategy:
fail-fast: false
matrix:
target:
- os: linux
cpu: amd64
- os: linux
cpu: i386
- os: macos
cpu: amd64
- os: windows
cpu: amd64
#- os: windows
#cpu: i386
branch: [version-1-2, version-1-6, devel]
include:
- target:
os: linux
builder: ubuntu-20.04
shell: bash
- target:
os: macos
builder: macos-12
shell: bash
- target:
os: windows
builder: windows-2019
shell: msys2 {0}
defaults:
run:
shell: ${{ matrix.shell }}
name: '${{ matrix.target.os }}-${{ matrix.target.cpu }} (Nim ${{ matrix.branch }})'
runs-on: ${{ matrix.builder }}
continue-on-error: ${{ matrix.branch == 'devel' }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Nim
uses: "./.github/actions/install_nim"
with:
os: ${{ matrix.target.os }}
shell: ${{ matrix.shell }}
nim_branch: ${{ matrix.branch }}
cpu: ${{ matrix.target.cpu }}
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: '~1.15.5'
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Run tests
run: |
nim --version
nimble --version
nimble install -y --depsOnly
NIMFLAGS="${NIMFLAGS} --gc:refc" nimble test
if [[ "${{ matrix.branch }}" == "devel" ]]; then
echo -e "\nTesting with '--gc:orc':\n"
NIMFLAGS="${NIMFLAGS} --gc:orc" nimble test
fi

.gitignore (1 line changed)

@@ -16,3 +16,4 @@ tests/pubsub/testgossipsub
examples/*.md
nimble.develop
nimble.paths
go-libp2p-daemon/

.pinned (29 lines changed)

@@ -1,16 +1,17 @@
bearssl;https://github.com/status-im/nim-bearssl@#acf9645e328bdcab481cfda1c158e07ecd46bd7b
bearssl;https://github.com/status-im/nim-bearssl@#e4157639db180e52727712a47deaefcbbac6ec86
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#5d3da66e563d21277b57a9b601744273c083a01b
dnsclient;https://github.com/ba0f3/dnsclient.nim@#fcd7443634b950eaea574e5eaa00a628ae029823
faststreams;https://github.com/status-im/nim-faststreams@#814f8927e1f356f39219f37f069b83066bcc893a
httputils;https://github.com/status-im/nim-http-utils@#a85bd52ae0a956983ca6b3267c72961d2ec0245f
json_serialization;https://github.com/status-im/nim-json-serialization@#a7d815ed92f200f490c95d3cfd722089cc923ce6
metrics;https://github.com/status-im/nim-metrics@#21e99a2e9d9f80e68bef65c80ef781613005fccb
nimcrypto;https://github.com/cheatfate/nimcrypto@#4014ef939b51e02053c2e16dd3481d47bc9267dd
secp256k1;https://github.com/status-im/nim-secp256k1@#fd173fdff863ce2e211cf64c9a03bc7539fe40b0
serialization;https://github.com/status-im/nim-serialization@#5b7cea55efeb074daa8abd8146a03a34adb4521a
stew;https://github.com/status-im/nim-stew@#407a59883691d362db2fe8eab7f7c3b1f75112ff
chronos;https://github.com/status-im/nim-chronos@#ba143e029f35fd9b4cd3d89d007cc834d0d5ba3c
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#720fc5e5c8e428d9d0af618e1e27c44b42350309
httputils;https://github.com/status-im/nim-http-utils@#3b491a40c60aad9e8d3407443f46f62511e63b18
json_serialization;https://github.com/status-im/nim-json-serialization@#85b7ea093cb85ee4f433a617b97571bd709d30df
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
nimcrypto;https://github.com/cheatfate/nimcrypto@#1c8d6e3caf3abc572136ae9a1da81730c4eb4288
results;https://github.com/arnetheduck/nim-results@#f3c666a272c69d70cb41e7245e7f6844797303ad
secp256k1;https://github.com/status-im/nim-secp256k1@#7246d91c667f4cc3759fdd50339caa45a2ecd8be
serialization;https://github.com/status-im/nim-serialization@#4bdbc29e54fe54049950e352bb969aab97173b35
stew;https://github.com/status-im/nim-stew@#3159137d9a3110edb4024145ce0ba778975de40e
testutils;https://github.com/status-im/nim-testutils@#dfc4c1b39f9ded9baf6365014de2b4bfb4dafc34
unittest2;https://github.com/status-im/nim-unittest2@#da8398c45cafd5bd7772da1fc96e3924a18d3823
websock;https://github.com/status-im/nim-websock@#fea05cde8b123b38d1a0a8524b77efbc84daa848
zlib;https://github.com/status-im/nim-zlib@#826e2fc013f55b4478802d4f2e39f187c50d520a
unittest2;https://github.com/status-im/nim-unittest2@#2300fa9924a76e6c96bc4ea79d043e3a0f27120c
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
zlib;https://github.com/status-im/nim-zlib@#38b72eda9d70067df4a953f56b5ed59630f2a17b


@@ -20,6 +20,7 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Go-libp2p-daemon](#go-libp2p-daemon)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
@@ -40,6 +41,8 @@ Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p'
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim version is 1.6.18.
```
nimble install libp2p
```
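As a quick smoke test after installing, here is a minimal hedged sketch that starts and stops a switch; it assumes the `newStandardSwitch` helper and its defaults shown in the builders.nim hunks further down:
```nim
import chronos, libp2p

proc main() {.async.} =
  let switch = newStandardSwitch()  # all-defaults helper from builders.nim
  await switch.start()
  echo "listening on ", switch.peerInfo.addrs
  await switch.stop()

waitFor main()
```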
@@ -47,11 +50,11 @@ nimble install libp2p
## Getting Started
You'll find the nim-libp2p documentation [here](https://status-im.github.io/nim-libp2p/docs/).
**Go Daemon:**
Please find the installation and usage instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
## Modules
List of modules implemented in nim-libp2p:
| Name | Description |
@@ -105,7 +108,7 @@ The versioning follows [semver](https://semver.org/), with some additions:
- Some libp2p procedures are marked `.public.`; they will remain compatible within each `MAJOR` version
- The rest of the procedures are considered internal, and can change at any `MINOR` version (but remain compatible for each new `PATCH`)
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.2 & 1.6`
We aim to be compatible at all times with at least 2 Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:


@@ -9,11 +9,8 @@ switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")
--define:chronosStrictException
--styleCheck:usages
if (NimMajor, NimMinor) < (1, 6):
--styleCheck:hint
else:
switch("warningAsError", "UseBase:on")
--styleCheck:error
switch("warningAsError", "UseBase:on")
--styleCheck:error
# Avoid some rare stack corruption while using exceptions with a SEH-enabled
# toolchain: https://github.com/status-im/nimbus-eth2/issues/3121


@@ -1,6 +1,8 @@
# Table of Contents
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
@@ -8,26 +10,29 @@
# Introduction
This is a libp2p-backed daemon wrapping the functionality of go-libp2p for use in Nim. <br>
For more information about the go daemon, check out [this repository](https://github.com/libp2p/go-libp2p-daemon).
> **Required only** for running the tests.
# Prerequisites
Go version `1.15.15`.
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script with the `go` command pointing at the correct Go version.
We recommend using `1.15.15`, as stated above.
```sh
# clone and install dependencies
git clone https://github.com/status-im/nim-libp2p
cd nim-libp2p
nimble install
# perform unit tests
nimble test
# update the git submodule to install the go daemon
git submodule update --init --recursive
go version
git clone https://github.com/libp2p/go-libp2p-daemon
cd go-libp2p-daemon
git checkout v0.0.1
go install ./...
cd ..
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our Discord and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage


@@ -13,7 +13,7 @@ type
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
await conn.writeLp("Roger p2p!")
@@ -40,7 +40,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
# The actual application
##
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng() # Single random number source for the whole application
# port 0 will take a random available port
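Per "remove redundant gcsafe annotations (#999)" in the commit list, these tutorial hunks drop the explicit `gcsafe` pragma from async procs; a minimal sketch of the resulting shape:
```nim
import chronos

proc main() {.async.} =  # previously {.async, gcsafe.}; the annotation is now redundant
  echo "hello from an async proc"

waitFor main()
```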


@@ -53,7 +53,7 @@ proc createSwitch(ma: MultiAddress, rng: ref HmacDrbgContext): Switch =
##
##
## Let's now start to create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
localAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()


@@ -25,7 +25,7 @@ type TestProto = ref object of LPProtocol
proc new(T: typedesc[TestProto]): T =
# every incoming connection will be handled in this closure
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
# Read up to 1024 bytes from this connection, and transform them into
# a string
echo "Got from remote - ", string.fromBytes(await conn.readLp(1024))
@@ -44,7 +44,7 @@ proc hello(p: TestProto, conn: Connection) {.async.} =
## Again, pretty straight-forward, we just send a message on the connection.
##
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let
rng = newRng()
testProto = TestProto.new()


@@ -108,7 +108,7 @@ type
proc new(_: typedesc[MetricProto], cb: MetricCallback): MetricProto =
var res: MetricProto
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
let
metrics = await res.metricGetter()
asProtobuf = metrics.encode()
@@ -126,7 +126,7 @@ proc fetch(p: MetricProto, conn: Connection): Future[MetricList] {.async.} =
return MetricList.decode(protobuf).tryGet()
## We can now create our main procedure:
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let rng = newRng()
proc randomMetricGenerator: Future[MetricList] {.async.} =
let metricCount = rng[].generate(uint32) mod 16


@@ -33,7 +33,7 @@ proc createSwitch(rdv: RendezVous = RendezVous.new()): Switch =
const DumbCodec = "/dumb/proto/1.0.0"
type DumbProto = ref object of LPProtocol
proc new(T: typedesc[DumbProto], nodeNumber: int): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
echo "Node", nodeNumber, " received: ", string.fromBytes(await conn.readLp(1024))
await conn.close()
return T.new(codecs = @[DumbCodec], handler = handle)
@@ -49,7 +49,7 @@ proc new(T: typedesc[DumbProto], nodeNumber: int): T =
## (rendezvous in this case) as a bootnode. For this example, we'll
## create a bootnode, and then every peer will advertise itself on the
## bootnode, and use it to find other peers
proc main() {.async, gcsafe.} =
proc main() {.async.} =
let bootNode = createSwitch()
await bootNode.start()


@@ -143,7 +143,7 @@ proc draw(g: Game) =
## peer know that we are available, check that it is also available,
## and launch the game.
proc new(T: typedesc[GameProto], g: Game): T =
proc handle(conn: Connection, proto: string) {.async, gcsafe.} =
proc handle(conn: Connection, proto: string) {.async.} =
defer: await conn.closeWithEof()
if g.peerFound.finished or g.hasCandidate:
await conn.close()


@@ -48,12 +48,9 @@ else:
stream/connection,
transports/transport,
transports/tcptransport,
transports/wstransport,
protocols/secure/noise,
protocols/ping,
cid,
multihash,
multibase,
multicodec,
errors,
switch,


@@ -1,13 +1,13 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.0.0"
version = "1.1.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.2.0",
requires "nim >= 1.6.0",
"nimcrypto >= 0.4.1",
"dnsclient >= 0.3.0 & < 0.4.0",
"bearssl >= 0.1.4",
@@ -17,14 +17,24 @@ requires "nim >= 1.2.0",
"secp256k1",
"stew#head",
"websock",
"unittest2 >= 0.0.5 & < 0.1.0"
"unittest2"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
let flags = getEnv("NIMFLAGS", "") # Extra flags for the compiler
let verbose = getEnv("V", "") notin ["", "0"]
let cfg =
" --styleCheck:usages --styleCheck:error" &
(if verbose: "" else: " --verbosity:0 --hints:off") &
" --skipParentCfg --skipUserCfg -f" &
" --threads:on --opt:speed"
import hashes, strutils
import hashes
proc runTest(filename: string, verify: bool = true, sign: bool = true,
moreoptions: string = "") =
var excstr = "nim c --skipParentCfg --opt:speed -d:debug "
excstr.add(" " & getEnv("NIMFLAGS") & " ")
excstr.add(" --verbosity:0 --hints:off ")
var excstr = nimc & " " & lang & " -d:debug " & cfg & " " & flags
excstr.add(" -d:libp2p_pubsub_sign=" & $sign)
excstr.add(" -d:libp2p_pubsub_verify=" & $verify)
excstr.add(" " & moreoptions & " ")
@@ -34,7 +44,7 @@ proc runTest(filename: string, verify: bool = true, sign: bool = true,
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
var excstr = "nim c --opt:speed --threads:on -d:debug --verbosity:0 --hints:off -p:. " & extraFlags
var excstr = nimc & " " & lang & " " & cfg & " " & flags & " -p:. " & extraFlags
excstr.add(" examples/" & filename)
exec excstr
if run:
@@ -42,7 +52,7 @@ proc buildSample(filename: string, run = false, extraFlags = "") =
rmFile "examples/" & filename.toExe
proc tutorialToMd(filename: string) =
let markdown = gorge "cat " & filename & " | nim c -r --verbosity:0 --hints:off tools/markdown_builder.nim "
let markdown = gorge "cat " & filename & " | " & nimc & " " & lang & " -r --verbosity:0 --hints:off tools/markdown_builder.nim "
writeFile(filename.replace(".nim", ".md"), markdown)
task testnative, "Runs libp2p native tests":
@@ -104,15 +114,12 @@ task examples_build, "Build the samples":
buildSample("circuitrelay", true)
buildSample("tutorial_1_connect", true)
buildSample("tutorial_2_customproto", true)
if (NimMajor, NimMinor) > (1, 2):
# These tutorials relies on post 1.4 exception tracking
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
# Nico doesn't work in 1.2
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
buildSample("tutorial_3_protobuf", true)
buildSample("tutorial_4_gossipsub", true)
buildSample("tutorial_5_discovery", true)
exec "nimble install -y nimpng@#HEAD" # this is to fix broken build on 1.7.3, remove it when nimpng version 0.3.2 or later is released
exec "nimble install -y nico"
buildSample("tutorial_6_game", false, "--styleCheck:off")
# pin system
# while nimble lockfile
@@ -123,7 +130,7 @@ task pin, "Create a lockfile":
# pinner.nim was originally here
# but you can't read output from
# a command in a nimscript
exec "nim c -r tools/pinner.nim"
exec nimc & " c -r tools/pinner.nim"
import sequtils
import os


@@ -16,10 +16,7 @@ runnableExamples:
# etc
.build()
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import
options, tables, chronos, chronicles, sequtils,
@@ -28,7 +25,7 @@ import
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager, upgrademngrs/muxedupgrade,
connmanager, upgrademngrs/muxedupgrade, observedaddrmanager,
nameresolving/nameresolver,
errors, utility
@@ -36,7 +33,7 @@ export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors
type
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [Defect].}
TransportProvider* {.public.} = proc(upgr: Upgrade): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise,
@@ -57,11 +54,12 @@ type
protoVersion: string
agentVersion: string
nameResolver: NameResolver
peerStoreCapacity: Option[int]
peerStoreCapacity: Opt[int]
autonat: bool
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
observedAddrManager: ObservedAddrManager
proc new*(T: type[SwitchBuilder]): T {.public.} =
## Creates a SwitchBuilder
@@ -124,8 +122,8 @@ proc withMplex*(
b.muxers.add(MuxerProvider.new(newMuxer, MplexCodec))
b
proc withYamux*(b: SwitchBuilder): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn)
proc withYamux*(b: SwitchBuilder, windowSize: int = YamuxDefaultWindowSize): SwitchBuilder =
proc newMuxer(conn: Connection): Muxer = Yamux.new(conn, windowSize)
assert b.muxers.countIt(it.codec == YamuxCodec) == 0, "Yamux built multiple times"
b.muxers.add(MuxerProvider.new(newMuxer, YamuxCodec))
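With the hunk above, `withYamux` takes a `windowSize` parameter (defaulting to `YamuxDefaultWindowSize`); a hedged sketch of configuring it through the builder (address and size are illustrative):
```nim
import libp2p

let switch = SwitchBuilder.new()
  .withRng(newRng())
  .withAddress(MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet())
  .withTcpTransport()
  .withYamux(windowSize = 1 shl 20)  # 1 MiB receive window
  .withNoise()
  .build()
```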
@@ -173,7 +171,7 @@ proc withMaxConnsPerPeer*(b: SwitchBuilder, maxConnsPerPeer: int): SwitchBuilder
b
proc withPeerStore*(b: SwitchBuilder, capacity: int): SwitchBuilder {.public.} =
b.peerStoreCapacity = some(capacity)
b.peerStoreCapacity = Opt.some(capacity)
b
proc withProtoVersion*(b: SwitchBuilder, protoVersion: string): SwitchBuilder {.public.} =
@@ -204,8 +202,12 @@ proc withServices*(b: SwitchBuilder, services: seq[Service]): SwitchBuilder =
b.services = services
b
proc withObservedAddrManager*(b: SwitchBuilder, observedAddrManager: ObservedAddrManager): SwitchBuilder =
b.observedAddrManager = observedAddrManager
b
proc build*(b: SwitchBuilder): Switch
{.raises: [Defect, LPError], public.} =
{.raises: [LPError], public.} =
if b.rng == nil: # newRng could fail
raise newException(Defect, "Cannot initialize RNG")
@@ -226,11 +228,16 @@ proc build*(b: SwitchBuilder): Switch
protoVersion = b.protoVersion,
agentVersion = b.agentVersion)
let identify =
if b.observedAddrManager != nil:
Identify.new(peerInfo, b.sendSignedPeerRecord, b.observedAddrManager)
else:
Identify.new(peerInfo, b.sendSignedPeerRecord)
let
identify = Identify.new(peerInfo, b.sendSignedPeerRecord)
connManager = ConnManager.new(b.maxConnsPerPeer, b.maxConnections, b.maxIn, b.maxOut)
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, connManager, ms)
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
let
transports = block:
@@ -245,9 +252,9 @@ proc build*(b: SwitchBuilder): Switch
if isNil(b.rng):
b.rng = newRng()
let peerStore =
if isSome(b.peerStoreCapacity):
PeerStore.new(identify, b.peerStoreCapacity.get())
let peerStore = block:
b.peerStoreCapacity.withValue(capacity):
PeerStore.new(identify, capacity)
else:
PeerStore.new(identify)
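The `Opt`/`withValue` pattern above replaces `isSome`/`get`; a hedged standalone sketch, assuming nim-libp2p's `withValue` helper from utility.nim (which, as used above, works as an expression with an `else` branch):
```nim
import libp2p/utility
import stew/results  # provides Opt

let capacity = Opt.some(1000)
let label = block:
  capacity.withValue(cap):
    "configured: " & $cap
  else:
    "default"
echo label  # "configured: 1000"
```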
@@ -296,11 +303,12 @@ proc newStandardSwitch*(
nameResolver: NameResolver = nil,
sendSignedPeerRecord = false,
peerStoreCapacity = 1000): Switch
{.raises: [Defect, LPError], public.} =
{.raises: [LPError], public.} =
## Helper for common switch configurations.
{.push warning[Deprecated]:off.}
if SecureProtocol.Secio in secureManagers:
quit("Secio is deprecated!") # use of secio is unsafe
{.pop.}
let addrs = when addrs is MultiAddress: @[addrs] else: addrs
var b = SwitchBuilder
@@ -318,7 +326,7 @@ proc newStandardSwitch*(
.withNameResolver(nameResolver)
.withNoise()
if privKey.isSome():
b = b.withPrivateKey(privKey.get())
privKey.withValue(pkey):
b = b.withPrivateKey(pkey)
b.build()


@@ -9,10 +9,7 @@
## This module implements CID (Content IDentifier).
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint
@@ -279,9 +276,6 @@ proc `$`*(cid: Cid): string =
BTCBase58.encode(cid.data.buffer)
elif cid.cidver == CIDv1:
let res = MultiBase.encode("base58btc", cid.data.buffer)
if res.isOk():
res.get()
else:
""
res.get("")
else:
""

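In the `$`(Cid) hunk above, `res.get("")` collapses the explicit ok/err branching by supplying a fallback; a small sketch with nim-results (pinned in `.pinned` above):
```nim
import stew/results

let bad  = Result[string, string].err("decode failed")
let good = Result[string, string].ok("zb2rhe")
echo bad.get("")   # prints "" instead of raising on an err value
echo good.get("")  # prints the contained value
```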

@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[options, tables, sequtils, sets]
import std/[tables, sequtils, sets]
import pkg/[chronos, chronicles, metrics]
import peerinfo,
peerstore,
@@ -51,7 +48,7 @@ type
ConnEventHandler* =
proc(peerId: PeerId, event: ConnEvent): Future[void]
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}
PeerEventKind* {.pure.} = enum
Left,
@@ -65,7 +62,7 @@ type
discard
PeerEventHandler* =
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [Defect].}
proc(peerId: PeerId, event: PeerEvent): Future[void] {.gcsafe, raises: [].}
ConnManager* = ref object of RootObj
maxConnsPerPeer: int
@@ -113,37 +110,25 @@ proc connectedPeers*(c: ConnManager, dir: Direction): seq[PeerId] =
peers.add(peerId)
return peers
proc getConnections*(c: ConnManager): Table[PeerId, seq[Muxer]] =
return c.muxed
proc addConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
## Add connection event handler - handlers must not raise exceptions!
##
try:
if isNil(handler): return
c.connEvents[kind].incl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
if isNil(handler): return
c.connEvents[kind].incl(handler)
proc removeConnEventHandler*(c: ConnManager,
handler: ConnEventHandler,
kind: ConnEventKind) =
try:
c.connEvents[kind].excl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
proc triggerConnEvent*(c: ConnManager,
peerId: PeerId,
event: ConnEvent) {.async, gcsafe.} =
event: ConnEvent) {.async.} =
try:
trace "About to trigger connection events", peer = peerId
if c.connEvents[event.kind].len() > 0:
@@ -166,30 +151,16 @@ proc addPeerEventHandler*(c: ConnManager,
##
if isNil(handler): return
try:
c.peerEvents[kind].incl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
c.peerEvents[kind].incl(handler)
proc removePeerEventHandler*(c: ConnManager,
handler: PeerEventHandler,
kind: PeerEventKind) =
try:
c.peerEvents[kind].excl(handler)
except Exception as exc:
# TODO: there is an Exception being raised
# somewhere in the depths of the std.
# Might be related to https://github.com/nim-lang/Nim/issues/17382
raiseAssert exc.msg
c.peerEvents[kind].excl(handler)
proc triggerPeerEvents*(c: ConnManager,
peerId: PeerId,
event: PeerEvent) {.async, gcsafe.} =
event: PeerEvent) {.async.} =
trace "About to trigger peer events", peer = peerId
if c.peerEvents[event.kind].len == 0:
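With the try/except wrappers gone (the std-lib `Exception` workaround above is no longer needed), event handlers are registered directly and must not raise; a hedged registration sketch (a `connManager: ConnManager` in scope is assumed, and the handler body is illustrative):
```nim
proc onPeerJoined(peerId: PeerId, event: PeerEvent) {.async.} =
  trace "peer joined", peerId  # handlers must not raise exceptions

connManager.addPeerEventHandler(onPeerJoined, PeerEventKind.Joined)
```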
@@ -311,7 +282,7 @@ proc selectMuxer*(c: ConnManager, peerId: PeerId): Muxer =
proc storeMuxer*(c: ConnManager,
muxer: Muxer)
{.raises: [Defect, CatchableError].} =
{.raises: [CatchableError].} =
## store the connection and muxer
##
@@ -364,7 +335,7 @@ proc getIncomingSlot*(c: ConnManager): Future[ConnectionSlot] {.async.} =
await c.inSema.acquire()
return ConnectionSlot(connManager: c, direction: In)
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [Defect, TooManyConnectionsError].} =
proc getOutgoingSlot*(c: ConnManager, forceDial = false): ConnectionSlot {.raises: [TooManyConnectionsError].} =
if forceDial:
c.outSema.forceAcquire()
elif not c.outSema.tryAcquire():
@@ -408,7 +379,7 @@ proc trackMuxer*(cs: ConnectionSlot, mux: Muxer) =
cs.trackConnection(mux.connection)
proc getStream*(c: ConnManager,
muxer: Muxer): Future[Connection] {.async, gcsafe.} =
muxer: Muxer): Future[Connection] {.async.} =
## get a muxed stream for the passed muxer
##
@@ -416,7 +387,7 @@ proc getStream*(c: ConnManager,
return await muxer.newStream()
proc getStream*(c: ConnManager,
peerId: PeerId): Future[Connection] {.async, gcsafe.} =
peerId: PeerId): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from any connection
##
@@ -424,7 +395,7 @@ proc getStream*(c: ConnManager,
proc getStream*(c: ConnManager,
peerId: PeerId,
dir: Direction): Future[Connection] {.async, gcsafe.} =
dir: Direction): Future[Connection] {.async.} =
## get a muxed stream for the passed peer from a connection with `dir`
##


@@ -15,14 +15,11 @@
# RFC @ https://tools.ietf.org/html/rfc7539
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/blockx
from stew/assign2 import assign
from stew/ranges/ptr_arith import baseAddr
from stew/ptrops import baseAddr
const
ChaChaPolyKeySize = 32


@@ -8,10 +8,7 @@
# those terms.
## This module implements Public Key and Private Key interface for libp2p.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
from strutils import split, strip, cmpIgnoreCase
@@ -68,11 +65,13 @@ when supported(PKScheme.Ed25519):
import ed25519/ed25519
when supported(PKScheme.Secp256k1):
import secp
when supported(PKScheme.ECDSA):
import ecnist
# We still import `ecnist` because it is used for the SECIO handshake,
# but it will be impossible to create or import ECNIST keys.
# These used to be declared in `crypto` itself
export ecnist.ephemeral, ecnist.ECDHEScheme
import ecnist, bearssl/rand, bearssl/hash as bhash
import bearssl/rand, bearssl/hash as bhash
import ../protobuf/minprotobuf, ../vbuffer, ../multihash, ../multicodec
import nimcrypto/[rijndael, twofish, sha2, hash, hmac]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -89,8 +88,6 @@ type
Sha256,
Sha512
ECDHEScheme* = EcCurveKind
PublicKey* = object
case scheme*: PKScheme
of PKScheme.RSA:
@@ -458,7 +455,8 @@ proc getBytes*(sig: Signature): seq[byte] =
## Return signature ``sig`` in binary form.
result = sig.data
proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
template initImpl[T: PrivateKey|PublicKey](
key: var T, data: openArray[byte]): bool =
## Initialize a private or public key ``key`` from libp2p's protobuf-serialized
## raw binary form.
##
@@ -471,7 +469,7 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
var pb = initProtoBuffer(@data)
let r1 = pb.getField(1, id)
let r2 = pb.getField(2, buffer)
if not(r1.isOk() and r1.get() and r2.isOk() and r2.get()):
if not(r1.get(false) and r2.get(false)):
false
else:
if cast[int8](id) notin SupportedSchemesInt or len(buffer) <= 0:
@@ -520,6 +518,14 @@ proc init*[T: PrivateKey|PublicKey](key: var T, data: openArray[byte]): bool =
else:
false
{.push warning[ProveField]:off.} # https://github.com/nim-lang/Nim/issues/22060
proc init*(key: var PrivateKey, data: openArray[byte]): bool =
initImpl(key, data)
proc init*(key: var PublicKey, data: openArray[byte]): bool =
initImpl(key, data)
{.pop.}
proc init*(sig: var Signature, data: openArray[byte]): bool =
## Initialize signature ``sig`` from raw binary form.
##
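The `initImpl` split above sidesteps a `ProveField` warning while keeping the public API; a hedged round-trip sketch of the resulting `init` overloads (scheme and helper calls assume nim-libp2p's crypto module):
```nim
import libp2p/crypto/crypto

let rng = newRng()
let priv = PrivateKey.random(PKScheme.Ed25519, rng[]).tryGet()
var decoded: PrivateKey
doAssert decoded.init(priv.getBytes().tryGet())  # protobuf round-trip
doAssert decoded == priv
```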
@@ -873,34 +879,6 @@ proc mac*(secret: Secret, id: int): seq[byte] {.inline.} =
offset += secret.ivsize + secret.keysize
copyMem(addr result[0], unsafeAddr secret.data[offset], secret.macsize)
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng).orError(KeyError)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng).orError(KeyError)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng).orError(KeyError)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): CryptoResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384 and P-521; if the
## encoding string is not supported, a P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
proc getOrder*(remotePubkey, localNonce: openArray[byte],
localPubkey, remoteNonce: openArray[byte]): CryptoResult[int] =
## Compare values and calculate `order` parameter.
@@ -976,9 +954,8 @@ proc decodeProposal*(message: seq[byte], nonce, pubkey: var seq[byte],
let r4 = pb.getField(4, ciphers)
let r5 = pb.getField(5, hashes)
r1.isOk() and r1.get() and r2.isOk() and r2.get() and
r3.isOk() and r3.get() and r4.isOk() and r4.get() and
r5.isOk() and r5.get()
r1.get(false) and r2.get(false) and r3.get(false) and
r4.get(false) and r5.get(false)
proc createExchange*(epubkey, signature: openArray[byte]): seq[byte] =
## Create SecIO exchange message using ephemeral public key ``epubkey`` and
@@ -998,32 +975,32 @@ proc decodeExchange*(message: seq[byte],
var pb = initProtoBuffer(message)
let r1 = pb.getField(1, pubkey)
let r2 = pb.getField(2, signature)
r1.isOk() and r1.get() and r2.isOk() and r2.get()
r1.get(false) and r2.get(false)
## Serialization/Deserialization helpers
proc write*(vb: var VBuffer, pubkey: PublicKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write PublicKey value ``pubkey`` to buffer ``vb``.
vb.writeSeq(pubkey.getBytes().tryGet())
proc write*(vb: var VBuffer, seckey: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write PrivateKey value ``seckey`` to buffer ``vb``.
vb.writeSeq(seckey.getBytes().tryGet())
proc write*(vb: var VBuffer, sig: PrivateKey) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
## Write Signature value ``sig`` to buffer ``vb``.
vb.writeSeq(sig.getBytes().tryGet())
proc write*[T: PublicKey|PrivateKey](pb: var ProtoBuffer, field: int,
key: T) {.
inline, raises: [Defect, ResultError[CryptoError]].} =
inline, raises: [ResultError[CryptoError]].} =
write(pb, field, key.getBytes().tryGet())
proc write*(pb: var ProtoBuffer, field: int, sig: Signature) {.
inline, raises: [Defect].} =
inline, raises: [].} =
write(pb, field, sig.getBytes())
proc getField*[T: PublicKey|PrivateKey](pb: ProtoBuffer, field: int,


@@ -15,12 +15,9 @@
# RFC @ https://tools.ietf.org/html/rfc7748
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/[ec, rand, hash]
import bearssl/[ec, rand]
import stew/results
from stew/assign2 import assign
export results


@@ -14,10 +14,7 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/[ec, rand, hash]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
@@ -25,6 +22,9 @@ import nimcrypto/utils as ncrutils
import minasn1
export minasn1.Asn1Error
import stew/[results, ctops]
import ../utility
export results
const
@@ -74,7 +74,7 @@ type
EcResult*[T] = Result[T, EcError]
const
EcSupportedCurvesCint* = {cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)}
EcSupportedCurvesCint* = @[cint(Secp256r1), cint(Secp384r1), cint(Secp521r1)]
proc `-`(x: uint32): uint32 {.inline.} =
result = (0xFFFF_FFFF'u32 - x) + 1'u32
@@ -243,7 +243,7 @@ proc random*(
var res = new EcPrivateKey
if ecKeygen(addr rng.vtable, ecimp,
addr res.key, addr res.buffer[0],
cast[cint](kind)) == 0:
safeConvert[cint](kind)) == 0:
err(EcKeyGenError)
else:
ok(res)
@@ -630,11 +630,11 @@ proc init*(key: var EcPrivateKey, data: openArray[byte]): Result[void, Asn1Error
return err(Asn1Error.Incorrect)
if oid == Asn1OidSecp256r1:
curve = cast[cint](Secp256r1)
curve = safeConvert[cint](Secp256r1)
elif oid == Asn1OidSecp384r1:
curve = cast[cint](Secp384r1)
curve = safeConvert[cint](Secp384r1)
elif oid == Asn1OidSecp521r1:
curve = cast[cint](Secp521r1)
curve = safeConvert[cint](Secp521r1)
else:
return err(Asn1Error.Incorrect)
@@ -684,11 +684,11 @@ proc init*(pubkey: var EcPublicKey, data: openArray[byte]): Result[void, Asn1Err
return err(Asn1Error.Incorrect)
if oid == Asn1OidSecp256r1:
curve = cast[cint](Secp256r1)
curve = safeConvert[cint](Secp256r1)
elif oid == Asn1OidSecp384r1:
curve = cast[cint](Secp384r1)
curve = safeConvert[cint](Secp384r1)
elif oid == Asn1OidSecp521r1:
curve = cast[cint](Secp521r1)
curve = safeConvert[cint](Secp521r1)
else:
return err(Asn1Error.Incorrect)
@@ -774,13 +774,13 @@ proc initRaw*(key: var EcPrivateKey, data: openArray[byte]): bool =
## Procedure returns ``true`` on success, ``false`` otherwise.
var curve: cint
if len(data) == SecKey256Length:
curve = cast[cint](Secp256r1)
curve = safeConvert[cint](Secp256r1)
result = true
elif len(data) == SecKey384Length:
curve = cast[cint](Secp384r1)
curve = safeConvert[cint](Secp384r1)
result = true
elif len(data) == SecKey521Length:
curve = cast[cint](Secp521r1)
curve = safeConvert[cint](Secp521r1)
result = true
if result:
result = false
@@ -805,13 +805,13 @@ proc initRaw*(pubkey: var EcPublicKey, data: openArray[byte]): bool =
if len(data) > 0:
if data[0] == 0x04'u8:
if len(data) == PubKey256Length:
curve = cast[cint](Secp256r1)
curve = safeConvert[cint](Secp256r1)
result = true
elif len(data) == PubKey384Length:
curve = cast[cint](Secp384r1)
curve = safeConvert[cint](Secp384r1)
result = true
elif len(data) == PubKey521Length:
curve = cast[cint](Secp521r1)
curve = safeConvert[cint](Secp521r1)
result = true
if result:
result = false
@@ -994,3 +994,33 @@ proc verify*[T: byte|char](sig: EcSignature, message: openArray[T],
# Clear context with initial value
kv.init(addr hc.vtable)
result = (res == 1)
type ECDHEScheme* = EcCurveKind
proc ephemeral*(
scheme: ECDHEScheme,
rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE.
var keypair: EcKeyPair
if scheme == Secp256r1:
keypair = ? EcKeyPair.random(Secp256r1, rng)
elif scheme == Secp384r1:
keypair = ? EcKeyPair.random(Secp384r1, rng)
elif scheme == Secp521r1:
keypair = ? EcKeyPair.random(Secp521r1, rng)
ok(keypair)
proc ephemeral*(
scheme: string, rng: var HmacDrbgContext): EcResult[EcKeyPair] =
## Generate ephemeral keys used to perform ECDHE using string encoding.
##
## Currently supported encoding strings are P-256, P-384 and P-521; if the
## encoding string is not supported, a P-521 key will be generated.
if scheme == "P-256":
ephemeral(Secp256r1, rng)
elif scheme == "P-384":
ephemeral(Secp384r1, rng)
elif scheme == "P-521":
ephemeral(Secp521r1, rng)
else:
ephemeral(Secp521r1, rng)
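This hunk moves `ephemeral` from crypto.nim into ecnist (with crypto re-exporting it, per the export shown earlier in this diff); a hedged usage sketch:
```nim
import libp2p/crypto/[crypto, ecnist]

let rng = newRng()
let keypair = ephemeral(Secp256r1, rng[]).tryGet()  # EcResult[EcKeyPair]
```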


@@ -11,10 +11,7 @@
## This code is a port of the public domain, "ref10" implementation of ed25519
## from SUPERCOP.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/rand
import constants
@@ -22,6 +19,9 @@ import nimcrypto/[hash, sha2]
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import stew/[results, ctops]
import ../../utility
export results
# This workaround needed because of some bugs in Nim Static[T].
@@ -170,15 +170,15 @@ proc feCopy(h: var Fe, f: Fe) =
h[9] = f9
proc load_3(inp: openArray[byte]): uint64 =
result = cast[uint64](inp[0])
result = result or (cast[uint64](inp[1]) shl 8)
result = result or (cast[uint64](inp[2]) shl 16)
result = safeConvert[uint64](inp[0])
result = result or (safeConvert[uint64](inp[1]) shl 8)
result = result or (safeConvert[uint64](inp[2]) shl 16)
proc load_4(inp: openArray[byte]): uint64 =
result = cast[uint64](inp[0])
result = result or (cast[uint64](inp[1]) shl 8)
result = result or (cast[uint64](inp[2]) shl 16)
result = result or (cast[uint64](inp[3]) shl 24)
result = safeConvert[uint64](inp[0])
result = result or (safeConvert[uint64](inp[1]) shl 8)
result = result or (safeConvert[uint64](inp[2]) shl 16)
result = result or (safeConvert[uint64](inp[3]) shl 24)
proc feFromBytes(h: var Fe, s: openArray[byte]) =
var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
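The `cast` → `safeConvert` replacement in `load_3`/`load_4` above (continued at scale in `feMul` below) swaps unchecked casts for checked conversions; a hedged illustration, assuming `safeConvert` from libp2p/utility accepts only value-preserving integer conversions:
```nim
import libp2p/utility

let b = 0x2A'u8
doAssert safeConvert[uint64](b) == 42'u64  # widening is always value-preserving
# cast[int8](300'u16) would silently truncate; safeConvert[int8](300'u16)
# is assumed to fail at compile time instead.
```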
@@ -299,106 +299,106 @@ proc feMul(h: var Fe, f, g: Fe) =
var f5_2 = 2 * f5
var f7_2 = 2 * f7
var f9_2 = 2 * f9
var f0g0 = cast[int64](f0) * cast[int64](g0)
var f0g1 = cast[int64](f0) * cast[int64](g1)
var f0g2 = cast[int64](f0) * cast[int64](g2)
var f0g3 = cast[int64](f0) * cast[int64](g3)
var f0g4 = cast[int64](f0) * cast[int64](g4)
var f0g5 = cast[int64](f0) * cast[int64](g5)
var f0g6 = cast[int64](f0) * cast[int64](g6)
var f0g7 = cast[int64](f0) * cast[int64](g7)
var f0g8 = cast[int64](f0) * cast[int64](g8)
var f0g9 = cast[int64](f0) * cast[int64](g9)
var f1g0 = cast[int64](f1) * cast[int64](g0)
var f1g1_2 = cast[int64](f1_2) * cast[int64](g1)
var f1g2 = cast[int64](f1) * cast[int64](g2)
var f1g3_2 = cast[int64](f1_2) * cast[int64](g3)
var f1g4 = cast[int64](f1) * cast[int64](g4)
var f1g5_2 = cast[int64](f1_2) * cast[int64](g5)
var f1g6 = cast[int64](f1) * cast[int64](g6)
var f1g7_2 = cast[int64](f1_2) * cast[int64](g7)
var f1g8 = cast[int64](f1) * cast[int64](g8)
var f1g9_38 = cast[int64](f1_2) * cast[int64](g9_19)
var f2g0 = cast[int64](f2) * cast[int64](g0)
var f2g1 = cast[int64](f2) * cast[int64](g1)
var f2g2 = cast[int64](f2) * cast[int64](g2)
var f2g3 = cast[int64](f2) * cast[int64](g3)
var f2g4 = cast[int64](f2) * cast[int64](g4)
var f2g5 = cast[int64](f2) * cast[int64](g5)
var f2g6 = cast[int64](f2) * cast[int64](g6)
var f2g7 = cast[int64](f2) * cast[int64](g7)
var f2g8_19 = cast[int64](f2) * cast[int64](g8_19)
var f2g9_19 = cast[int64](f2) * cast[int64](g9_19)
var f3g0 = cast[int64](f3) * cast[int64](g0)
var f3g1_2 = cast[int64](f3_2) * cast[int64](g1)
var f3g2 = cast[int64](f3) * cast[int64](g2)
var f3g3_2 = cast[int64](f3_2) * cast[int64](g3)
var f3g4 = cast[int64](f3) * cast[int64](g4)
var f3g5_2 = cast[int64](f3_2) * cast[int64](g5)
var f3g6 = cast[int64](f3) * cast[int64](g6)
var f3g7_38 = cast[int64](f3_2) * cast[int64](g7_19)
var f3g8_19 = cast[int64](f3) * cast[int64](g8_19)
var f3g9_38 = cast[int64](f3_2) * cast[int64](g9_19)
var f4g0 = cast[int64](f4) * cast[int64](g0)
var f4g1 = cast[int64](f4) * cast[int64](g1)
var f4g2 = cast[int64](f4) * cast[int64](g2)
var f4g3 = cast[int64](f4) * cast[int64](g3)
var f4g4 = cast[int64](f4) * cast[int64](g4)
var f4g5 = cast[int64](f4) * cast[int64](g5)
var f4g6_19 = cast[int64](f4) * cast[int64](g6_19)
var f4g7_19 = cast[int64](f4) * cast[int64](g7_19)
var f4g8_19 = cast[int64](f4) * cast[int64](g8_19)
var f4g9_19 = cast[int64](f4) * cast[int64](g9_19)
var f5g0 = cast[int64](f5) * cast[int64](g0)
var f5g1_2 = cast[int64](f5_2) * cast[int64](g1)
var f5g2 = cast[int64](f5) * cast[int64](g2)
var f5g3_2 = cast[int64](f5_2) * cast[int64](g3)
var f5g4 = cast[int64](f5) * cast[int64](g4)
var f5g5_38 = cast[int64](f5_2) * cast[int64](g5_19)
var f5g6_19 = cast[int64](f5) * cast[int64](g6_19)
var f5g7_38 = cast[int64](f5_2) * cast[int64](g7_19)
var f5g8_19 = cast[int64](f5) * cast[int64](g8_19)
var f5g9_38 = cast[int64](f5_2) * cast[int64](g9_19)
var f6g0 = cast[int64](f6) * cast[int64](g0)
var f6g1 = cast[int64](f6) * cast[int64](g1)
var f6g2 = cast[int64](f6) * cast[int64](g2)
var f6g3 = cast[int64](f6) * cast[int64](g3)
var f6g4_19 = cast[int64](f6) * cast[int64](g4_19)
var f6g5_19 = cast[int64](f6) * cast[int64](g5_19)
var f6g6_19 = cast[int64](f6) * cast[int64](g6_19)
var f6g7_19 = cast[int64](f6) * cast[int64](g7_19)
var f6g8_19 = cast[int64](f6) * cast[int64](g8_19)
var f6g9_19 = cast[int64](f6) * cast[int64](g9_19)
var f7g0 = cast[int64](f7) * cast[int64](g0)
var f7g1_2 = cast[int64](f7_2) * cast[int64](g1)
var f7g2 = cast[int64](f7) * cast[int64](g2)
var f7g3_38 = cast[int64](f7_2) * cast[int64](g3_19)
var f7g4_19 = cast[int64](f7) * cast[int64](g4_19)
var f7g5_38 = cast[int64](f7_2) * cast[int64](g5_19)
var f7g6_19 = cast[int64](f7) * cast[int64](g6_19)
var f7g7_38 = cast[int64](f7_2) * cast[int64](g7_19)
var f7g8_19 = cast[int64](f7) * cast[int64](g8_19)
var f7g9_38 = cast[int64](f7_2) * cast[int64](g9_19)
var f8g0 = cast[int64](f8) * cast[int64](g0)
var f8g1 = cast[int64](f8) * cast[int64](g1)
var f8g2_19 = cast[int64](f8) * cast[int64](g2_19)
var f8g3_19 = cast[int64](f8) * cast[int64](g3_19)
var f8g4_19 = cast[int64](f8) * cast[int64](g4_19)
var f8g5_19 = cast[int64](f8) * cast[int64](g5_19)
var f8g6_19 = cast[int64](f8) * cast[int64](g6_19)
var f8g7_19 = cast[int64](f8) * cast[int64](g7_19)
var f8g8_19 = cast[int64](f8) * cast[int64](g8_19)
var f8g9_19 = cast[int64](f8) * cast[int64](g9_19)
var f9g0 = cast[int64](f9) * cast[int64](g0)
var f9g1_38 = cast[int64](f9_2) * cast[int64](g1_19)
var f9g2_19 = cast[int64](f9) * cast[int64](g2_19)
var f9g3_38 = cast[int64](f9_2) * cast[int64](g3_19)
var f9g4_19 = cast[int64](f9) * cast[int64](g4_19)
var f9g5_38 = cast[int64](f9_2) * cast[int64](g5_19)
var f9g6_19 = cast[int64](f9) * cast[int64](g6_19)
var f9g7_38 = cast[int64](f9_2) * cast[int64](g7_19)
var f9g8_19 = cast[int64](f9) * cast[int64](g8_19)
var f9g9_38 = cast[int64](f9_2) * cast[int64](g9_19)
var f0g0 = safeConvert[int64](f0) * safeConvert[int64](g0)
var f0g1 = safeConvert[int64](f0) * safeConvert[int64](g1)
var f0g2 = safeConvert[int64](f0) * safeConvert[int64](g2)
var f0g3 = safeConvert[int64](f0) * safeConvert[int64](g3)
var f0g4 = safeConvert[int64](f0) * safeConvert[int64](g4)
var f0g5 = safeConvert[int64](f0) * safeConvert[int64](g5)
var f0g6 = safeConvert[int64](f0) * safeConvert[int64](g6)
var f0g7 = safeConvert[int64](f0) * safeConvert[int64](g7)
var f0g8 = safeConvert[int64](f0) * safeConvert[int64](g8)
var f0g9 = safeConvert[int64](f0) * safeConvert[int64](g9)
var f1g0 = safeConvert[int64](f1) * safeConvert[int64](g0)
var f1g1_2 = safeConvert[int64](f1_2) * safeConvert[int64](g1)
var f1g2 = safeConvert[int64](f1) * safeConvert[int64](g2)
var f1g3_2 = safeConvert[int64](f1_2) * safeConvert[int64](g3)
var f1g4 = safeConvert[int64](f1) * safeConvert[int64](g4)
var f1g5_2 = safeConvert[int64](f1_2) * safeConvert[int64](g5)
var f1g6 = safeConvert[int64](f1) * safeConvert[int64](g6)
var f1g7_2 = safeConvert[int64](f1_2) * safeConvert[int64](g7)
var f1g8 = safeConvert[int64](f1) * safeConvert[int64](g8)
var f1g9_38 = safeConvert[int64](f1_2) * safeConvert[int64](g9_19)
var f2g0 = safeConvert[int64](f2) * safeConvert[int64](g0)
var f2g1 = safeConvert[int64](f2) * safeConvert[int64](g1)
var f2g2 = safeConvert[int64](f2) * safeConvert[int64](g2)
var f2g3 = safeConvert[int64](f2) * safeConvert[int64](g3)
var f2g4 = safeConvert[int64](f2) * safeConvert[int64](g4)
var f2g5 = safeConvert[int64](f2) * safeConvert[int64](g5)
var f2g6 = safeConvert[int64](f2) * safeConvert[int64](g6)
var f2g7 = safeConvert[int64](f2) * safeConvert[int64](g7)
var f2g8_19 = safeConvert[int64](f2) * safeConvert[int64](g8_19)
var f2g9_19 = safeConvert[int64](f2) * safeConvert[int64](g9_19)
var f3g0 = safeConvert[int64](f3) * safeConvert[int64](g0)
var f3g1_2 = safeConvert[int64](f3_2) * safeConvert[int64](g1)
var f3g2 = safeConvert[int64](f3) * safeConvert[int64](g2)
var f3g3_2 = safeConvert[int64](f3_2) * safeConvert[int64](g3)
var f3g4 = safeConvert[int64](f3) * safeConvert[int64](g4)
var f3g5_2 = safeConvert[int64](f3_2) * safeConvert[int64](g5)
var f3g6 = safeConvert[int64](f3) * safeConvert[int64](g6)
var f3g7_38 = safeConvert[int64](f3_2) * safeConvert[int64](g7_19)
var f3g8_19 = safeConvert[int64](f3) * safeConvert[int64](g8_19)
var f3g9_38 = safeConvert[int64](f3_2) * safeConvert[int64](g9_19)
var f4g0 = safeConvert[int64](f4) * safeConvert[int64](g0)
var f4g1 = safeConvert[int64](f4) * safeConvert[int64](g1)
var f4g2 = safeConvert[int64](f4) * safeConvert[int64](g2)
var f4g3 = safeConvert[int64](f4) * safeConvert[int64](g3)
var f4g4 = safeConvert[int64](f4) * safeConvert[int64](g4)
var f4g5 = safeConvert[int64](f4) * safeConvert[int64](g5)
var f4g6_19 = safeConvert[int64](f4) * safeConvert[int64](g6_19)
var f4g7_19 = safeConvert[int64](f4) * safeConvert[int64](g7_19)
var f4g8_19 = safeConvert[int64](f4) * safeConvert[int64](g8_19)
var f4g9_19 = safeConvert[int64](f4) * safeConvert[int64](g9_19)
var f5g0 = safeConvert[int64](f5) * safeConvert[int64](g0)
var f5g1_2 = safeConvert[int64](f5_2) * safeConvert[int64](g1)
var f5g2 = safeConvert[int64](f5) * safeConvert[int64](g2)
var f5g3_2 = safeConvert[int64](f5_2) * safeConvert[int64](g3)
var f5g4 = safeConvert[int64](f5) * safeConvert[int64](g4)
var f5g5_38 = safeConvert[int64](f5_2) * safeConvert[int64](g5_19)
var f5g6_19 = safeConvert[int64](f5) * safeConvert[int64](g6_19)
var f5g7_38 = safeConvert[int64](f5_2) * safeConvert[int64](g7_19)
var f5g8_19 = safeConvert[int64](f5) * safeConvert[int64](g8_19)
var f5g9_38 = safeConvert[int64](f5_2) * safeConvert[int64](g9_19)
var f6g0 = safeConvert[int64](f6) * safeConvert[int64](g0)
var f6g1 = safeConvert[int64](f6) * safeConvert[int64](g1)
var f6g2 = safeConvert[int64](f6) * safeConvert[int64](g2)
var f6g3 = safeConvert[int64](f6) * safeConvert[int64](g3)
var f6g4_19 = safeConvert[int64](f6) * safeConvert[int64](g4_19)
var f6g5_19 = safeConvert[int64](f6) * safeConvert[int64](g5_19)
var f6g6_19 = safeConvert[int64](f6) * safeConvert[int64](g6_19)
var f6g7_19 = safeConvert[int64](f6) * safeConvert[int64](g7_19)
var f6g8_19 = safeConvert[int64](f6) * safeConvert[int64](g8_19)
var f6g9_19 = safeConvert[int64](f6) * safeConvert[int64](g9_19)
var f7g0 = safeConvert[int64](f7) * safeConvert[int64](g0)
var f7g1_2 = safeConvert[int64](f7_2) * safeConvert[int64](g1)
var f7g2 = safeConvert[int64](f7) * safeConvert[int64](g2)
var f7g3_38 = safeConvert[int64](f7_2) * safeConvert[int64](g3_19)
var f7g4_19 = safeConvert[int64](f7) * safeConvert[int64](g4_19)
var f7g5_38 = safeConvert[int64](f7_2) * safeConvert[int64](g5_19)
var f7g6_19 = safeConvert[int64](f7) * safeConvert[int64](g6_19)
var f7g7_38 = safeConvert[int64](f7_2) * safeConvert[int64](g7_19)
var f7g8_19 = safeConvert[int64](f7) * safeConvert[int64](g8_19)
var f7g9_38 = safeConvert[int64](f7_2) * safeConvert[int64](g9_19)
var f8g0 = safeConvert[int64](f8) * safeConvert[int64](g0)
var f8g1 = safeConvert[int64](f8) * safeConvert[int64](g1)
var f8g2_19 = safeConvert[int64](f8) * safeConvert[int64](g2_19)
var f8g3_19 = safeConvert[int64](f8) * safeConvert[int64](g3_19)
var f8g4_19 = safeConvert[int64](f8) * safeConvert[int64](g4_19)
var f8g5_19 = safeConvert[int64](f8) * safeConvert[int64](g5_19)
var f8g6_19 = safeConvert[int64](f8) * safeConvert[int64](g6_19)
var f8g7_19 = safeConvert[int64](f8) * safeConvert[int64](g7_19)
var f8g8_19 = safeConvert[int64](f8) * safeConvert[int64](g8_19)
var f8g9_19 = safeConvert[int64](f8) * safeConvert[int64](g9_19)
var f9g0 = safeConvert[int64](f9) * safeConvert[int64](g0)
var f9g1_38 = safeConvert[int64](f9_2) * safeConvert[int64](g1_19)
var f9g2_19 = safeConvert[int64](f9) * safeConvert[int64](g2_19)
var f9g3_38 = safeConvert[int64](f9_2) * safeConvert[int64](g3_19)
var f9g4_19 = safeConvert[int64](f9) * safeConvert[int64](g4_19)
var f9g5_38 = safeConvert[int64](f9_2) * safeConvert[int64](g5_19)
var f9g6_19 = safeConvert[int64](f9) * safeConvert[int64](g6_19)
var f9g7_38 = safeConvert[int64](f9_2) * safeConvert[int64](g7_19)
var f9g8_19 = safeConvert[int64](f9) * safeConvert[int64](g8_19)
var f9g9_38 = safeConvert[int64](f9_2) * safeConvert[int64](g9_19)
var
c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
h0: int64 = f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 +
@@ -493,7 +493,7 @@ proc verify32(x: openArray[byte], y: openArray[byte]): int32 =
proc feIsNegative(f: Fe): int32 =
var s: array[32, byte]
feToBytes(s, f)
result = cast[int32](s[0] and 1'u8)
result = safeConvert[int32](s[0] and 1'u8)
proc feIsNonZero(f: Fe): int32 =
var s: array[32, byte]
@@ -516,61 +516,61 @@ proc feSq(h: var Fe, f: Fe) =
var f7_38: int32 = 38 * f7
var f8_19: int32 = 19 * f8
var f9_38: int32 = 38 * f9
var f0f0: int64 = f0 * cast[int64](f0)
var f0f1_2: int64 = f0_2 * cast[int64](f1)
var f0f2_2: int64 = f0_2 * cast[int64](f2)
var f0f3_2: int64 = f0_2 * cast[int64](f3)
var f0f4_2: int64 = f0_2 * cast[int64](f4)
var f0f5_2: int64 = f0_2 * cast[int64](f5)
var f0f6_2: int64 = f0_2 * cast[int64](f6)
var f0f7_2: int64 = f0_2 * cast[int64](f7)
var f0f8_2: int64 = f0_2 * cast[int64](f8)
var f0f9_2: int64 = f0_2 * cast[int64](f9)
var f1f1_2: int64 = f1_2 * cast[int64](f1)
var f1f2_2: int64 = f1_2 * cast[int64](f2)
var f1f3_4: int64 = f1_2 * cast[int64](f3_2)
var f1f4_2: int64 = f1_2 * cast[int64](f4)
var f1f5_4: int64 = f1_2 * cast[int64](f5_2)
var f1f6_2: int64 = f1_2 * cast[int64](f6)
var f1f7_4: int64 = f1_2 * cast[int64](f7_2)
var f1f8_2: int64 = f1_2 * cast[int64](f8)
var f1f9_76: int64 = f1_2 * cast[int64](f9_38)
var f2f2: int64 = f2 * cast[int64](f2)
var f2f3_2: int64 = f2_2 * cast[int64](f3)
var f2f4_2: int64 = f2_2 * cast[int64](f4)
var f2f5_2: int64 = f2_2 * cast[int64](f5)
var f2f6_2: int64 = f2_2 * cast[int64](f6)
var f2f7_2: int64 = f2_2 * cast[int64](f7)
var f2f8_38: int64 = f2_2 * cast[int64](f8_19)
var f2f9_38: int64 = f2 * cast[int64](f9_38)
var f3f3_2: int64 = f3_2 * cast[int64](f3)
var f3f4_2: int64 = f3_2 * cast[int64](f4)
var f3f5_4: int64 = f3_2 * cast[int64](f5_2)
var f3f6_2: int64 = f3_2 * cast[int64](f6)
var f3f7_76: int64 = f3_2 * cast[int64](f7_38)
var f3f8_38: int64 = f3_2 * cast[int64](f8_19)
var f3f9_76: int64 = f3_2 * cast[int64](f9_38)
var f4f4: int64 = f4 * cast[int64](f4)
var f4f5_2: int64 = f4_2 * cast[int64](f5)
var f4f6_38: int64 = f4_2 * cast[int64](f6_19)
var f4f7_38: int64 = f4 * cast[int64](f7_38)
var f4f8_38: int64 = f4_2 * cast[int64](f8_19)
var f4f9_38: int64 = f4 * cast[int64](f9_38)
var f5f5_38: int64 = f5 * cast[int64](f5_38)
var f5f6_38: int64 = f5_2 * cast[int64](f6_19)
var f5f7_76: int64 = f5_2 * cast[int64](f7_38)
var f5f8_38: int64 = f5_2 * cast[int64](f8_19)
var f5f9_76: int64 = f5_2 * cast[int64](f9_38)
var f6f6_19: int64 = f6 * cast[int64](f6_19)
var f6f7_38: int64 = f6 * cast[int64](f7_38)
var f6f8_38: int64 = f6_2 * cast[int64](f8_19)
var f6f9_38: int64 = f6 * cast[int64](f9_38)
var f7f7_38: int64 = f7 * cast[int64](f7_38)
var f7f8_38: int64 = f7_2 * cast[int64](f8_19)
var f7f9_76: int64 = f7_2 * cast[int64](f9_38)
var f8f8_19: int64 = f8 * cast[int64](f8_19)
var f8f9_38: int64 = f8 * cast[int64](f9_38)
var f9f9_38: int64 = f9 * cast[int64](f9_38)
var f0f0: int64 = f0 * safeConvert[int64](f0)
var f0f1_2: int64 = f0_2 * safeConvert[int64](f1)
var f0f2_2: int64 = f0_2 * safeConvert[int64](f2)
var f0f3_2: int64 = f0_2 * safeConvert[int64](f3)
var f0f4_2: int64 = f0_2 * safeConvert[int64](f4)
var f0f5_2: int64 = f0_2 * safeConvert[int64](f5)
var f0f6_2: int64 = f0_2 * safeConvert[int64](f6)
var f0f7_2: int64 = f0_2 * safeConvert[int64](f7)
var f0f8_2: int64 = f0_2 * safeConvert[int64](f8)
var f0f9_2: int64 = f0_2 * safeConvert[int64](f9)
var f1f1_2: int64 = f1_2 * safeConvert[int64](f1)
var f1f2_2: int64 = f1_2 * safeConvert[int64](f2)
var f1f3_4: int64 = f1_2 * safeConvert[int64](f3_2)
var f1f4_2: int64 = f1_2 * safeConvert[int64](f4)
var f1f5_4: int64 = f1_2 * safeConvert[int64](f5_2)
var f1f6_2: int64 = f1_2 * safeConvert[int64](f6)
var f1f7_4: int64 = f1_2 * safeConvert[int64](f7_2)
var f1f8_2: int64 = f1_2 * safeConvert[int64](f8)
var f1f9_76: int64 = f1_2 * safeConvert[int64](f9_38)
var f2f2: int64 = f2 * safeConvert[int64](f2)
var f2f3_2: int64 = f2_2 * safeConvert[int64](f3)
var f2f4_2: int64 = f2_2 * safeConvert[int64](f4)
var f2f5_2: int64 = f2_2 * safeConvert[int64](f5)
var f2f6_2: int64 = f2_2 * safeConvert[int64](f6)
var f2f7_2: int64 = f2_2 * safeConvert[int64](f7)
var f2f8_38: int64 = f2_2 * safeConvert[int64](f8_19)
var f2f9_38: int64 = f2 * safeConvert[int64](f9_38)
var f3f3_2: int64 = f3_2 * safeConvert[int64](f3)
var f3f4_2: int64 = f3_2 * safeConvert[int64](f4)
var f3f5_4: int64 = f3_2 * safeConvert[int64](f5_2)
var f3f6_2: int64 = f3_2 * safeConvert[int64](f6)
var f3f7_76: int64 = f3_2 * safeConvert[int64](f7_38)
var f3f8_38: int64 = f3_2 * safeConvert[int64](f8_19)
var f3f9_76: int64 = f3_2 * safeConvert[int64](f9_38)
var f4f4: int64 = f4 * safeConvert[int64](f4)
var f4f5_2: int64 = f4_2 * safeConvert[int64](f5)
var f4f6_38: int64 = f4_2 * safeConvert[int64](f6_19)
var f4f7_38: int64 = f4 * safeConvert[int64](f7_38)
var f4f8_38: int64 = f4_2 * safeConvert[int64](f8_19)
var f4f9_38: int64 = f4 * safeConvert[int64](f9_38)
var f5f5_38: int64 = f5 * safeConvert[int64](f5_38)
var f5f6_38: int64 = f5_2 * safeConvert[int64](f6_19)
var f5f7_76: int64 = f5_2 * safeConvert[int64](f7_38)
var f5f8_38: int64 = f5_2 * safeConvert[int64](f8_19)
var f5f9_76: int64 = f5_2 * safeConvert[int64](f9_38)
var f6f6_19: int64 = f6 * safeConvert[int64](f6_19)
var f6f7_38: int64 = f6 * safeConvert[int64](f7_38)
var f6f8_38: int64 = f6_2 * safeConvert[int64](f8_19)
var f6f9_38: int64 = f6 * safeConvert[int64](f9_38)
var f7f7_38: int64 = f7 * safeConvert[int64](f7_38)
var f7f8_38: int64 = f7_2 * safeConvert[int64](f8_19)
var f7f9_76: int64 = f7_2 * safeConvert[int64](f9_38)
var f8f8_19: int64 = f8 * safeConvert[int64](f8_19)
var f8f9_38: int64 = f8 * safeConvert[int64](f9_38)
var f9f9_38: int64 = f9 * safeConvert[int64](f9_38)
var h0: int64 = f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
var h1: int64 = f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
var h2: int64 = f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
@@ -623,61 +623,61 @@ proc feSq2(h: var Fe, f: Fe) =
var f7_38 = 38 * f7
var f8_19 = 19 * f8
var f9_38 = 38 * f9
var f0f0 = cast[int64](f0) * cast[int64](f0)
var f0f1_2 = cast[int64](f0_2) * cast[int64](f1)
var f0f2_2 = cast[int64](f0_2) * cast[int64](f2)
var f0f3_2 = cast[int64](f0_2) * cast[int64](f3)
var f0f4_2 = cast[int64](f0_2) * cast[int64](f4)
var f0f5_2 = cast[int64](f0_2) * cast[int64](f5)
var f0f6_2 = cast[int64](f0_2) * cast[int64](f6)
var f0f7_2 = cast[int64](f0_2) * cast[int64](f7)
var f0f8_2 = cast[int64](f0_2) * cast[int64](f8)
var f0f9_2 = cast[int64](f0_2) * cast[int64](f9)
var f1f1_2 = cast[int64](f1_2) * cast[int64](f1)
var f1f2_2 = cast[int64](f1_2) * cast[int64](f2)
var f1f3_4 = cast[int64](f1_2) * cast[int64](f3_2)
var f1f4_2 = cast[int64](f1_2) * cast[int64](f4)
var f1f5_4 = cast[int64](f1_2) * cast[int64](f5_2)
var f1f6_2 = cast[int64](f1_2) * cast[int64](f6)
var f1f7_4 = cast[int64](f1_2) * cast[int64](f7_2)
var f1f8_2 = cast[int64](f1_2) * cast[int64](f8)
var f1f9_76 = cast[int64](f1_2) * cast[int64](f9_38)
var f2f2 = cast[int64](f2) * cast[int64](f2)
var f2f3_2 = cast[int64](f2_2) * cast[int64](f3)
var f2f4_2 = cast[int64](f2_2) * cast[int64](f4)
var f2f5_2 = cast[int64](f2_2) * cast[int64](f5)
var f2f6_2 = cast[int64](f2_2) * cast[int64](f6)
var f2f7_2 = cast[int64](f2_2) * cast[int64](f7)
var f2f8_38 = cast[int64](f2_2) * cast[int64](f8_19)
var f2f9_38 = cast[int64](f2) * cast[int64](f9_38)
var f3f3_2 = cast[int64](f3_2) * cast[int64](f3)
var f3f4_2 = cast[int64](f3_2) * cast[int64](f4)
var f3f5_4 = cast[int64](f3_2) * cast[int64](f5_2)
var f3f6_2 = cast[int64](f3_2) * cast[int64](f6)
var f3f7_76 = cast[int64](f3_2) * cast[int64](f7_38)
var f3f8_38 = cast[int64](f3_2) * cast[int64](f8_19)
var f3f9_76 = cast[int64](f3_2) * cast[int64](f9_38)
var f4f4 = cast[int64](f4) * cast[int64](f4)
var f4f5_2 = cast[int64](f4_2) * cast[int64](f5)
var f4f6_38 = cast[int64](f4_2) * cast[int64](f6_19)
var f4f7_38 = cast[int64](f4) * cast[int64](f7_38)
var f4f8_38 = cast[int64](f4_2) * cast[int64](f8_19)
var f4f9_38 = cast[int64](f4) * cast[int64](f9_38)
var f5f5_38 = cast[int64](f5) * cast[int64](f5_38)
var f5f6_38 = cast[int64](f5_2) * cast[int64](f6_19)
var f5f7_76 = cast[int64](f5_2) * cast[int64](f7_38)
var f5f8_38 = cast[int64](f5_2) * cast[int64](f8_19)
var f5f9_76 = cast[int64](f5_2) * cast[int64](f9_38)
var f6f6_19 = cast[int64](f6) * cast[int64](f6_19)
var f6f7_38 = cast[int64](f6) * cast[int64](f7_38)
var f6f8_38 = cast[int64](f6_2) * cast[int64](f8_19)
var f6f9_38 = cast[int64](f6) * cast[int64](f9_38)
var f7f7_38 = cast[int64](f7) * cast[int64](f7_38)
var f7f8_38 = cast[int64](f7_2) * cast[int64](f8_19)
var f7f9_76 = cast[int64](f7_2) * cast[int64](f9_38)
var f8f8_19 = cast[int64](f8) * cast[int64](f8_19)
var f8f9_38 = cast[int64](f8) * cast[int64](f9_38)
var f9f9_38 = cast[int64](f9) * cast[int64](f9_38)
var f0f0 = safeConvert[int64](f0) * safeConvert[int64](f0)
var f0f1_2 = safeConvert[int64](f0_2) * safeConvert[int64](f1)
var f0f2_2 = safeConvert[int64](f0_2) * safeConvert[int64](f2)
var f0f3_2 = safeConvert[int64](f0_2) * safeConvert[int64](f3)
var f0f4_2 = safeConvert[int64](f0_2) * safeConvert[int64](f4)
var f0f5_2 = safeConvert[int64](f0_2) * safeConvert[int64](f5)
var f0f6_2 = safeConvert[int64](f0_2) * safeConvert[int64](f6)
var f0f7_2 = safeConvert[int64](f0_2) * safeConvert[int64](f7)
var f0f8_2 = safeConvert[int64](f0_2) * safeConvert[int64](f8)
var f0f9_2 = safeConvert[int64](f0_2) * safeConvert[int64](f9)
var f1f1_2 = safeConvert[int64](f1_2) * safeConvert[int64](f1)
var f1f2_2 = safeConvert[int64](f1_2) * safeConvert[int64](f2)
var f1f3_4 = safeConvert[int64](f1_2) * safeConvert[int64](f3_2)
var f1f4_2 = safeConvert[int64](f1_2) * safeConvert[int64](f4)
var f1f5_4 = safeConvert[int64](f1_2) * safeConvert[int64](f5_2)
var f1f6_2 = safeConvert[int64](f1_2) * safeConvert[int64](f6)
var f1f7_4 = safeConvert[int64](f1_2) * safeConvert[int64](f7_2)
var f1f8_2 = safeConvert[int64](f1_2) * safeConvert[int64](f8)
var f1f9_76 = safeConvert[int64](f1_2) * safeConvert[int64](f9_38)
var f2f2 = safeConvert[int64](f2) * safeConvert[int64](f2)
var f2f3_2 = safeConvert[int64](f2_2) * safeConvert[int64](f3)
var f2f4_2 = safeConvert[int64](f2_2) * safeConvert[int64](f4)
var f2f5_2 = safeConvert[int64](f2_2) * safeConvert[int64](f5)
var f2f6_2 = safeConvert[int64](f2_2) * safeConvert[int64](f6)
var f2f7_2 = safeConvert[int64](f2_2) * safeConvert[int64](f7)
var f2f8_38 = safeConvert[int64](f2_2) * safeConvert[int64](f8_19)
var f2f9_38 = safeConvert[int64](f2) * safeConvert[int64](f9_38)
var f3f3_2 = safeConvert[int64](f3_2) * safeConvert[int64](f3)
var f3f4_2 = safeConvert[int64](f3_2) * safeConvert[int64](f4)
var f3f5_4 = safeConvert[int64](f3_2) * safeConvert[int64](f5_2)
var f3f6_2 = safeConvert[int64](f3_2) * safeConvert[int64](f6)
var f3f7_76 = safeConvert[int64](f3_2) * safeConvert[int64](f7_38)
var f3f8_38 = safeConvert[int64](f3_2) * safeConvert[int64](f8_19)
var f3f9_76 = safeConvert[int64](f3_2) * safeConvert[int64](f9_38)
var f4f4 = safeConvert[int64](f4) * safeConvert[int64](f4)
var f4f5_2 = safeConvert[int64](f4_2) * safeConvert[int64](f5)
var f4f6_38 = safeConvert[int64](f4_2) * safeConvert[int64](f6_19)
var f4f7_38 = safeConvert[int64](f4) * safeConvert[int64](f7_38)
var f4f8_38 = safeConvert[int64](f4_2) * safeConvert[int64](f8_19)
var f4f9_38 = safeConvert[int64](f4) * safeConvert[int64](f9_38)
var f5f5_38 = safeConvert[int64](f5) * safeConvert[int64](f5_38)
var f5f6_38 = safeConvert[int64](f5_2) * safeConvert[int64](f6_19)
var f5f7_76 = safeConvert[int64](f5_2) * safeConvert[int64](f7_38)
var f5f8_38 = safeConvert[int64](f5_2) * safeConvert[int64](f8_19)
var f5f9_76 = safeConvert[int64](f5_2) * safeConvert[int64](f9_38)
var f6f6_19 = safeConvert[int64](f6) * safeConvert[int64](f6_19)
var f6f7_38 = safeConvert[int64](f6) * safeConvert[int64](f7_38)
var f6f8_38 = safeConvert[int64](f6_2) * safeConvert[int64](f8_19)
var f6f9_38 = safeConvert[int64](f6) * safeConvert[int64](f9_38)
var f7f7_38 = safeConvert[int64](f7) * safeConvert[int64](f7_38)
var f7f8_38 = safeConvert[int64](f7_2) * safeConvert[int64](f8_19)
var f7f9_76 = safeConvert[int64](f7_2) * safeConvert[int64](f9_38)
var f8f8_19 = safeConvert[int64](f8) * safeConvert[int64](f8_19)
var f8f9_38 = safeConvert[int64](f8) * safeConvert[int64](f9_38)
var f9f9_38 = safeConvert[int64](f9) * safeConvert[int64](f9_38)
var
c0, c1, c2, c3, c4, c5, c6, c7, c8, c9: int64
h0: int64 = f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
@@ -834,7 +834,7 @@ proc geFromBytesNegateVartime(h: var GeP3, s: openArray[byte]): int32 =
return -1;
feMul(h.x, h.x, SqrTm1)
if feIsNegative(h.x) == cast[int32](s[31] shr 7):
if feIsNegative(h.x) == safeConvert[int32](s[31] shr 7):
feNeg(h.x, h.x)
feMul(h.t, h.x, h.y)
@@ -956,14 +956,14 @@ proc equal(b, c: int8): byte =
var ub = cast[byte](b)
var uc = cast[byte](c)
var x = ub xor uc
var y = cast[uint32](x)
var y = safeConvert[uint32](x)
y = y - 1
y = y shr 31
result = cast[byte](y)
proc negative(b: int8): byte =
var x = cast[uint64](b)
x = x shr 63
var x = cast[uint8](b)
x = x shr 7
result = cast[byte](x)
proc cmov(t: var GePrecomp, u: GePrecomp, b: byte) =
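Note on the conversion change above: `safeConvert` (presumably from `libp2p/utility`, which later hunks start importing) replaces `cast` throughout the 25519 field arithmetic. A `cast` reinterprets the bit pattern and will silently mangle a value, while a checked conversion only admits loss-free widenings. A minimal sketch of the idea, assuming the helper only needs to handle integer widenings; the real implementation may differ:

```nim
# Sketch of a checked integer conversion; `checkedConvert` is hypothetical,
# not the actual `safeConvert` from libp2p/utility.
proc checkedConvert[T: SomeInteger](x: SomeInteger): T =
  when (x is SomeSignedInt) and (T is SomeUnsignedInt):
    {.error: "conversion may lose the sign".}
  elif sizeof(x) > sizeof(T) or
       (sizeof(x) == sizeof(T) and (x is SomeUnsignedInt) and
        (T is SomeSignedInt)):
    {.error: "conversion may lose value bits".}
  else:
    T(x)

let limb = -19'i32
doAssert checkedConvert[int64](limb) == -19'i64  # widening: always safe
# checkedConvert[uint64](limb)  # would not compile: sign loss
```

`equal` and `negative` keep `cast` on purpose: reinterpreting the sign bit of an `int8` (and truncating a `uint32` to `byte`) is exactly the bit-level behaviour a checked conversion is meant to reject.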

View File

@@ -9,13 +9,10 @@
# https://tools.ietf.org/html/rfc5869
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import nimcrypto
import bearssl/[kdf, rand, hash]
import bearssl/[kdf, hash]
type HkdfResult*[len: static int] = array[len, byte]
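The pragma hunk above repeats across the modules below: with Nim older than 1.4 no longer supported, the version-gated push collapses to a bare `{.push raises: [].}`, which turns on exhaustive exception tracking for everything that follows. A standalone illustration of what that enforces (not code from this module):

```nim
import std/strutils

{.push raises: [].}

proc parsePort(s: string): int {.raises: [ValueError].} =
  ## Under `{.push raises: [].}` a proc may only raise what it declares;
  ## an undeclared raise becomes a compile-time error.
  parseInt(s)

{.pop.}

doAssert parsePort("4001") == 4001
```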

View File

@@ -9,15 +9,13 @@
## This module implements minimal ASN.1 encoding/decoding primitives.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import stew/[endians2, results, ctops]
export results
# We use `ncrutils` for constant-time hexadecimal encoding/decoding procedures.
import nimcrypto/utils as ncrutils
import ../utility
type
Asn1Error* {.pure.} = enum
@@ -119,7 +117,7 @@ template toOpenArray*(af: Asn1Field): untyped =
template isEmpty*(ab: Asn1Buffer): bool =
ab.offset >= len(ab.buffer)
template isEnough*(ab: Asn1Buffer, length: int): bool =
template isEnough*(ab: Asn1Buffer, length: int64): bool =
len(ab.buffer) >= ab.offset + length
proc len*[T: Asn1Buffer|Asn1Composite](abc: T): int {.inline.} =
@@ -344,32 +342,6 @@ proc asn1EncodeTag[T: SomeUnsignedInt](dest: var openArray[byte],
dest[k - 1] = dest[k - 1] and 0x7F'u8
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[int]): int =
## Encode array of integers ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and
## return number of bytes (octets) used.
##
## If the length of ``dest`` is less than the number of bytes required to
## encode ``value``, the encoding result will not be stored in ``dest``;
## only the number of bytes (octets) required will be returned.
var buffer: array[16, byte]
var res = 1
var oidlen = 1
for i in 2..<len(value):
oidlen += asn1EncodeTag(buffer, cast[uint64](value[i]))
res += asn1EncodeLength(buffer, uint64(oidlen))
res += oidlen
if len(dest) >= res:
let last = dest.high
var offset = 1
dest[0] = Asn1Tag.Oid.code()
offset += asn1EncodeLength(dest.toOpenArray(offset, last), uint64(oidlen))
dest[offset] = cast[byte](value[0] * 40 + value[1])
offset += 1
for i in 2..<len(value):
offset += asn1EncodeTag(dest.toOpenArray(offset, last),
cast[uint64](value[i]))
res
proc asn1EncodeOid*(dest: var openArray[byte], value: openArray[byte]): int =
## Encode array of bytes ``value`` as ASN.1 DER `OBJECT IDENTIFIER` and return
## number of bytes (octets) used.
@@ -443,26 +415,29 @@ proc asn1EncodeContextTag*(dest: var openArray[byte], value: openArray[byte],
copyMem(addr dest[1 + lenlen], unsafeAddr value[0], len(value))
res
proc getLength(ab: var Asn1Buffer): Asn1Result[uint64] =
proc getLength(ab: var Asn1Buffer): Asn1Result[int] =
## Decode length part of ASN.1 TLV triplet.
if not ab.isEmpty():
let b = ab.buffer[ab.offset]
if (b and 0x80'u8) == 0x00'u8:
let length = cast[uint64](b)
let length = safeConvert[int](b)
ab.offset += 1
return ok(length)
if b == 0x80'u8:
return err(Asn1Error.Indefinite)
if b == 0xFF'u8:
return err(Asn1Error.Incorrect)
let octets = cast[uint64](b and 0x7F'u8)
if octets > 8'u64:
let octets = safeConvert[int](b and 0x7F'u8)
if octets > 8:
return err(Asn1Error.Overflow)
if ab.isEnough(int(octets)):
var length: uint64 = 0
for i in 0..<int(octets):
length = (length shl 8) or cast[uint64](ab.buffer[ab.offset + i + 1])
ab.offset = ab.offset + int(octets) + 1
if ab.isEnough(octets):
var lengthU: uint64 = 0
for i in 0..<octets:
lengthU = (lengthU shl 8) or safeConvert[uint64](ab.buffer[ab.offset + i + 1])
if lengthU > uint64(int64.high):
return err(Asn1Error.Overflow)
let length = int(lengthU)
ab.offset = ab.offset + octets + 1
return ok(length)
else:
return err(Asn1Error.Incomplete)
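The rewritten `getLength` now returns an `int` and rejects any encoded length above `int64.high`, which removes the unchecked `int(length)` truncations at the call sites below. A standalone sketch of the DER length rules it implements (illustrative only, not the minasn1 API):

```nim
proc derLength(buf: openArray[byte]): int =
  ## Decode a DER length; mirrors the checks in `getLength` above.
  doAssert buf.len > 0
  let b = buf[0]
  if (b and 0x80'u8) == 0x00'u8:
    return int(b)                   # short form: length in the low 7 bits
  let octets = int(b and 0x7F'u8)   # long form: that many length octets follow
  doAssert octets in 1 .. 8, "indefinite (0x80) or reserved (0xFF) length"
  doAssert buf.len > octets, "truncated length"
  var length: uint64 = 0
  for i in 0 ..< octets:
    length = (length shl 8) or uint64(buf[1 + i])
  doAssert length <= uint64(int64.high), "length overflows int64"
  int(length)

doAssert derLength([0x05'u8]) == 5                      # short form
doAssert derLength([0x82'u8, 0x01'u8, 0x00'u8]) == 256  # long form, 2 octets
```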
@@ -474,8 +449,8 @@ proc getTag(ab: var Asn1Buffer, tag: var int): Asn1Result[Asn1Class] =
if not ab.isEmpty():
let
b = ab.buffer[ab.offset]
c = int((b and 0xC0'u8) shr 6)
tag = int(b and 0x3F)
c = safeConvert[int]((b and 0xC0'u8) shr 6)
tag = safeConvert[int](b and 0x3F)
ab.offset += 1
if c >= 0 and c < 4:
ok(cast[Asn1Class](c))
@@ -489,7 +464,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
var
field: Asn1Field
tag, ttag, offset: int
length, tlength: uint64
length, tlength: int
aclass: Asn1Class
inclass: bool
@@ -519,7 +494,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if length != 1:
return err(Asn1Error.Incorrect)
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
let b = ab.buffer[ab.offset]
@@ -527,7 +502,7 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.Boolean, klass: aclass,
index: ttag, offset: int(ab.offset),
index: ttag, offset: ab.offset,
length: 1, buffer: ab.buffer)
field.vbool = (b == 0xFF'u8)
ab.offset += 1
@@ -538,12 +513,12 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if length == 0:
return err(Asn1Error.Incorrect)
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
# Count number of leading zeroes
var zc = 0
while (zc < int(length)) and (ab.buffer[ab.offset + zc] == 0x00'u8):
while (zc < length) and (ab.buffer[ab.offset + zc] == 0x00'u8):
inc(zc)
if zc > 1:
@@ -552,45 +527,45 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
if zc == 0:
# Negative or Positive integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
if (ab.buffer[ab.offset] and 0x80'u8) == 0x80'u8:
# Negative integer
if length <= 8:
# We need this transformation because our field.vint is uint64.
for i in 0 ..< 8:
if i < 8 - int(length):
if i < 8 - length:
field.vint = (field.vint shl 8) or 0xFF'u64
else:
let offset = ab.offset + i - (8 - int(length))
field.vint = (field.vint shl 8) or uint64(ab.buffer[offset])
let offset = ab.offset + i - (8 - length)
field.vint = (field.vint shl 8) or safeConvert[uint64](ab.buffer[offset])
else:
# Positive integer
if length <= 8:
for i in 0 ..< int(length):
for i in 0 ..< length:
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)
else:
if length == 1:
# Zero value integer
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), vint: 0'u64,
index: ttag, offset: ab.offset,
length: length, vint: 0'u64,
buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)
else:
# Positive integer with leading zero
field = Asn1Field(kind: Asn1Tag.Integer, klass: aclass,
index: ttag, offset: int(ab.offset) + 1,
length: int(length) - 1, buffer: ab.buffer)
index: ttag, offset: ab.offset + 1,
length: length - 1, buffer: ab.buffer)
if length <= 9:
for i in 1 ..< int(length):
for i in 1 ..< length:
field.vint = (field.vint shl 8) or
uint64(ab.buffer[ab.offset + i])
ab.offset += int(length)
safeConvert[uint64](ab.buffer[ab.offset + i])
ab.offset += length
return ok(field)
of Asn1Tag.BitString.code():
@@ -606,13 +581,13 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
else:
# Zero-length BIT STRING.
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
index: ttag, offset: ab.offset + 1,
length: 0, ubits: 0, buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)
else:
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
let unused = ab.buffer[ab.offset]
@@ -620,27 +595,27 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
# Number of unused bits should not be greater than `7`.
return err(Asn1Error.Incorrect)
let mask = (1'u8 shl int(unused)) - 1'u8
if (ab.buffer[ab.offset + int(length) - 1] and mask) != 0x00'u8:
let mask = (1'u8 shl safeConvert[int](unused)) - 1'u8
if (ab.buffer[ab.offset + length - 1] and mask) != 0x00'u8:
## All unused bits should be set to `0`.
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.BitString, klass: aclass,
index: ttag, offset: int(ab.offset + 1),
length: int(length - 1), ubits: int(unused),
index: ttag, offset: ab.offset + 1,
length: length - 1, ubits: safeConvert[int](unused),
buffer: ab.buffer)
ab.offset += int(length)
ab.offset += length
return ok(field)
of Asn1Tag.OctetString.code():
# OCTET STRING
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.OctetString, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)
of Asn1Tag.Null.code():
@@ -649,30 +624,30 @@ proc read*(ab: var Asn1Buffer): Asn1Result[Asn1Field] =
return err(Asn1Error.Incorrect)
field = Asn1Field(kind: Asn1Tag.Null, klass: aclass, index: ttag,
offset: int(ab.offset), length: 0, buffer: ab.buffer)
ab.offset += int(length)
offset: ab.offset, length: 0, buffer: ab.buffer)
ab.offset += length
return ok(field)
of Asn1Tag.Oid.code():
# OID
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.Oid, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)
of Asn1Tag.Sequence.code():
# SEQUENCE
if not ab.isEnough(int(length)):
if not ab.isEnough(length):
return err(Asn1Error.Incomplete)
field = Asn1Field(kind: Asn1Tag.Sequence, klass: aclass,
index: ttag, offset: int(ab.offset),
length: int(length), buffer: ab.buffer)
ab.offset += int(length)
index: ttag, offset: ab.offset,
length: length, buffer: ab.buffer)
ab.offset += length
return ok(field)
else:
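With `length` a plain `int` throughout `read`, the scattered `int(length)` casts disappear and `safeConvert[uint64]` guards the byte-to-integer accumulation. A hedged usage sketch; the import path and the `Asn1Buffer(buffer: ...)` object construction are assumptions, and the module may expose a dedicated initializer instead:

```nim
import stew/results
# assumes: import libp2p/crypto/minasn1 (path is an assumption)

# 0x02 0x01 0x2A is the DER encoding of INTEGER 42
var ab = Asn1Buffer(buffer: @[0x02'u8, 0x01'u8, 0x2A'u8])
let field = ab.read().expect("valid DER INTEGER")
doAssert field.kind == Asn1Tag.Integer
doAssert field.vint == 42'u64
```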

View File

@@ -13,10 +13,7 @@
## BearSSL library <https://bearssl.org/>
## Copyright(C) 2018 Thomas Pornin <pornin@bolet.org>.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/[rsa, rand, hash]
import minasn1
@@ -686,7 +683,7 @@ proc `==`*(a, b: RsaPrivateKey): bool =
false
else:
if a.seck.nBitlen == b.seck.nBitlen:
if cast[int](a.seck.nBitlen) > 0:
if a.seck.nBitlen > 0'u:
let r1 = CT.isEqual(getArray(a.buffer, a.seck.p, a.seck.plen),
getArray(b.buffer, b.seck.p, b.seck.plen))
let r2 = CT.isEqual(getArray(a.buffer, a.seck.q, a.seck.qlen),

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import bearssl/rand
import
@@ -35,9 +32,6 @@ type
SkSignature* = distinct secp256k1.SkSignature
SkKeyPair* = distinct secp256k1.SkKeyPair
template pubkey*(v: SkKeyPair): SkPublicKey = SkPublicKey(secp256k1.SkKeyPair(v).pubkey)
template seckey*(v: SkKeyPair): SkPrivateKey = SkPrivateKey(secp256k1.SkKeyPair(v).seckey)
proc random*(t: typedesc[SkPrivateKey], rng: var HmacDrbgContext): SkPrivateKey =
#TODO is there a better way?
var rngPtr = addr rng

View File

@@ -7,17 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
## This module implements the API for `go-libp2p-daemon`.
import std/[os, osproc, strutils, tables, strtabs, sequtils]
import pkg/[chronos, chronicles]
import ../varint, ../multiaddress, ../multicodec, ../cid, ../peerid
import ../wire, ../multihash, ../protobuf/minprotobuf, ../errors
import ../crypto/crypto
import ../crypto/crypto, ../utility
export
peerid, multiaddress, multicodec, multihash, cid, crypto, wire, errors
@@ -153,10 +150,10 @@ type
key*: PublicKey
P2PStreamCallback* = proc(api: DaemonAPI,
stream: P2PStream): Future[void] {.gcsafe, raises: [Defect, CatchableError].}
stream: P2PStream): Future[void] {.gcsafe, raises: [CatchableError].}
P2PPubSubCallback* = proc(api: DaemonAPI,
ticket: PubsubTicket,
message: PubSubMessage): Future[bool] {.gcsafe, raises: [Defect, CatchableError].}
message: PubSubMessage): Future[bool] {.gcsafe, raises: [CatchableError].}
DaemonError* = object of LPError
DaemonRemoteError* = object of DaemonError
@@ -170,7 +167,7 @@ proc requestIdentity(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doIdentify(req *pb.Request)`.
result = initProtoBuffer({WithVarintLength})
result.write(1, cast[uint](RequestType.IDENTIFY))
result.write(1, safeConvert[uint](RequestType.IDENTIFY))
result.finish()
proc requestConnect(peerid: PeerId,
@@ -185,7 +182,7 @@ proc requestConnect(peerid: PeerId,
msg.write(2, item.data.buffer)
if timeout > 0:
msg.write(3, hint64(timeout))
result.write(1, cast[uint](RequestType.CONNECT))
result.write(1, safeConvert[uint](RequestType.CONNECT))
result.write(2, msg)
result.finish()
@@ -195,7 +192,7 @@ proc requestDisconnect(peerid: PeerId): ProtoBuffer =
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, peerid)
result.write(1, cast[uint](RequestType.DISCONNECT))
result.write(1, safeConvert[uint](RequestType.DISCONNECT))
result.write(7, msg)
result.finish()
@@ -211,7 +208,7 @@ proc requestStreamOpen(peerid: PeerId,
msg.write(2, item)
if timeout > 0:
msg.write(3, hint64(timeout))
result.write(1, cast[uint](RequestType.STREAM_OPEN))
result.write(1, safeConvert[uint](RequestType.STREAM_OPEN))
result.write(3, msg)
result.finish()
@@ -224,7 +221,7 @@ proc requestStreamHandler(address: MultiAddress,
msg.write(1, address.data.buffer)
for item in protocols:
msg.write(2, item)
result.write(1, cast[uint](RequestType.STREAM_HANDLER))
result.write(1, safeConvert[uint](RequestType.STREAM_HANDLER))
result.write(4, msg)
result.finish()
@@ -232,13 +229,13 @@ proc requestListPeers(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/conn.go
## Processing function `doListPeers(req *pb.Request)`
result = initProtoBuffer({WithVarintLength})
result.write(1, cast[uint](RequestType.LIST_PEERS))
result.write(1, safeConvert[uint](RequestType.LIST_PEERS))
result.finish()
proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeer(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PEER)
let msgid = safeConvert[uint](DHTRequestType.FIND_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -246,7 +243,7 @@ proc requestDHTFindPeer(peer: PeerId, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
@@ -254,7 +251,7 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindPeersConnectedToPeer(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
let msgid = safeConvert[uint](DHTRequestType.FIND_PEERS_CONNECTED_TO_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -262,7 +259,7 @@ proc requestDHTFindPeersConnectedToPeer(peer: PeerId,
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
@@ -270,7 +267,7 @@ proc requestDHTFindProviders(cid: Cid,
count: uint32, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTFindProviders(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.FIND_PROVIDERS)
let msgid = safeConvert[uint](DHTRequestType.FIND_PROVIDERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -279,14 +276,14 @@ proc requestDHTFindProviders(cid: Cid,
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetClosestPeers(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_CLOSEST_PEERS)
let msgid = safeConvert[uint](DHTRequestType.GET_CLOSEST_PEERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -294,14 +291,14 @@ proc requestDHTGetClosestPeers(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetPublicKey(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_PUBLIC_KEY)
let msgid = safeConvert[uint](DHTRequestType.GET_PUBLIC_KEY)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -309,14 +306,14 @@ proc requestDHTGetPublicKey(peer: PeerId, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTGetValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.GET_VALUE)
let msgid = safeConvert[uint](DHTRequestType.GET_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -324,14 +321,14 @@ proc requestDHTGetValue(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTSearchValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.SEARCH_VALUE)
let msgid = safeConvert[uint](DHTRequestType.SEARCH_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -339,7 +336,7 @@ proc requestDHTSearchValue(key: string, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
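Every request builder in this file has the same shape: a varint-length-framed protobuf whose field 1 carries the request type and whose payload lands in a type-specific field (5 for DHT, 6 for connmgr, 8 for pubsub). A hypothetical helper capturing that pattern; `wrapRequest` does not exist in the codebase, it only restates what the procs above and below repeat:

```nim
proc wrapRequest(rt: RequestType, payloadField: int,
                 msg: ProtoBuffer): ProtoBuffer =
  # shared framing: varint length prefix, type in field 1, payload after
  result = initProtoBuffer({WithVarintLength})
  result.write(1, safeConvert[uint](rt))
  result.write(payloadField, msg)
  result.finish()
```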
@@ -347,7 +344,7 @@ proc requestDHTPutValue(key: string, value: openArray[byte],
timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTPutValue(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.PUT_VALUE)
let msgid = safeConvert[uint](DHTRequestType.PUT_VALUE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -356,14 +353,14 @@ proc requestDHTPutValue(key: string, value: openArray[byte],
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/dht.go
## Processing function `doDHTProvide(req *pb.DHTRequest)`.
let msgid = cast[uint](DHTRequestType.PROVIDE)
let msgid = safeConvert[uint](DHTRequestType.PROVIDE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -371,13 +368,13 @@ proc requestDHTProvide(cid: Cid, timeout = 0): ProtoBuffer =
if timeout > 0:
msg.write(7, hint64(timeout))
msg.finish()
result.write(1, cast[uint](RequestType.DHT))
result.write(1, safeConvert[uint](RequestType.DHT))
result.write(5, msg)
result.finish()
proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L18
let msgid = cast[uint](ConnManagerRequestType.TAG_PEER)
let msgid = safeConvert[uint](ConnManagerRequestType.TAG_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
@@ -385,83 +382,83 @@ proc requestCMTagPeer(peer: PeerId, tag: string, weight: int): ProtoBuffer =
msg.write(3, tag)
msg.write(4, hint64(weight))
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()
proc requestCMUntagPeer(peer: PeerId, tag: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L33
let msgid = cast[uint](ConnManagerRequestType.UNTAG_PEER)
let msgid = safeConvert[uint](ConnManagerRequestType.UNTAG_PEER)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, peer)
msg.write(3, tag)
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()
proc requestCMTrim(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/connmgr.go#L47
let msgid = cast[uint](ConnManagerRequestType.TRIM)
let msgid = safeConvert[uint](ConnManagerRequestType.TRIM)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.finish()
result.write(1, cast[uint](RequestType.CONNMANAGER))
result.write(1, safeConvert[uint](RequestType.CONNMANAGER))
result.write(6, msg)
result.finish()
proc requestPSGetTopics(): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubGetTopics(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.GET_TOPICS)
let msgid = safeConvert[uint](PSRequestType.GET_TOPICS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()
proc requestPSListPeers(topic: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubListPeers(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.LIST_PEERS)
let msgid = safeConvert[uint](PSRequestType.LIST_PEERS)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()
proc requestPSPublish(topic: string, data: openArray[byte]): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubPublish(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.PUBLISH)
let msgid = safeConvert[uint](PSRequestType.PUBLISH)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.write(3, data)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()
proc requestPSSubscribe(topic: string): ProtoBuffer =
## https://github.com/libp2p/go-libp2p-daemon/blob/master/pubsub.go
## Processing function `doPubsubSubscribe(req *pb.PSRequest)`.
let msgid = cast[uint](PSRequestType.SUBSCRIBE)
let msgid = safeConvert[uint](PSRequestType.SUBSCRIBE)
result = initProtoBuffer({WithVarintLength})
var msg = initProtoBuffer()
msg.write(1, msgid)
msg.write(2, topic)
msg.finish()
result.write(1, cast[uint](RequestType.PUBSUB))
result.write(1, safeConvert[uint](RequestType.PUBSUB))
result.write(8, msg)
result.finish()
@@ -474,7 +471,7 @@ proc checkResponse(pb: ProtoBuffer): ResponseKind {.inline.} =
else:
result = ResponseKind.Error
proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [Defect, DaemonLocalError].} =
proc getErrorMessage(pb: ProtoBuffer): string {.inline, raises: [DaemonLocalError].} =
var error: seq[byte]
if pb.getRequiredField(ResponseType.ERROR.int, error).isOk():
if initProtoBuffer(error).getRequiredField(1, result).isErr():
@@ -504,7 +501,7 @@ proc recvMessage(conn: StreamTransport): Future[seq[byte]] {.async.} =
result = buffer
proc newConnection*(api: DaemonAPI): Future[StreamTransport]
{.raises: [Defect, LPError].} =
{.raises: [LPError].} =
result = connect(api.address)
proc closeConnection*(api: DaemonAPI, transp: StreamTransport): Future[void] =
@@ -515,7 +512,7 @@ proc socketExists(address: MultiAddress): Future[bool] {.async.} =
var transp = await connect(address)
await transp.closeWait()
result = true
except:
except CatchableError:
result = false
when defined(windows):
@@ -525,7 +522,7 @@ when defined(windows):
result = cast[int](getCurrentProcessId())
else:
proc getProcessId(): int =
result = cast[int](posix.getpid())
result = int(posix.getpid())
proc getSocket(pattern: string,
count: ptr int): Future[MultiAddress] {.async.} =
@@ -556,7 +553,7 @@ proc getSocket(pattern: string,
closeSocket(sock)
# This is forward declaration needed for newDaemonApi()
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async, gcsafe.}
proc listPeers*(api: DaemonAPI): Future[seq[PeerInfo]] {.async.}
proc copyEnv(): StringTableRef =
## This procedure copies all environment variables into a StringTable.
@@ -758,13 +755,9 @@ proc newDaemonApi*(flags: set[P2PDaemonFlags] = {},
# Starting daemon process
# echo "Starting ", cmd, " ", args.join(" ")
api.process =
try:
api.process =
exceptionToAssert:
startProcess(cmd, "", args, env, {poParentStreams})
except CatchableError as exc:
raise exc
except Exception as exc:
raiseAssert exc.msg
# Waiting until daemon will not be bound to control socket.
while true:
if not api.process.running():
@@ -841,7 +834,7 @@ proc transactMessage(transp: StreamTransport,
result = initProtoBuffer(message)
proc getPeerInfo(pb: ProtoBuffer): PeerInfo
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
## Get PeerInfo object from ``pb``.
result.addresses = newSeq[MultiAddress]()
if pb.getRequiredField(1, result.peer).isErr():
@@ -872,7 +865,7 @@ proc connect*(api: DaemonAPI, peer: PeerId,
timeout))
pb.withMessage() do:
discard
except:
except CatchableError:
await api.closeConnection(transp)
proc disconnect*(api: DaemonAPI, peer: PeerId) {.async.} =
@@ -932,7 +925,7 @@ proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
asyncSpawn handler(api, stream)
proc addHandler*(api: DaemonAPI, protocols: seq[string],
handler: P2PStreamCallback) {.async, raises: [Defect, LPError].} =
handler: P2PStreamCallback) {.async, raises: [LPError].} =
## Add stream handler ``handler`` for set of protocols ``protocols``.
var transp = await api.newConnection()
let maddress = await getSocket(api.pattern, addr api.ucounter)
@@ -1002,7 +995,7 @@ proc cmTrimPeers*(api: DaemonAPI) {.async.} =
await api.closeConnection(transp)
proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(2, res).isOk():
result = initProtoBuffer(res).getPeerInfo()
@@ -1010,42 +1003,42 @@ proc dhtGetSinglePeerInfo(pb: ProtoBuffer): PeerInfo
raise newException(DaemonLocalError, "Missing required field `peer`!")
proc dhtGetSingleValue(pb: ProtoBuffer): seq[byte]
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
result = newSeq[byte]()
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePublicKey(pb: ProtoBuffer): PublicKey
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc dhtGetSinglePeerId(pb: ProtoBuffer): PeerId
{.raises: [Defect, DaemonLocalError].} =
{.raises: [DaemonLocalError].} =
if pb.getRequiredField(3, result).isErr():
raise newException(DaemonLocalError, "Missing field `value`!")
proc enterDhtMessage(pb: ProtoBuffer, rt: DHTResponseType): ProtoBuffer
{.inline, raises: [Defect, DaemonLocalError].} =
{.inline, raises: [DaemonLocalError].} =
var dhtResponse: seq[byte]
if pb.getRequiredField(ResponseType.DHT.int, dhtResponse).isOk():
var pbDhtResponse = initProtoBuffer(dhtResponse)
var dtype: uint
if pbDhtResponse.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
if dtype != cast[uint](rt):
if dtype != safeConvert[uint](rt):
raise newException(DaemonLocalError, "Wrong DHT answer type! ")
var value: seq[byte]
if pbDhtResponse.getRequiredField(3, value).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `value`!")
return initProtoBuffer(value)
else:
raise newException(DaemonLocalError, "Wrong message type!")
proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
{.inline, raises: [Defect, DaemonLocalError].} =
{.inline, raises: [DaemonLocalError].} =
var res: seq[byte]
if pb.getRequiredField(ResponseType.PUBSUB.int, res).isErr():
raise newException(DaemonLocalError, "Wrong message type!")
@@ -1053,13 +1046,13 @@ proc enterPsMessage(pb: ProtoBuffer): ProtoBuffer
initProtoBuffer(res)
proc getDhtMessageType(pb: ProtoBuffer): DHTResponseType
{.inline, raises: [Defect, DaemonLocalError].} =
{.inline, raises: [DaemonLocalError].} =
var dtype: uint
if pb.getRequiredField(1, dtype).isErr():
raise newException(DaemonLocalError, "Missing required DHT field `type`!")
if dtype == cast[uint](DHTResponseType.VALUE):
if dtype == safeConvert[uint](DHTResponseType.VALUE):
result = DHTResponseType.VALUE
elif dtype == cast[uint](DHTResponseType.END):
elif dtype == safeConvert[uint](DHTResponseType.END):
result = DHTResponseType.END
else:
raise newException(DaemonLocalError, "Wrong DHT answer type!")

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
## This module implements a pool of StreamTransport instances.
import chronos

View File

@@ -25,7 +25,7 @@
## 5. LocalAddress: optional bytes
## 6. RemoteAddress: optional bytes
## 7. Message: required bytes
import os, options
import os
import nimcrypto/utils, stew/endians2
import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
multiaddress, peerid, varint, muxers/mplex/coder
@@ -33,7 +33,7 @@ import protobuf/minprotobuf, stream/connection, protocols/secure/secure,
from times import getTime, toUnix, fromUnix, nanosecond, format, Time,
NanosecondRange, initTime
from strutils import toHex, repeat
export peerid, options, multiaddress
export peerid, multiaddress
type
FlowDirection* = enum
@@ -43,10 +43,10 @@ type
timestamp*: uint64
direction*: FlowDirection
message*: seq[byte]
seqID*: Option[uint64]
mtype*: Option[uint64]
local*: Option[MultiAddress]
remote*: Option[MultiAddress]
seqID*: Opt[uint64]
mtype*: Opt[uint64]
local*: Opt[MultiAddress]
remote*: Opt[MultiAddress]
const
libp2p_dump_dir* {.strdefine.} = "nim-libp2p"
@@ -72,7 +72,8 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
var pb = initProtoBuffer(options = {WithVarintLength})
pb.write(2, getTimestamp())
pb.write(4, uint64(direction))
pb.write(6, conn.observedAddr)
conn.observedAddr.withValue(oaddr):
pb.write(6, oaddr)
pb.write(7, data)
pb.finish()
@@ -100,7 +101,7 @@ proc dumpMessage*(conn: SecureConn, direction: FlowDirection,
finally:
close(handle)
proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
proc decodeDumpMessage*(data: openArray[byte]): Opt[ProtoMessage] =
## Decode protobuf's message ProtoMessage from array of bytes ``data``.
var
pb = initProtoBuffer(data)
@@ -108,13 +109,12 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
ma1, ma2: MultiAddress
pmsg: ProtoMessage
let res2 = pb.getField(2, pmsg.timestamp)
if res2.isErr() or not(res2.get()):
return none[ProtoMessage]()
let res4 = pb.getField(4, value)
if res4.isErr() or not(res4.get()):
return none[ProtoMessage]()
let
r2 = pb.getField(2, pmsg.timestamp)
r4 = pb.getField(4, value)
r7 = pb.getField(7, pmsg.message)
if not r2.get(false) or not r4.get(false) or not r7.get(false):
return Opt.none(ProtoMessage)
# A `case` statement cannot be used here: the compiler rejects it with
# "selector must be of an ordinal type, float or string"
@@ -124,30 +124,27 @@ proc decodeDumpMessage*(data: openArray[byte]): Option[ProtoMessage] =
elif value == uint64(Incoming):
Incoming
else:
return none[ProtoMessage]()
return Opt.none(ProtoMessage)
let res7 = pb.getField(7, pmsg.message)
if res7.isErr() or not(res7.get()):
return none[ProtoMessage]()
let r1 = pb.getField(1, value)
if r1.get(false):
pmsg.seqID = Opt.some(value)
value = 0'u64
let res1 = pb.getField(1, value)
if res1.isOk() and res1.get():
pmsg.seqID = some(value)
value = 0'u64
let res3 = pb.getField(3, value)
if res3.isOk() and res3.get():
pmsg.mtype = some(value)
let res5 = pb.getField(5, ma1)
if res5.isOk() and res5.get():
pmsg.local = some(ma1)
let res6 = pb.getField(6, ma2)
if res6.isOk() and res6.get():
pmsg.remote = some(ma2)
let r3 = pb.getField(3, value)
if r3.get(false):
pmsg.mtype = Opt.some(value)
some(pmsg)
let
r5 = pb.getField(5, ma1)
r6 = pb.getField(6, ma2)
if r5.get(false):
pmsg.local = Opt.some(ma1)
if r6.get(false):
pmsg.remote = Opt.some(ma2)
iterator messages*(data: seq[byte]): Option[ProtoMessage] =
Opt.some(pmsg)
iterator messages*(data: seq[byte]): Opt[ProtoMessage] =
## Iterate over a sequence of bytes and decode all the ``ProtoMessage``
## messages found.
var value: uint64
@@ -242,27 +239,19 @@ proc toString*(msg: ProtoMessage, dump = true): string =
" >> "
let address =
block:
let local =
if msg.local.isSome():
"[" & $(msg.local.get()) & "]"
else:
"[LOCAL]"
let remote =
if msg.remote.isSome():
"[" & $(msg.remote.get()) & "]"
else:
"[REMOTE]"
let local = block:
msg.local.withValue(loc): "[" & $loc & "]"
else: "[LOCAL]"
let remote = block:
msg.remote.withValue(rem): "[" & $rem & "]"
else: "[REMOTE]"
local & direction & remote
let seqid =
if msg.seqID.isSome():
"seqID = " & $(msg.seqID.get()) & " "
else:
""
let mtype =
if msg.mtype.isSome():
"type = " & $(msg.mtype.get()) & " "
else:
""
let seqid = block:
msg.seqID.withValue(seqid): "seqID = " & $seqid & " "
else: ""
let mtype = block:
msg.mtype.withValue(typ): "type = " & $typ & " "
else: ""
res.add(" ")
res.add(address)
res.add(" ")

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos
import stew/results
@@ -28,7 +25,8 @@ method connect*(
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true) {.async, base.} =
reuseConnection = true,
dir = Direction.Out) {.async, base.} =
## connect to a remote peer without negotiating
## a protocol
##

View File

@@ -7,7 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/[sugar, tables, sequtils]
import std/tables
import stew/results
import pkg/[chronos,
@@ -36,7 +36,6 @@ logScope:
declareCounter(libp2p_total_dial_attempts, "total attempted dials")
declareCounter(libp2p_successful_dials, "dialed successful peers")
declareCounter(libp2p_failed_dials, "failed dials")
declareCounter(libp2p_failed_upgrades_outgoing, "outgoing connections failed upgrades")
type
DialFailedError* = object of LPError
@@ -53,45 +52,50 @@ proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
hostname: string,
address: MultiAddress):
address: MultiAddress,
dir = Direction.Out):
Future[Muxer] {.async.} =
for transport in self.transports: # for each transport
if transport.handles(address): # check if it can dial it
trace "Dialing address", address, peerId, hostname
trace "Dialing address", address, peerId = peerId.get(default(PeerId)), hostname
let dialed =
try:
libp2p_total_dial_attempts.inc()
await transport.dial(hostname, address, peerId)
except CancelledError as exc:
debug "Dialing canceled", msg = exc.msg, peerId
debug "Dialing canceled", err = exc.msg, peerId = peerId.get(default(PeerId))
raise exc
except CatchableError as exc:
debug "Dialing failed", msg = exc.msg, peerId
debug "Dialing failed", err = exc.msg, peerId = peerId.get(default(PeerId))
libp2p_failed_dials.inc()
return nil # Try the next address
# also keep track of the connection's low-level transport direction,
# as required by gossipsub scoring
dialed.transportDir = Direction.Out
libp2p_successful_dials.inc()
let mux =
try:
await transport.upgradeOutgoing(dialed, peerId)
# This is for the very specific case of a simultaneous dial during DCUtR. In this case, both sides will have
# an Outbound direction at the transport level. Therefore we update the DCUtR initiator's transport direction to Inbound.
# The if below is more general and might handle other use cases in the future.
if dialed.dir != dir:
dialed.dir = dir
await transport.upgrade(dialed, peerId)
except CatchableError as exc:
# If we failed to establish the connection through one transport,
# we won't succeed through another - no use in trying again
await dialed.close()
debug "Upgrade failed", msg = exc.msg, peerId
debug "Upgrade failed", err = exc.msg, peerId = peerId.get(default(PeerId))
if exc isnot CancelledError:
libp2p_failed_upgrades_outgoing.inc()
if dialed.dir == Direction.Out:
libp2p_failed_upgrades_outgoing.inc()
else:
libp2p_failed_upgrades_incoming.inc()
# Try other address
return nil
doAssert not isNil(mux), "connection died after upgradeOutgoing"
doAssert not isNil(mux), "connection died after upgrade " & $dialed.dir
debug "Dial successful", peerId = mux.connection.peerId
return mux
return nil
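
A self-contained model of the direction override above; `Direction` and `FakeConn` are illustrative stand-ins, not the real libp2p types:

```nim
type
  Direction = enum In, Out
  FakeConn = ref object
    dir: Direction

# A transport-level dial always reports Out; during a DCUtR
# simultaneous dial the initiator overrides its logical direction.
proc applyRequestedDir(conn: FakeConn, requested: Direction) =
  if conn.dir != requested:
    conn.dir = requested

let conn = FakeConn(dir: Out)
applyRequestedDir(conn, In)   # the DCUtR initiator asks for Inbound
assert conn.dir == In
```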
@@ -127,10 +131,11 @@ proc expandDnsAddr(
proc dialAndUpgrade(
self: Dialer,
peerId: Opt[PeerId],
addrs: seq[MultiAddress]):
addrs: seq[MultiAddress],
dir = Direction.Out):
Future[Muxer] {.async.} =
debug "Dialing peer", peerId
debug "Dialing peer", peerId = peerId.get(default(PeerId))
for rawAddress in addrs:
# resolve potential dnsaddr
@@ -145,11 +150,11 @@ proc dialAndUpgrade(
else: await self.nameResolver.resolveMAddress(expandedAddress)
for resolvedAddress in resolvedAddresses:
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress)
result = await self.dialAndUpgrade(addrPeerId, hostname, resolvedAddress, dir)
if not isNil(result):
return result
proc tryReusingConnection(self: Dialer, peerId: PeerId): Future[Opt[Muxer]] {.async.} =
proc tryReusingConnection(self: Dialer, peerId: PeerId): Opt[Muxer] =
let muxer = self.connManager.selectMuxer(peerId)
if muxer == nil:
return Opt.none(Muxer)
@@ -162,7 +167,8 @@ proc internalConnect(
peerId: Opt[PeerId],
addrs: seq[MultiAddress],
forceDial: bool,
reuseConnection = true):
reuseConnection = true,
dir = Direction.Out):
Future[Muxer] {.async.} =
if Opt.some(self.localPeerId) == peerId:
raise newException(CatchableError, "can't dial self!")
@@ -172,15 +178,15 @@ proc internalConnect(
try:
await lock.acquire()
if peerId.isSome and reuseConnection:
let muxOpt = await self.tryReusingConnection(peerId.get())
if muxOpt.isSome:
return muxOpt.get()
if reuseConnection:
peerId.withValue(peerId):
self.tryReusingConnection(peerId).withValue(mux):
return mux
let slot = self.connManager.getOutgoingSlot(forceDial)
let muxed =
try:
await self.dialAndUpgrade(peerId, addrs)
await self.dialAndUpgrade(peerId, addrs, dir)
except CatchableError as exc:
slot.release()
raise exc
@@ -206,7 +212,8 @@ method connect*(
peerId: PeerId,
addrs: seq[MultiAddress],
forceDial = false,
reuseConnection = true) {.async.} =
reuseConnection = true,
dir = Direction.Out) {.async.} =
## connect remote peer without negotiating
## a protocol
##
@@ -214,7 +221,7 @@ method connect*(
if self.connManager.connCount(peerId) > 0 and reuseConnection:
return
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection)
discard await self.internalConnect(Opt.some(peerId), addrs, forceDial, reuseConnection, dir)
method connect*(
self: Dialer,
@@ -222,20 +229,20 @@ method connect*(
allowUnknownPeerId = false): Future[PeerId] {.async.} =
## Connects to a peer and retrieve its PeerId
let fullAddress = parseFullAddress(address)
if fullAddress.isOk:
parseFullAddress(address).toOpt().withValue(fullAddress):
return (await self.internalConnect(
Opt.some(fullAddress.get()[0]),
@[fullAddress.get()[1]],
false)).connection.peerId
else:
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
Opt.some(fullAddress[0]),
@[fullAddress[1]],
false)).connection.peerId
if allowUnknownPeerId == false:
raise newException(DialFailedError, "Address without PeerID and unknown peer id disabled!")
return (await self.internalConnect(
Opt.none(PeerId),
@[address],
false)).connection.peerId
proc negotiateStream(
self: Dialer,
conn: Connection,
@@ -321,7 +328,7 @@ method dial*(
await cleanup()
raise exc
except CatchableError as exc:
debug "Error dialing", conn, msg = exc.msg
debug "Error dialing", conn, err = exc.msg
await cleanup()
raise exc

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/sequtils
import chronos, chronicles, stew/results
@@ -18,7 +15,7 @@ import ../errors
type
BaseAttr = ref object of RootObj
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [Defect].}
comparator: proc(f, c: BaseAttr): bool {.gcsafe, raises: [].}
Attribute[T] = ref object of BaseAttr
value: T
@@ -60,7 +57,7 @@ proc `{}`*[T](pa: PeerAttributes, t: typedesc[T]): Opt[T] =
return Opt.some(f.to(T))
Opt.none(T)
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [Defect, KeyError].} =
proc `[]`*[T](pa: PeerAttributes, t: typedesc[T]): T {.raises: [KeyError].} =
pa{T}.valueOr: raise newException(KeyError, "Attribute not found")
proc match*(pa, candidate: PeerAttributes): bool =
@@ -73,7 +70,7 @@ proc match*(pa, candidate: PeerAttributes): bool =
return true
type
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [Defect], gcsafe.}
PeerFoundCallback* = proc(pa: PeerAttributes) {.raises: [], gcsafe.}
DiscoveryInterface* = ref object of RootObj
onPeerFound*: PeerFoundCallback
@@ -125,20 +122,15 @@ proc request*[T](dm: DiscoveryManager, value: T): DiscoveryQuery =
pa.add(value)
return dm.request(pa)
proc advertise*(dm: DiscoveryManager, pa: PeerAttributes) =
proc advertise*[T](dm: DiscoveryManager, value: T) =
for i in dm.interfaces:
i.toAdvertise = pa
i.toAdvertise.add(value)
if i.advertiseLoop.isNil:
i.advertisementUpdated = newAsyncEvent()
i.advertiseLoop = i.advertise()
else:
i.advertisementUpdated.fire()
proc advertise*[T](dm: DiscoveryManager, value: T) =
var pa: PeerAttributes
pa.add(value)
dm.advertise(pa)
template forEach*(query: DiscoveryQuery, code: untyped) =
## Will execute `code` for each discovered peer. The
## peer attributes are available through the variable

View File

@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import sequtils
import chronos
import ./discoverymngr,
../protocols/rendezvous,
@@ -23,6 +19,7 @@ type
rdv*: RendezVous
timeToRequest: Duration
timeToAdvertise: Duration
ttl: Duration
RdvNamespace* = distinct string
@@ -66,12 +63,16 @@ method advertise*(self: RendezVousInterface) {.async.} =
self.advertisementUpdated.clear()
for toAdv in toAdvertise:
await self.rdv.advertise(toAdv, self.timeToAdvertise)
try:
await self.rdv.advertise(toAdv, self.ttl)
except CatchableError as error:
debug "RendezVous advertise error: ", msg = error.msg
await sleepAsync(self.timeToAdvertise) or self.advertisementUpdated.wait()
proc new*(T: typedesc[RendezVousInterface],
rdv: RendezVous,
ttr: Duration = 1.minutes,
tta: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta)
tta: Duration = 1.minutes,
ttl: Duration = MinimumDuration): RendezVousInterface =
T(rdv: rdv, timeToRequest: ttr, timeToAdvertise: tta, ttl: ttl)
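
A hypothetical construction showing the new `ttl` knob; `rdv` is assumed to be an existing RendezVous instance, chronos provides the Duration literals, and the values are illustrative:

```nim
let iface = RendezVousInterface.new(
  rdv,
  ttr = 1.minutes,  # how often discovery requests are refreshed
  tta = 1.minutes,  # pause between advertise rounds
  ttl = 2.hours)    # registration lifetime passed to rdv.advertise
```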

View File

@@ -19,7 +19,8 @@ func toException*(e: string): ref LPError =
# sadly nim needs more love for hygienic templates
# so here goes the macro; it's based on the proc/template version
# and uses quote do so it's quite readable
macro checkFutures*[T](futs: seq[Future[T]], exclude: untyped = []): untyped =
# TODO https://github.com/nim-lang/Nim/issues/22936
macro checkFutures*[F](futs: seq[F], exclude: untyped = []): untyped =
let nexclude = exclude.len
case nexclude
of 0:

View File

@@ -9,13 +9,10 @@
## This module implements MultiAddress.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
{.push public.}
import pkg/chronos
import pkg/chronos, chronicles
import std/[nativesockets, hashes]
import tables, strutils, sets, stew/shims/net
import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
@@ -23,6 +20,9 @@ import multicodec, multihash, multibase, transcoder, vbuffer, peerid,
import stew/[base58, base32, endians2, results]
export results, minprotobuf, vbuffer, utility
logScope:
topics = "libp2p multiaddress"
type
MAKind* = enum
None, Fixed, Length, Path, Marker
@@ -34,7 +34,7 @@ type
coder*: Transcoder
MultiAddress* = object
data*: VBuffer
data: VBuffer
MaPatternOp* = enum
Eq, Or, And
@@ -63,6 +63,10 @@ const
IPPROTO_TCP = Protocol.IPPROTO_TCP
IPPROTO_UDP = Protocol.IPPROTO_UDP
proc data*(ma: MultiAddress): VBuffer =
## Returns the data buffer of the MultiAddress.
return ma.data
proc hash*(a: MultiAddress): Hash =
var h: Hash = 0
h = h !& hash(a.data.buffer)
@@ -76,7 +80,7 @@ proc ip4StB(s: string, vb: var VBuffer): bool =
if a.family == IpAddressFamily.IPv4:
vb.writeArray(a.address_v4)
result = true
except:
except CatchableError:
discard
proc ip4BtS(vb: var VBuffer, s: var string): bool =
@@ -99,7 +103,7 @@ proc ip6StB(s: string, vb: var VBuffer): bool =
if a.family == IpAddressFamily.IPv6:
vb.writeArray(a.address_v6)
result = true
except:
except CatchableError:
discard
proc ip6BtS(vb: var VBuffer, s: var string): bool =
@@ -143,14 +147,14 @@ proc portStB(s: string, vb: var VBuffer): bool =
port[1] = cast[byte](nport and 0xFF)
vb.writeArray(port)
result = true
except:
except CatchableError:
discard
proc portBtS(vb: var VBuffer, s: var string): bool =
## Port number bufferToString() implementation.
var port: array[2, byte]
if vb.readArray(port) == 2:
var nport = (cast[uint16](port[0]) shl 8) or cast[uint16](port[1])
var nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
s = $nport
result = true
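
`safeConvert` is nim-libp2p's checked alternative to `cast` for integer widening. A simplified stand-in reproducing the port reconstruction above (the real helper statically rejects narrowing conversions):

```nim
proc safeConvert[T: SomeUnsignedInt](x: SomeUnsignedInt): T =
  T(x)  # plain widening conversion suffices for this sketch

let port = [0x1F'u8, 0x90'u8]
let nport = (safeConvert[uint16](port[0]) shl 8) or safeConvert[uint16](port[1])
assert nport == 8080'u16
```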
@@ -168,7 +172,7 @@ proc p2pStB(s: string, vb: var VBuffer): bool =
if MultiHash.decode(data, mh).isOk:
vb.writeSeq(data)
result = true
except:
except CatchableError:
discard
proc p2pBtS(vb: var VBuffer, s: var string): bool =
@@ -203,14 +207,14 @@ proc onionStB(s: string, vb: var VBuffer): bool =
address[11] = cast[byte](nport and 0xFF)
vb.writeArray(address)
result = true
except:
except CatchableError:
discard
proc onionBtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[12, byte]
if vb.readArray(buf) == 12:
var nport = (cast[uint16](buf[10]) shl 8) or cast[uint16](buf[11])
var nport = (safeConvert[uint16](buf[10]) shl 8) or safeConvert[uint16](buf[11])
s = Base32Lower.encode(buf.toOpenArray(0, 9))
s.add(":")
s.add($nport)
@@ -237,14 +241,14 @@ proc onion3StB(s: string, vb: var VBuffer): bool =
address[36] = cast[byte](nport and 0xFF)
vb.writeArray(address)
result = true
except:
except CatchableError:
discard
proc onion3BtS(vb: var VBuffer, s: var string): bool =
## ONION address bufferToString() implementation.
var buf: array[37, byte]
if vb.readArray(buf) == 37:
var nport = (cast[uint16](buf[35]) shl 8) or cast[uint16](buf[36])
var nport = (safeConvert[uint16](buf[35]) shl 8) or safeConvert[uint16](buf[36])
s = Base32Lower.encode(buf.toOpenArray(0, 34))
s.add(":")
s.add($nport)
@@ -394,6 +398,9 @@ const
MAProtocol(
mcodec: multiCodec("quic"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("quic-v1"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ip6zone"), kind: Length, size: 0,
coder: TranscoderIP6Zone
@@ -412,6 +419,9 @@ const
MAProtocol(
mcodec: multiCodec("wss"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("tls"), kind: Marker, size: 0
),
MAProtocol(
mcodec: multiCodec("ipfs"), kind: Length, size: 0,
coder: TranscoderP2P
@@ -463,13 +473,24 @@ const
DNS* = mapOr(DNSANY, DNS4, DNS6, DNSADDR)
IP* = mapOr(IP4, IP6)
DNS_OR_IP* = mapOr(DNS, IP)
TCP* = mapOr(mapAnd(DNS, mapEq("tcp")), mapAnd(IP, mapEq("tcp")))
UDP* = mapOr(mapAnd(DNS, mapEq("udp")), mapAnd(IP, mapEq("udp")))
TCP_DNS* = mapAnd(DNS, mapEq("tcp"))
TCP_IP* = mapAnd(IP, mapEq("tcp"))
TCP* = mapOr(TCP_DNS, TCP_IP)
UDP_DNS* = mapAnd(DNS, mapEq("udp"))
UDP_IP* = mapAnd(IP, mapEq("udp"))
UDP* = mapOr(UDP_DNS, UDP_IP)
UTP* = mapAnd(UDP, mapEq("utp"))
QUIC* = mapAnd(UDP, mapEq("quic"))
UNIX* = mapEq("unix")
WS_DNS* = mapAnd(TCP_DNS, mapEq("ws"))
WS_IP* = mapAnd(TCP_IP, mapEq("ws"))
WS* = mapAnd(TCP, mapEq("ws"))
WSS* = mapAnd(TCP, mapEq("wss"))
TLS_WS* = mapOr(mapEq("wss"), mapAnd(mapEq("tls"), mapEq("ws")))
WSS_DNS* = mapAnd(TCP_DNS, TLS_WS)
WSS_IP* = mapAnd(TCP_IP, TLS_WS)
WSS* = mapAnd(TCP, TLS_WS)
WebSockets_DNS* = mapOr(WS_DNS, WSS_DNS)
WebSockets_IP* = mapOr(WS_IP, WSS_IP)
WebSockets* = mapOr(WS, WSS)
Onion3* = mapEq("onion3")
TcpOnion3* = mapAnd(TCP, Onion3)
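
A sketch of the refined patterns in use, assuming the module is imported as `libp2p/multiaddress`; note that `/tls/ws` now satisfies the secure-websocket patterns:

```nim
import libp2p/multiaddress

let wss = MultiAddress.init("/ip4/127.0.0.1/tcp/443/tls/ws").tryGet()
assert WSS.match(wss)          # tls/ws counts as secure websocket
assert WebSockets.match(wss)
assert not WS.match(wss)       # the plain-ws pattern no longer matches it
```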
@@ -758,7 +779,7 @@ proc toString*(value: MultiAddress): MaResult[string] =
res = "/" & parts.join("/")
ok(res)
proc `$`*(value: MultiAddress): string {.raises: [Defect].} =
proc `$`*(value: MultiAddress): string =
## Return string representation of MultiAddress ``value``.
let s = value.toString()
if s.isErr: s.error
@@ -873,6 +894,8 @@ proc getProtocol(name: string): MAProtocol {.inline.} =
proc init*(mtype: typedesc[MultiAddress],
value: string): MaResult[MultiAddress] =
## Initialize MultiAddress object from string representation ``value``.
if len(value) == 0 or value == "/":
return err("multiaddress: Address must not be empty!")
var parts = value.trimRight('/').split('/')
if len(parts[0]) != 0:
err("multiaddress: Invalid MultiAddress, must start with `/`")
@@ -920,7 +943,7 @@ proc init*(mtype: typedesc[MultiAddress],
data: openArray[byte]): MaResult[MultiAddress] =
## Initialize MultiAddress with array of bytes ``data``.
if len(data) == 0:
err("multiaddress: Address could not be empty!")
err("multiaddress: Address must not be empty!")
else:
var res: MultiAddress
res.data = initVBuffer()
@@ -935,7 +958,7 @@ proc init*(mtype: typedesc[MultiAddress]): MultiAddress =
## Initialize empty MultiAddress.
result.data = initVBuffer()
proc init*(mtype: typedesc[MultiAddress], address: ValidIpAddress,
proc init*(mtype: typedesc[MultiAddress], address: IpAddress,
protocol: IpTransportProtocol, port: Port): MultiAddress =
var res: MultiAddress
res.data = initVBuffer()
@@ -1006,7 +1029,7 @@ proc append*(m1: var MultiAddress, m2: MultiAddress): MaResult[void] =
ok()
proc `&`*(m1, m2: MultiAddress): MultiAddress {.
raises: [Defect, LPError].} =
raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``, and returns result.
##
## This procedure performs validation of concatenated result and can raise
@@ -1016,7 +1039,7 @@ proc `&`*(m1, m2: MultiAddress): MultiAddress {.
concat(m1, m2).tryGet()
proc `&=`*(m1: var MultiAddress, m2: MultiAddress) {.
raises: [Defect, LPError].} =
raises: [LPError].} =
## Concatenates two addresses ``m1`` and ``m2``.
##
## This procedure performs validation of concatenated result and can raise
@@ -1060,19 +1083,15 @@ proc matchPart(pat: MaPattern, protos: seq[MultiCodec]): MaPatResult =
proc match*(pat: MaPattern, address: MultiAddress): bool =
## Match full ``address`` using pattern ``pat`` and return ``true`` if
## ``address`` satisfies pattern.
let protos = address.protocols()
if protos.isErr():
return false
let res = matchPart(pat, protos.get())
let protos = address.protocols().valueOr: return false
let res = matchPart(pat, protos)
res.flag and (len(res.rem) == 0)
proc matchPartial*(pat: MaPattern, address: MultiAddress): bool =
## Match prefix part of ``address`` using pattern ``pat`` and return
## ``true`` if ``address`` starts with pattern.
let protos = address.protocols()
if protos.isErr():
return false
let res = matchPart(pat, protos.get())
let protos = address.protocols().valueOr: return false
let res = matchPart(pat, protos)
res.flag
proc `$`*(pat: MaPattern): string =
@@ -1101,16 +1120,16 @@ proc getField*(pb: ProtoBuffer, field: int,
if not(res):
ok(false)
else:
let ma = MultiAddress.init(buffer)
if ma.isOk():
value = ma.get()
ok(true)
else:
err(ProtoError.IncorrectBlob)
value = MultiAddress.init(buffer).valueOr: return err(ProtoError.IncorrectBlob)
ok(true)
proc getRepeatedField*(pb: ProtoBuffer, field: int,
value: var seq[MultiAddress]): ProtoResult[bool] {.
inline.} =
## Read repeated field from protobuf message. ``field`` is field number. If the message is malformed, an error is returned.
## If field is not present in message, then ``ok(false)`` is returned and value is empty. If field is present,
## but no items could be parsed, then ``err(ProtoError.IncorrectBlob)`` is returned and value is empty.
## If field is present and some item could be parsed, then ``true`` is returned and value contains the parsed values.
var items: seq[seq[byte]]
value.setLen(0)
let res = ? pb.getRepeatedField(field, items)
@@ -1118,10 +1137,12 @@ proc getRepeatedField*(pb: ProtoBuffer, field: int,
ok(false)
else:
for item in items:
let ma = MultiAddress.init(item)
if ma.isOk():
value.add(ma.get())
else:
value.setLen(0)
return err(ProtoError.IncorrectBlob)
ok(true)
let ma = MultiAddress.init(item).valueOr:
debug "Unsupported MultiAddress in blob", ma = item
continue
value.add(ma)
if value.len == 0:
err(ProtoError.IncorrectBlob)
else:
ok(true)
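
The rule above, keep what decodes, skip the rest, and fail only when every entry was bad, modelled on plain strings (a stand-in, not the protobuf API):

```nim
import std/strutils
import stew/results

proc decodeAll(items: seq[string]): Result[seq[int], string] =
  var values: seq[int]
  for item in items:
    try:
      values.add(parseInt(item))   # keep what parses
    except ValueError:
      continue                     # skip undecodable entries
  if items.len > 0 and values.len == 0:
    err("IncorrectBlob")           # entries present, none usable
  else:
    ok(values)

assert decodeAll(@["1", "x", "3"]).tryGet() == @[1, 3]
assert decodeAll(@["x"]).isErr()
```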

View File

@@ -13,10 +13,7 @@
## 1. base32z
##
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables
import stew/[base32, base58, base64, results]
@@ -27,17 +24,17 @@ type
MultiBase* = object
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
MBCodeSize = proc(length: int): int {.nimcall, gcsafe, noSideEffect, raises: [].}
MBCodec = object
code: char
name: string
encr: proc(inbytes: openArray[byte],
outbytes: var openArray[char],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
decr: proc(inbytes: openArray[char],
outbytes: var openArray[byte],
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
outlen: var int): MultiBaseStatus {.nimcall, gcsafe, noSideEffect, raises: [].}
encl: MBCodeSize
decl: MBCodeSize

View File

@@ -9,18 +9,13 @@
## This module implements MultiCodec.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables, hashes
import varint, vbuffer
import stew/results
export results
{.deadCodeElim: on.}
## The list of officially supported codecs can be found here:
## https://github.com/multiformats/multicodec/blob/master/table.csv
const MultiCodecList = [
@@ -196,9 +191,11 @@ const MultiCodecList = [
("p2p", 0x01A5),
("http", 0x01E0),
("https", 0x01BB),
("tls", 0x01C0),
("quic", 0x01CC),
("quic-v1", 0x01CD),
("ws", 0x01DD),
("wss", 0x01DE), # not in multicodec list
("wss", 0x01DE),
("p2p-websocket-star", 0x01DF), # not in multicodec list
("p2p-webrtc-star", 0x0113), # not in multicodec list
("p2p-webrtc-direct", 0x0114), # not in multicodec list

View File

@@ -21,10 +21,7 @@
## 1. SKEIN
## 2. MURMUR
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
@@ -45,7 +42,7 @@ const
type
MHashCoderProc* = proc(data: openArray[byte],
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [Defect].}
output: var openArray[byte]) {.nimcall, gcsafe, noSideEffect, raises: [].}
MHash* = object
mcodec*: MultiCodec
size*: int

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[strutils, sequtils, tables]
import chronos, chronicles, stew/byteutils
@@ -28,7 +25,7 @@ const
Ls = "ls\n"
type
Matcher* = proc (proto: string): bool {.gcsafe, raises: [Defect].}
Matcher* = proc (proto: string): bool {.gcsafe, raises: [].}
MultiStreamError* = object of LPError
@@ -134,7 +131,7 @@ proc handle*(
protos: seq[string],
matchers = newSeq[Matcher](),
active: bool = false,
): Future[string] {.async, gcsafe.} =
): Future[string] {.async.} =
trace "Starting multistream negotiation", conn, handshaked = active
var handshaked = active
while not conn.atEof:
@@ -175,10 +172,9 @@ proc handle*(
trace "no handlers", conn, protocol = ms
await conn.writeLp(Na)
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async, gcsafe.} =
proc handle*(m: MultistreamSelect, conn: Connection, active: bool = false) {.async.} =
trace "Starting multistream handler", conn, handshaked = active
var
handshaked = active
protos: seq[string]
matchers: seq[Matcher]
for h in m.handlers:

View File

@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import pkg/[chronos, nimcrypto/utils, chronicles, stew/byteutils]
import pkg/[chronos, chronicles, stew/byteutils]
import ../../stream/connection,
../../utility,
../../varint,
@@ -45,7 +42,7 @@ const MaxMsgSize* = 1 shl 20 # 1mb
proc newInvalidMplexMsgType*(): ref InvalidMplexMsgType =
newException(InvalidMplexMsgType, "invalid message type")
proc readMsg*(conn: Connection): Future[Msg] {.async, gcsafe.} =
proc readMsg*(conn: Connection): Future[Msg] {.async.} =
let header = await conn.readVarint()
trace "read header varint", varint = header, conn

View File

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics, nimcrypto/utils]
import pkg/[chronos, chronicles, metrics]
import ./coder,
../muxer,
../../stream/[bufferstream, connection, streamseq],
@@ -76,8 +73,8 @@ func shortLog*(s: LPChannel): auto =
chronicles.formatIt(LPChannel): shortLog(it)
proc open*(s: LPChannel) {.async, gcsafe.} =
trace "Opening channel", s, conn = s.conn
proc open*(s: LPChannel) {.async.} =
debug "Opening channel", s, conn = s.conn
if s.conn.isClosed:
return
try:
@@ -98,44 +95,44 @@ proc closeUnderlying(s: LPChannel): Future[void] {.async.} =
if s.closedLocal and s.atEof():
await procCall BufferStream(s).close()
proc reset*(s: LPChannel) {.async, gcsafe.} =
proc reset*(s: LPChannel) {.deprecated, async.} =
if s.isClosed:
trace "Already closed", s
debug "Already closed", s
return
s.isClosed = true
s.closedLocal = true
s.localReset = not s.remoteReset
trace "Resetting channel", s, len = s.len
debug "Resetting channel", s, len = s.len
if s.isOpen and not s.conn.isClosed:
# If the connection is still active, notify the other end
proc resetMessage() {.async.} =
try:
trace "sending reset message", s, conn = s.conn
debug "sending reset message", s, conn = s.conn
await s.conn.writeMsg(s.id, s.resetCode) # write reset
except CatchableError as exc:
# No cancellations
await s.conn.close()
trace "Can't send reset message", s, conn = s.conn, msg = exc.msg
debug "Can't send reset message", s, conn = s.conn, msg = exc.msg
asyncSpawn resetMessage()
await s.closeImpl() # noraises, nocancels
trace "Channel reset", s
debug "Channel reset", s
method close*(s: LPChannel) {.async, gcsafe.} =
method close*(s: LPChannel) {.async.} =
## Close channel for writing - a message will be sent to the other peer
## informing them that the channel is closed and that we're waiting for
## their acknowledgement.
if s.closedLocal:
trace "Already closed", s
debug "Already closed", s
return
s.closedLocal = true
trace "Closing channel", s, conn = s.conn, len = s.len
debug "Closing channel", s, conn = s.conn, len = s.len
if s.isOpen and not s.conn.isClosed:
try:
@@ -147,18 +144,18 @@ method close*(s: LPChannel) {.async, gcsafe.} =
# It's harmless that close message cannot be sent - the connection is
# likely down already
await s.conn.close()
trace "Cannot send close message", s, id = s.id, msg = exc.msg
debug "Cannot send close message", s, id = s.id, msg = exc.msg
await s.closeUnderlying() # maybe already eofed
trace "Closed channel", s, len = s.len
debug "Closed channel", s, len = s.len
method initStream*(s: LPChannel) =
if s.objName.len == 0:
s.objName = LPChannelTrackerName
s.timeoutHandler = proc(): Future[void] {.gcsafe.} =
trace "Idle timeout expired, resetting LPChannel", s
debug "Idle timeout expired, resetting LPChannel", s
s.reset()
procCall BufferStream(s).initStream()
@@ -185,7 +182,7 @@ method readOnce*(s: LPChannel,
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(bytes.int64, labelValues=[s.protocol, "in"])
trace "readOnce", s, bytes
debug "readOnce", s, bytes
if bytes == 0:
await s.closeUnderlying()
return bytes
@@ -220,9 +217,13 @@ proc prepareWrite(s: LPChannel, msg: seq[byte]): Future[void] {.async.} =
return
if not s.isOpen:
debug "Opening channel for writing", s
await s.open()
debug "Opened channel", s
debug "Writing msg (prep)", s, msg = msg.len
await s.conn.writeMsg(s.id, s.msgCode, msg)
debug "Wrote msg (prep)", s, msg = msg.len
proc completeWrite(
s: LPChannel, fut: Future[void], msgLen: int): Future[void] {.async.} =
@@ -234,9 +235,11 @@ proc completeWrite(
libp2p_mplex_qtime.time:
await fut
else:
debug "Waiting for complete", s, msg = msgLen
await fut
debug "Completed", s, msg = msgLen
when defined(libp2p_network_protocol_metrics):
when defined(libp2p_network_protocols_metrics):
if s.protocol.len > 0:
libp2p_protocols_bytes.inc(msgLen.int64, labelValues=[s.protocol, "out"])
@@ -251,7 +254,7 @@ proc completeWrite(
except LPStreamEOFError as exc:
raise exc
except CatchableError as exc:
trace "exception in lpchannel write handler", s, msg = exc.msg
debug "exception in lpchannel write handler", s, msg = exc.msg
await s.reset()
await s.conn.close()
raise newLPStreamConnDownError(exc)
@@ -270,6 +273,7 @@ method write*(s: LPChannel, msg: seq[byte]): Future[void] =
# Fast path: Avoid a copy of msg being kept in the closure created by
# `{.async.}` as this drives up memory usage - the conditions are laid out
# in prepareWrite
debug "Writing fast path", s, msg = msg.len
s.conn.writeMsg(s.id, s.msgCode, msg)
else:
prepareWrite(s, msg)
@@ -303,6 +307,6 @@ proc init*(
when chronicles.enabledLogLevel == LogLevel.TRACE:
chann.name = if chann.name.len > 0: chann.name else: $chann.oid
trace "Created new lpchannel", s = chann, id, initiator
debug "Created new lpchannel", s = chann, id, initiator
return chann

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables, sequtils, oids
import chronos, chronicles, stew/byteutils, metrics
@@ -57,15 +54,15 @@ proc newTooManyChannels(): ref TooManyChannels =
newException(TooManyChannels, "max allowed channel count exceeded")
proc newInvalidChannelIdError(): ref InvalidChannelIdError =
newException(InvalidChannelIdError, "max allowed channel count exceeded")
newException(InvalidChannelIdError, "Channel id already taken")
proc cleanupChann(m: Mplex, chann: LPChannel) {.async, inline.} =
proc cleanupChann(m: Mplex, chann: LPChannel) {.async.} =
## remove the local channel from the internal tables
##
try:
await chann.join()
m.channels[chann.initiator].del(chann.id)
trace "cleaned up channel", m, chann
debug "cleaned up channel", m, chann
when defined(libp2p_expensive_metrics):
libp2p_mplex_channels.set(
@@ -79,7 +76,7 @@ proc newStreamInternal*(m: Mplex,
chanId: uint64 = 0,
name: string = "",
timeout: Duration): LPChannel
{.gcsafe, raises: [Defect, InvalidChannelIdError].} =
{.gcsafe, raises: [InvalidChannelIdError].} =
## create new channel/stream
##
let id = if initiator:
@@ -102,7 +99,7 @@ proc newStreamInternal*(m: Mplex,
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
trace "Creating new channel", m, channel = result, id, initiator, name
debug "Creating new channel", m, channel = result, id, initiator, name
m.channels[initiator][id] = result
@@ -119,17 +116,17 @@ proc handleStream(m: Mplex, chann: LPChannel) {.async.} =
##
try:
await m.streamHandler(chann)
trace "finished handling stream", m, chann
debug "finished handling stream", m, chann
doAssert(chann.closed, "connection not closed by handler!")
except CatchableError as exc:
trace "Exception in mplex stream handler", m, chann, msg = exc.msg
debug "Exception in mplex stream handler", m, chann, msg = exc.msg
await chann.reset()
method handle*(m: Mplex) {.async, gcsafe.} =
trace "Starting mplex handler", m
method handle*(m: Mplex) {.async.} =
debug "Starting mplex handler", m
try:
while not m.connection.atEof:
trace "waiting for data", m
debug "waiting for data", m
let
(id, msgType, data) = await m.connection.readMsg()
initiator = bool(ord(msgType) and 1)
@@ -140,13 +137,13 @@ method handle*(m: Mplex) {.async, gcsafe.} =
msgType = msgType
size = data.len
trace "read message from connection", m, data = data.shortLog
debug "read message from connection", m, data = data.shortLog
var channel =
if MessageType(msgType) != MessageType.New:
let tmp = m.channels[initiator].getOrDefault(id, nil)
if tmp == nil:
trace "Channel not found, skipping", m
debug "Channel not found, skipping", m
continue
tmp
@@ -159,11 +156,11 @@ method handle*(m: Mplex) {.async, gcsafe.} =
let name = string.fromBytes(data)
m.newStreamInternal(false, id, name, timeout = m.outChannTimeout)
trace "Processing channel message", m, channel, data = data.shortLog
debug "Processing channel message", m, channel, data = data.shortLog
case msgType:
of MessageType.New:
trace "created channel", m, channel
debug "created channel", m, channel
if not isNil(m.streamHandler):
# Launch handler task
@@ -176,13 +173,13 @@ method handle*(m: Mplex) {.async, gcsafe.} =
allowed = MaxMsgSize, channel
raise newLPStreamLimitError()
trace "pushing data to channel", m, channel, len = data.len
debug "pushing data to channel", m, channel, len = data.len
try:
await channel.pushData(data)
trace "pushed data to channel", m, channel, len = data.len
debug "pushed data to channel", m, channel, len = data.len
except LPStreamClosedError as exc:
# Channel is being closed, but `cleanupChann` was not yet triggered.
trace "pushing data to channel failed", m, channel, len = data.len,
debug "pushing data to channel failed", m, channel, len = data.len,
msg = exc.msg
discard # Ignore message, same as if `cleanupChann` had completed.
@@ -194,16 +191,17 @@ method handle*(m: Mplex) {.async, gcsafe.} =
except CancelledError:
debug "Unexpected cancellation in mplex handler", m
except LPStreamEOFError as exc:
trace "Stream EOF", m, msg = exc.msg
debug "Stream EOF", m, msg = exc.msg
except CatchableError as exc:
debug "Unexpected exception in mplex read loop", m, msg = exc.msg
finally:
await m.close()
trace "Stopped mplex handler", m
debug "Stopped mplex handler", m
proc new*(M: type Mplex,
conn: Connection,
inTimeout, outTimeout: Duration = DefaultChanTimeout,
inTimeout: Duration = DefaultChanTimeout,
outTimeout: Duration = DefaultChanTimeout,
maxChannCount: int = MaxChannelCount): Mplex =
M(connection: conn,
inChannTimeout: inTimeout,
@@ -213,7 +211,7 @@ proc new*(M: type Mplex,
method newStream*(m: Mplex,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
let channel = m.newStreamInternal(timeout = m.inChannTimeout)
if not lazy:
@@ -221,13 +219,13 @@ method newStream*(m: Mplex,
return Connection(channel)
method close*(m: Mplex) {.async, gcsafe.} =
method close*(m: Mplex) {.async.} =
if m.isClosed:
trace "Already closed", m
debug "Already closed", m
return
m.isClosed = true
trace "Closing mplex", m
debug "Closing mplex", m
var channs = toSeq(m.channels[false].values) & toSeq(m.channels[true].values)
@@ -247,4 +245,8 @@ method close*(m: Mplex) {.async, gcsafe.} =
m.channels[false].clear()
m.channels[true].clear()
trace "Closed mplex", m
debug "Closed mplex", m
method getStreams*(m: Mplex): seq[Connection] =
for c in m.channels[false].values: result.add(c)
for c in m.channels[true].values: result.add(c)

View File

@@ -7,14 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos, chronicles
import ../protocols/protocol,
../stream/connection,
import ../stream/connection,
../errors
logScope:
@@ -27,8 +23,8 @@ type
MuxerError* = object of LPError
TooManyChannels* = object of MuxerError
StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [Defect].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [Defect].}
StreamHandler* = proc(conn: Connection): Future[void] {.gcsafe, raises: [].}
MuxerHandler* = proc(muxer: Muxer): Future[void] {.gcsafe, raises: [].}
Muxer* = ref object of RootObj
streamHandler*: StreamHandler
@@ -36,7 +32,7 @@ type
connection*: Connection
# user provider proc that returns a constructed Muxer
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [Defect].}
MuxerConstructor* = proc(conn: Connection): Muxer {.gcsafe, closure, raises: [].}
# this wraps a creator proc that knows how to make muxers
MuxerProvider* = object
@@ -50,11 +46,11 @@ chronicles.formatIt(Muxer): shortLog(it)
# muxer interface
method newStream*(m: Muxer, name: string = "", lazy: bool = false):
Future[Connection] {.base, async, gcsafe.} = discard
method close*(m: Muxer) {.base, async, gcsafe.} =
Future[Connection] {.base, async.} = discard
method close*(m: Muxer) {.base, async.} =
if not isNil(m.connection):
await m.connection.close()
method handle*(m: Muxer): Future[void] {.base, async, gcsafe.} = discard
method handle*(m: Muxer): Future[void] {.base, async.} = discard
proc new*(
T: typedesc[MuxerProvider],
@@ -63,3 +59,5 @@ proc new*(
let muxerProvider = T(newMuxer: creator, codec: codec)
muxerProvider
method getStreams*(m: Muxer): seq[Connection] {.base.} = doAssert false, "not implemented"

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
@@ -25,15 +22,16 @@ logScope:
const
YamuxCodec* = "/yamux/1.0.0"
YamuxVersion = 0.uint8
DefaultWindowSize = 256000
YamuxDefaultWindowSize* = 256000
MaxSendQueueSize = 256000
MaxChannelCount = 200
when defined(libp2p_yamux_metrics):
declareGauge(libp2p_yamux_channels, "yamux channels", labels = ["initiator", "peer"])
declareHistogram libp2p_yamux_send_queue, "message send queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
declareHistogram libp2p_yamux_recv_queue, "message recv queue length (in byte)",
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 1600.0, 6400.0, 25600.0, 256000.0]
buckets = [0.0, 100.0, 250.0, 1000.0, 2000.0, 3200.0, 6400.0, 25600.0, 256000.0]
type
YamuxError* = object of CatchableError
@@ -62,7 +60,7 @@ type
streamId: uint32
length: uint32
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async, gcsafe.} =
proc readHeader(conn: LPStream): Future[YamuxHeader] {.async.} =
var buffer: array[12, byte]
await conn.readExactly(addr buffer[0], 12)
@@ -84,7 +82,7 @@ proc `$`(header: YamuxHeader): string =
proc encode(header: YamuxHeader): array[12, byte] =
result[0] = header.version
result[1] = uint8(header.msgType)
result[2..3] = toBytesBE(cast[uint16](header.flags))
result[2..3] = toBytesBE(uint16(cast[uint8](header.flags))) # workaround https://github.com/nim-lang/Nim/issues/21789
result[4..7] = toBytesBE(header.streamId)
result[8..11] = toBytesBE(header.length)
@@ -146,6 +144,7 @@ type
recvWindow: int
sendWindow: int
maxRecvWindow: int
maxSendQueueSize: int
conn: Connection
isSrc: bool
opened: bool
@@ -172,9 +171,18 @@ proc `$`(channel: YamuxChannel): string =
if s.len > 0:
result &= " {" & s.foldl(if a != "": a & ", " & b else: b, "") & "}"
proc sendQueueBytes(channel: YamuxChannel, limit: bool = false): int =
for (elem, sent, _) in channel.sendQueue:
result.inc(min(elem.len - sent, if limit: channel.maxRecvWindow div 3 else: elem.len - sent))
proc lengthSendQueue(channel: YamuxChannel): int =
## Returns the length of what remains to be sent
##
channel.sendQueue.foldl(a + b.data.len - b.sent, 0)
proc lengthSendQueueWithLimit(channel: YamuxChannel): int =
## Returns the length of what remains to be sent, but limit the size of big messages.
##
# For leniency, cap each big message at a third of maxSendQueueSize.
# This value is arbitrary and not in the spec; it allows storing up to
# 3 big messages if the peer is stalling.
channel.sendQueue.foldl(a + min(b.data.len - b.sent, channel.maxSendQueueSize div 3), 0)
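
A self-contained model of the two queue measurements, with illustrative numbers:

```nim
import std/sequtils

let
  maxSendQueueSize = 256_000
  sendQueue = @[(data: newSeq[byte](200_000), sent: 50_000),
                (data: newSeq[byte](1_000), sent: 0)]

# Exact backlog: bytes queued but not yet sent.
assert sendQueue.foldl(a + b.data.len - b.sent, 0) == 151_000

# Capped backlog: each big message counts at most maxSendQueueSize div 3,
# so up to 3 stalled big messages fit before the channel is reset.
assert sendQueue.foldl(
  a + min(b.data.len - b.sent, maxSendQueueSize div 3), 0) == 86_333
```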
proc actuallyClose(channel: YamuxChannel) {.async.} =
if channel.closedLocally and channel.sendQueue.len == 0 and
@@ -186,15 +194,19 @@ proc remoteClosed(channel: YamuxChannel) {.async.} =
channel.closedRemotely.complete()
await channel.actuallyClose()
method closeImpl*(channel: YamuxChannel) {.async, gcsafe.} =
method closeImpl*(channel: YamuxChannel) {.async.} =
if not channel.closedLocally:
channel.closedLocally = true
channel.isEof = true
if channel.isReset == false and channel.sendQueue.len == 0:
await channel.conn.write(YamuxHeader.data(channel.id, 0, {Fin}))
await channel.actuallyClose()
proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. It's because the peer we're connected to can send us data before
# it receives the reset.
if channel.isReset:
return
trace "Reset channel"
@@ -215,11 +227,14 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async.} =
await channel.remoteClosed()
channel.receivedData.fire()
if not isLocal:
# If we reset locally, we want to flush up to a maximum of recvWindow
# bytes. We use the recvWindow in the proc cleanupChann.
# If the reset is remote, there's no reason to flush anything.
channel.recvWindow = 0
proc updateRecvWindow(channel: YamuxChannel) {.async.} =
## Send to the peer a window update when the recvWindow is empty enough
##
# To avoid spamming a window update every time a byte is read,
# we send one each time half of the maxRecvWindow has been read.
let inWindow = channel.recvWindow + channel.recvQueue.len
if inWindow > channel.maxRecvWindow div 2:
return
@@ -237,6 +252,7 @@ method readOnce*(
pbytes: pointer,
nbytes: int):
Future[int] {.async.} =
## Read from a yamux channel
if channel.isReset:
raise if channel.remoteReset:
@@ -252,6 +268,7 @@ method readOnce*(
await channel.closedRemotely or channel.receivedData.wait()
if channel.closedRemotely.done() and channel.recvQueue.len == 0:
channel.returnedEof = true
channel.isEof = true
return 0
let toRead = min(channel.recvQueue.len, nbytes)
@@ -281,21 +298,22 @@ proc trySend(channel: YamuxChannel) {.async.} =
return
channel.isSending = true
defer: channel.isSending = false
while channel.sendQueue.len != 0:
channel.sendQueue.keepItIf(not (it.fut.cancelled() and it.sent == 0))
if channel.sendWindow == 0:
trace "send window empty"
if channel.sendQueueBytes(true) > channel.maxRecvWindow:
debug "channel send queue too big, resetting", maxSendWindow=channel.maxRecvWindow,
currentQueueSize = channel.sendQueueBytes(true)
trace "trying to send while the sendWindow is empty"
if channel.lengthSendQueueWithLimit() > channel.maxSendQueueSize:
trace "channel send queue too big, resetting", maxSendQueueSize=channel.maxSendQueueSize,
currentQueueSize = channel.lengthSendQueueWithLimit()
try:
await channel.reset(true)
except CatchableError as exc:
debug "failed to reset", msg=exc.msg
warn "failed to reset", msg=exc.msg
break
let
bytesAvailable = channel.sendQueueBytes()
bytesAvailable = channel.lengthSendQueue()
toSend = min(channel.sendWindow, bytesAvailable)
var
sendBuffer = newSeqUninitialized[byte](toSend + 12)
@@ -310,20 +328,24 @@ proc trySend(channel: YamuxChannel) {.async.} =
var futures: seq[Future[void]]
while inBuffer < toSend:
# concatenate the different messages we try to send into one buffer
let (data, sent, fut) = channel.sendQueue[0]
let bufferToSend = min(data.len - sent, toSend - inBuffer)
sendBuffer.toOpenArray(12, 12 + toSend - 1)[inBuffer..<(inBuffer+bufferToSend)] =
channel.sendQueue[0].data.toOpenArray(sent, sent + bufferToSend - 1)
channel.sendQueue[0].sent.inc(bufferToSend)
if channel.sendQueue[0].sent >= data.len:
# if every byte of the message is in the buffer, add the write future to the
# sequence of futures to be completed (or failed) when the buffer is sent
futures.add(fut)
channel.sendQueue.delete(0)
inBuffer.inc(bufferToSend)
trace "build send buffer", h = $header, msg=string.fromBytes(sendBuffer[12..^1])
trace "try to send the buffer", h = $header
channel.sendWindow.dec(toSend)
try: await channel.conn.write(sendBuffer)
except CatchableError as exc:
trace "failed to send the buffer"
let connDown = newLPStreamConnDownError(exc)
for fut in futures.items():
fut.fail(connDown)
@@ -334,6 +356,8 @@ proc trySend(channel: YamuxChannel) {.async.} =
channel.activity = true
method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
## Write to yamux channel
##
result = newFuture[void]("Yamux Send")
if channel.remoteReset:
result.fail(newLPStreamResetError())
@@ -346,15 +370,20 @@ method write*(channel: YamuxChannel, msg: seq[byte]): Future[void] =
return result
channel.sendQueue.add((msg, 0, result))
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.sendQueueBytes().int64)
libp2p_yamux_send_queue.observe(channel.lengthSendQueue().int64)
asyncSpawn channel.trySend()
proc open*(channel: YamuxChannel) {.async, gcsafe.} =
proc open(channel: YamuxChannel) {.async.} =
## Open a yamux channel by sending a window update with Syn or Ack flag
##
if channel.opened:
trace "Try to open channel twice"
return
channel.opened = true
await channel.conn.write(YamuxHeader.data(channel.id, 0, {if channel.isSrc: Syn else: Ack}))
await channel.conn.write(YamuxHeader.windowUpdate(
channel.id,
uint32(max(channel.maxRecvWindow - YamuxDefaultWindowSize, 0)),
{if channel.isSrc: Syn else: Ack}))
method getWrapped*(channel: YamuxChannel): Connection = channel.conn
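
The opening frame advertises only the delta beyond the 256k window the peer already assumes. A minimal model of that computation:

```nim
const YamuxDefaultWindowSize = 256_000

proc openingWindowDelta(maxRecvWindow: int): uint32 =
  uint32(max(maxRecvWindow - YamuxDefaultWindowSize, 0))

assert openingWindowDelta(512_000) == 256_000'u32  # grow beyond the default
assert openingWindowDelta(256_000) == 0'u32        # nothing extra to announce
```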
@@ -365,12 +394,14 @@ type
currentId: uint32
isClosed: bool
maxChannCount: int
windowSize: int
maxSendQueueSize: int
proc lenBySrc(m: Yamux, isSrc: bool): int =
for v in m.channels.values():
if v.isSrc == isSrc: result += 1
proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
proc cleanupChannel(m: Yamux, channel: YamuxChannel) {.async.} =
await channel.join()
m.channels.del(channel.id)
when defined(libp2p_yamux_metrics):
@@ -378,12 +409,19 @@ proc cleanupChann(m: Yamux, channel: YamuxChannel) {.async.} =
if channel.isReset and channel.recvWindow > 0:
m.flushed[channel.id] = channel.recvWindow
proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
proc createStream(m: Yamux, id: uint32, isSrc: bool,
recvWindow: int, maxSendQueueSize: int): YamuxChannel =
# During initialization, recvWindow can be larger than maxRecvWindow.
# This is because the peer we're connected to will always assume
# that the initial recvWindow is 256k.
# To resolve this contradiction, no window update is sent until recvWindow
# drops below maxRecvWindow.
result = YamuxChannel(
id: id,
maxRecvWindow: DefaultWindowSize,
recvWindow: DefaultWindowSize,
sendWindow: DefaultWindowSize,
maxRecvWindow: recvWindow,
recvWindow: if recvWindow > YamuxDefaultWindowSize: recvWindow else: YamuxDefaultWindowSize,
sendWindow: YamuxDefaultWindowSize,
maxSendQueueSize: maxSendQueueSize,
isSrc: isSrc,
conn: m.connection,
receivedData: newAsyncEvent(),
@@ -401,7 +439,7 @@ proc createStream(m: Yamux, id: uint32, isSrc: bool): YamuxChannel =
when defined(libp2p_agents_metrics):
result.shortAgent = m.connection.shortAgent
m.channels[id] = result
asyncSpawn m.cleanupChann(result)
asyncSpawn m.cleanupChannel(result)
trace "created channel", id, pid=m.connection.peerId
when defined(libp2p_yamux_metrics):
libp2p_yamux_channels.set(m.lenBySrc(isSrc).int64, [$isSrc, $result.peerId])
@@ -422,7 +460,7 @@ method close*(m: Yamux) {.async.} =
trace "Closed yamux"
proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
## call the muxer stream handler for this channel
## Call the muxer stream handler for this channel
##
try:
await m.streamHandler(channel)
@@ -432,7 +470,7 @@ proc handleStream(m: Yamux, channel: YamuxChannel) {.async.} =
trace "Exception in yamux stream handler", msg = exc.msg
await channel.reset()
method handle*(m: Yamux) {.async, gcsafe.} =
method handle*(m: Yamux) {.async.} =
trace "Starting yamux handler", pid=m.connection.peerId
try:
while not m.connection.atEof:
@@ -456,9 +494,11 @@ method handle*(m: Yamux) {.async, gcsafe.} =
else:
if header.streamId in m.flushed:
m.flushed.del(header.streamId)
if header.streamId mod 2 == m.currentId mod 2:
debug "Peer used our reserved stream id, skipping", id=header.streamId, currentId=m.currentId, peerId=m.connection.peerId
raise newException(YamuxError, "Peer used our reserved stream id")
let newStream = m.createStream(header.streamId, false)
let newStream = m.createStream(header.streamId, false, m.windowSize, m.maxSendQueueSize)
if m.channels.len >= m.maxChannCount:
await newStream.reset()
continue
@@ -508,22 +548,30 @@ method handle*(m: Yamux) {.async, gcsafe.} =
await m.close()
trace "Stopped yamux handler"
method getStreams*(m: Yamux): seq[Connection] =
for c in m.channels.values: result.add(c)
method newStream*(
m: Yamux,
name: string = "",
lazy: bool = false): Future[Connection] {.async, gcsafe.} =
lazy: bool = false): Future[Connection] {.async.} =
if m.channels.len > m.maxChannCount - 1:
raise newException(TooManyChannels, "max allowed channel count exceeded")
let stream = m.createStream(m.currentId, true)
let stream = m.createStream(m.currentId, true, m.windowSize, m.maxSendQueueSize)
m.currentId += 2
if not lazy:
await stream.open()
return stream
proc new*(T: type[Yamux], conn: Connection, maxChannCount: int = MaxChannelCount): T =
proc new*(T: type[Yamux], conn: Connection,
maxChannCount: int = MaxChannelCount,
windowSize: int = YamuxDefaultWindowSize,
maxSendQueueSize: int = MaxSendQueueSize): T =
T(
connection: conn,
currentId: if conn.dir == Out: 1 else: 2,
maxChannCount: maxChannCount
maxChannCount: maxChannCount,
windowSize: windowSize,
maxSendQueueSize: maxSendQueueSize
)
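
A hypothetical instantiation with the new knobs; `conn` stands for an already-established `Connection` and the sizes are illustrative:

```nim
let mux = Yamux.new(
  conn,
  windowSize = 1_000_000,      # per-channel receive window
  maxSendQueueSize = 512_000)  # reset threshold for stalled peers
```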

View File

@@ -7,15 +7,13 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import
std/[streams, strutils, sets, sequtils],
chronos, chronicles, stew/byteutils,
dnsclientpkg/[protocol, types]
dnsclientpkg/[protocol, types],
../utility
import
nameresolver
@@ -80,9 +78,7 @@ proc getDnsResponse(
# parseResponse can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
return parseResponse(string.fromBytes(rawResponse))
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
return exceptionToAssert: parseResponse(string.fromBytes(rawResponse))
finally:
await sock.closeWait()
@@ -118,9 +114,7 @@ method resolveIp*(
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
resolvedAddresses.incl(
try: answer.toString()
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
exceptionToAssert(answer.toString())
)
except CancelledError as e:
raise e
@@ -151,9 +145,13 @@ method resolveTxt*(
for _ in 0 ..< self.nameServers.len:
let server = self.nameServers[0]
try:
# toString can have a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
let response = await getDnsResponse(server, address, TXT)
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
return response.answers.mapIt(it.toString())
return exceptionToAssert:
trace "Got TXT response", server = $server, answer=response.answers.mapIt(it.toString())
response.answers.mapIt(it.toString())
except CancelledError as e:
raise e
except CatchableError as e:
@@ -161,11 +159,6 @@ method resolveTxt*(
self.nameServers.add(self.nameServers[0])
self.nameServers.delete(0)
continue
except Exception as e:
# toString can has a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert e.msg
debug "Failed to resolve TXT, returning empty set"
return @[]
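
`exceptionToAssert` centralizes the pattern being removed here. A simplified stand-in (the real helper lives in nim-libp2p's utility module):

```nim
import std/strutils

template exceptionToAssert(body: untyped): untyped =
  try:
    body
  except CatchableError as exc:
    raise exc            # real errors still propagate
  except Exception as exc:
    raiseAssert exc.msg  # impossible raises become Defects

# Wraps calls whose declared raises list is wider than reality:
assert exceptionToAssert(parseInt("42")) == 42
```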

View File

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import
std/[streams, strutils, tables],
std/tables,
chronos, chronicles
import nameresolver

View File

@@ -7,16 +7,13 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[sugar, sets, sequtils, strutils]
import
chronos,
chronicles,
stew/[endians2, byteutils]
stew/endians2
import ".."/[multiaddress, multicodec]
logScope:
@@ -55,7 +52,7 @@ proc resolveOneAddress(
ma: MultiAddress,
domain: Domain = Domain.AF_UNSPEC,
prefix = ""): Future[seq[MultiAddress]]
{.async, raises: [Defect, MaError, TransportAddressError].} =
{.async.} =
#Resolve a single address
var pbuf: array[2, byte]
@@ -121,7 +118,7 @@ proc resolveMAddress*(
if not DNS.matchPartial(address):
res.incl(address)
else:
let code = address[0].get().protoCode().get()
let code = address[0].tryGet().protoCode().tryGet()
let seq = case code:
of multiCodec("dns"):
await self.resolveOneAddress(address)
@@ -132,7 +129,7 @@ proc resolveMAddress*(
of multiCodec("dnsaddr"):
await self.resolveDnsAddr(address)
else:
doAssert false
assert false
@[address]
for ad in seq:
res.incl(ad)

View File

@@ -0,0 +1,86 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[sequtils, tables, sugar]
import chronos
import multiaddress, multicodec
type
## Manages MultiAddresses observed by remote peers. It keeps track of the most observed IP and IP/Port.
ObservedAddrManager* = ref object of RootObj
observedIPsAndPorts: seq[MultiAddress]
maxSize: int
minCount: int
proc addObservation*(self: ObservedAddrManager, observedAddr: MultiAddress): bool =
## Adds a new observed MultiAddress. If the number of observations exceeds maxSize, the oldest one is removed.
if self.observedIPsAndPorts.len >= self.maxSize:
self.observedIPsAndPorts.del(0)
self.observedIPsAndPorts.add(observedAddr)
return true
proc getProtocol(self: ObservedAddrManager, observations: seq[MultiAddress], multiCodec: MultiCodec): Opt[MultiAddress] =
var countTable = toCountTable(observations)
countTable.sort()
var orderedPairs = toSeq(countTable.pairs)
for (ma, count) in orderedPairs:
let protoCode = (ma[0].flatMap(protoCode)).valueOr: continue
if protoCode == multiCodec and count >= self.minCount:
return Opt.some(ma)
return Opt.none(MultiAddress)
proc getMostObservedProtocol(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
## Returns the most observed IP address, or none if the number of observations is less than minCount.
let observedIPs = collect:
for observedIp in self.observedIPsAndPorts:
observedIp[0].valueOr: continue
return self.getProtocol(observedIPs, multiCodec)
proc getMostObservedProtoAndPort(self: ObservedAddrManager, multiCodec: MultiCodec): Opt[MultiAddress] =
## Returns the most observed IP/Port address, or none if the number of observations is less than minCount.
return self.getProtocol(self.observedIPsAndPorts, multiCodec)
proc getMostObservedProtosAndPorts*(self: ObservedAddrManager): seq[MultiAddress] =
## Returns the most observed IP4/Port and IP6/Port addresses, or an empty seq if the number
## of observations is less than minCount.
var res: seq[MultiAddress]
self.getMostObservedProtoAndPort(multiCodec("ip4")).withValue(ip4):
res.add(ip4)
self.getMostObservedProtoAndPort(multiCodec("ip6")).withValue(ip6):
res.add(ip6)
return res
proc guessDialableAddr*(
self: ObservedAddrManager,
ma: MultiAddress): MultiAddress =
## Replaces the first protocol value of the address with the most observed
## value matching its protocol code.
## If no such observed value is available, the original MultiAddress is returned.
let
maFirst = ma[0].valueOr: return ma
maRest = ma[1..^1].valueOr: return ma
maFirstProto = maFirst.protoCode().valueOr: return ma
let observedIP = self.getMostObservedProtocol(maFirstProto).valueOr: return ma
return concat(observedIP, maRest).valueOr: ma
proc `$`*(self: ObservedAddrManager): string =
## Returns a string representation of the ObservedAddrManager.
return "IPs and Ports: " & $self.observedIPsAndPorts
proc new*(
T: typedesc[ObservedAddrManager],
maxSize = 10,
minCount = 3): T =
## Creates a new ObservedAddrManager.
return T(
observedIPsAndPorts: newSeq[MultiAddress](),
maxSize: maxSize,
minCount: minCount)
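
A sketch of the intended flow; the import path is illustrative, and `==` on MultiAddress is assumed from the multiaddress module:

```nim
import libp2p/multiaddress

let mgr = ObservedAddrManager.new(maxSize = 10, minCount = 3)
let seen = MultiAddress.init("/ip4/8.8.8.8/tcp/4040").tryGet()
for _ in 0 ..< 3:                    # three peers report the same address
  discard mgr.addObservation(seen)
assert mgr.getMostObservedProtosAndPorts() == @[seen]
```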

View File

@@ -9,10 +9,7 @@
## This module implements the API for a libp2p peer.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
{.push public.}
import
@@ -44,10 +41,7 @@ func shortLog*(pid: PeerId): string =
if len(spid) > 10:
spid[3] = '*'
when (NimMajor, NimMinor) > (1, 4):
spid.delete(4 .. spid.high - 6)
else:
spid.delete(4, spid.high - 6)
spid.delete(4 .. spid.high - 6)
spid
@@ -191,19 +185,11 @@ proc random*(t: typedesc[PeerId], rng = newRng()): Result[PeerId, cstring] =
func match*(pid: PeerId, pubkey: PublicKey): bool =
## Returns ``true`` if ``pid`` matches public key ``pubkey``.
let p = PeerId.init(pubkey)
if p.isErr:
false
else:
pid == p.get()
PeerId.init(pubkey) == Result[PeerId, cstring].ok(pid)
func match*(pid: PeerId, seckey: PrivateKey): bool =
## Returns ``true`` if ``pid`` matches private key ``seckey``.
let p = PeerId.init(seckey)
if p.isErr:
false
else:
pid == p.get()
PeerId.init(seckey) == Result[PeerId, cstring].ok(pid)
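
The one-liner works because Result equality compares kinds first, so an error never equals an ok-wrapped pid. In miniature:

```nim
import stew/results

type R = Result[int, cstring]
assert R.ok(1) == R.ok(1)        # same kind, same value
assert R.err("boom") != R.ok(1)  # an err never matches ok(pid)
```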
## Serialization/Deserialization helpers

View File

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
{.push public.}
import std/[options, sequtils]
import std/sequtils
import pkg/[chronos, chronicles, stew/results]
import peerid, multiaddress, multicodec, crypto/crypto, routing_record, errors, utility
@@ -26,7 +23,7 @@ type
AddressMapper* =
proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]]
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}
PeerInfo* {.public.} = ref object
peerId*: PeerId
@@ -56,15 +53,12 @@ proc update*(p: PeerInfo) {.async.} =
for mapper in p.addressMappers:
p.addrs = await mapper(p.addrs)
let sprRes = SignedPeerRecord.init(
p.signedPeerRecord = SignedPeerRecord.init(
p.privateKey,
PeerRecord.init(p.peerId, p.addrs)
)
if sprRes.isOk:
p.signedPeerRecord = sprRes.get()
else:
discard
#info "Can't update the signed peer record"
).valueOr():
info "Can't update the signed peer record"
return
proc addrs*(p: PeerInfo): seq[MultiAddress] =
p.addrs
@@ -99,7 +93,7 @@ proc new*(
agentVersion: string = "",
addressMappers = newSeq[AddressMapper](),
): PeerInfo
{.raises: [Defect, LPError].} =
{.raises: [LPError].} =
let pubkey = try:
key.getPublicKey().tryGet()

View File

@@ -16,15 +16,12 @@ runnableExamples:
# Create a custom book type
type MoodBook = ref object of PeerBook[string]
var somePeerId = PeerId.random().get()
var somePeerId = PeerId.random().expect("get random key")
peerStore[MoodBook][somePeerId] = "Happy"
doAssert peerStore[MoodBook][somePeerId] == "Happy"
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import
std/[tables, sets, options, macros],
@@ -45,7 +42,7 @@ type
# Handler types #
#################
PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [Defect].}
PeerBookChangeHandler* = proc(peerId: PeerId) {.gcsafe, raises: [].}
#########
# Books #
@@ -161,20 +158,20 @@ proc updatePeerInfo*(
if info.addrs.len > 0:
peerStore[AddressBook][info.peerId] = info.addrs
if info.pubkey.isSome:
peerStore[KeyBook][info.peerId] = info.pubkey.get()
info.pubkey.withValue(pubkey):
peerStore[KeyBook][info.peerId] = pubkey
if info.agentVersion.isSome:
peerStore[AgentBook][info.peerId] = info.agentVersion.get().string
info.agentVersion.withValue(agentVersion):
peerStore[AgentBook][info.peerId] = agentVersion.string
if info.protoVersion.isSome:
peerStore[ProtoVersionBook][info.peerId] = info.protoVersion.get().string
info.protoVersion.withValue(protoVersion):
peerStore[ProtoVersionBook][info.peerId] = protoVersion.string
if info.protos.len > 0:
peerStore[ProtoBook][info.peerId] = info.protos
if info.signedPeerRecord.isSome:
peerStore[SPRBook][info.peerId] = info.signedPeerRecord.get()
info.signedPeerRecord.withValue(signedPeerRecord):
peerStore[SPRBook][info.peerId] = signedPeerRecord
let cleanupPos = peerStore.toClean.find(info.peerId)
if cleanupPos >= 0:
@@ -210,13 +207,19 @@ proc identify*(
let info = await peerStore.identify.identify(stream, stream.peerId)
when defined(libp2p_agents_metrics):
var knownAgent = "unknown"
if info.agentVersion.isSome and info.agentVersion.get().len > 0:
let shortAgent = info.agentVersion.get().split("/")[0].safeToLowerAscii()
if shortAgent.isOk() and KnownLibP2PAgentsSeq.contains(shortAgent.get()):
knownAgent = shortAgent.get()
var
knownAgent = "unknown"
shortAgent = info.agentVersion.get("").split("/")[0].safeToLowerAscii().get("")
if KnownLibP2PAgentsSeq.contains(shortAgent):
knownAgent = shortAgent
muxer.connection.setShortAgent(knownAgent)
peerStore.updatePeerInfo(info)
finally:
await stream.closeWithEOF()
proc getMostObservedProtosAndPorts*(self: PeerStore): seq[MultiAddress] =
return self.identify.observedAddrManager.getMostObservedProtosAndPorts()
proc guessDialableAddr*(self: PeerStore, ma: MultiAddress): MultiAddress =
return self.identify.observedAddrManager.guessDialableAddr(ma)
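withValue, used above for each optional identify field, runs its body only when a value is present, binding it to the given name. The real template lives in libp2p's utility module; a minimal re-implementation over std/options, just to show the shape:

import std/options

template withValue[T](opt: Option[T], name, body: untyped) =
  if opt.isSome:
    let name = opt.get()
    body

var stored = ""
let agent = some("nim-libp2p/1.0")
agent.withValue(v):
  stored = v               # executes only because `agent` holds a value
doAssert stored == "nim-libp2p/1.0"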

View File

@@ -9,10 +9,7 @@
## This module implements a minimal subset of Google's ProtoBuf primitives.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import ../varint, ../utility, stew/[endians2, results]
export results, utility
@@ -72,12 +69,12 @@ type
hint | hint32 | hint64 | float32 | float64
const
SupportedWireTypes* = {
int(ProtoFieldKind.Varint),
int(ProtoFieldKind.Fixed64),
int(ProtoFieldKind.Length),
int(ProtoFieldKind.Fixed32)
}
SupportedWireTypes* = @[
uint64(ProtoFieldKind.Varint),
uint64(ProtoFieldKind.Fixed64),
uint64(ProtoFieldKind.Length),
uint64(ProtoFieldKind.Fixed32)
]
template checkFieldNumber*(i: int) =
doAssert((i > 0 and i < (1 shl 29)) and not(i >= 19000 and i <= 19999),
@@ -579,26 +576,18 @@ proc getField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
proc getField*(pb: ProtoBuffer, field: int,
output: var ProtoBuffer): ProtoResult[bool] {.inline.} =
var buffer: seq[byte]
let res = pb.getField(field, buffer)
if res.isOk():
if res.get():
output = initProtoBuffer(buffer)
ok(true)
else:
ok(false)
if ? pb.getField(field, buffer):
output = initProtoBuffer(buffer)
ok(true)
else:
err(res.error)
ok(false)
proc getRequiredField*[T](pb: ProtoBuffer, field: int,
output: var T): ProtoResult[void] {.inline.} =
let res = pb.getField(field, output)
if res.isOk():
if res.get():
ok()
else:
err(RequiredFieldMissing)
if ? pb.getField(field, output):
ok()
else:
err(res.error)
err(RequiredFieldMissing)
proc getRepeatedField*[T: seq[byte]|string](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
@@ -678,14 +667,10 @@ proc getRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
proc getRequiredRepeatedField*[T](pb: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[void] {.inline.} =
let res = pb.getRepeatedField(field, output)
if res.isOk():
if res.get():
ok()
else:
err(RequiredFieldMissing)
if ? pb.getRepeatedField(field, output):
ok()
else:
err(res.error)
err(RequiredFieldMissing)
proc getPackedRepeatedField*[T: ProtoScalar](data: ProtoBuffer, field: int,
output: var seq[T]): ProtoResult[bool] =
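The getField rewrites above replace nested isOk/get checks with the ? operator, which unwraps an ok value and otherwise returns the error from the enclosing proc. A standalone sketch, assuming only stew/results:

import stew/results

proc half(n: int): Result[int, string] =
  if n mod 2 == 0: ok(n div 2)
  else: err("odd: " & $n)

proc quarter(n: int): Result[int, string] =
  let h = ? half(n)        # on err, returns that err to our caller at once
  half(h)                  # otherwise continue with the unwrapped value

doAssert quarter(8) == Result[int, string].ok(2)
doAssert quarter(6).isErr()   # 6 div 2 is 3, and half(3) fails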

View File

@@ -7,21 +7,15 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/[results, objects]
import stew/results
import chronos, chronicles
import ../../../switch,
../../../multiaddress,
../../../peerid
import core
export core
logScope:
topics = "libp2p autonat"
@@ -29,8 +23,8 @@ type
AutonatClient* = ref object of RootObj
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -38,15 +32,13 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[MultiAddress] = newSeq[MultiAddress]()):
Future[MultiAddress] {.base, async.} =
proc getResponseOrRaise(autonatMsg: Option[AutonatMsg]): AutonatDialResponse {.raises: [UnpackError, AutonatError].} =
if autonatMsg.isNone() or
autonatMsg.get().msgType != DialResponse or
autonatMsg.get().response.isNone() or
(autonatMsg.get().response.get().status == Ok and
autonatMsg.get().response.get().ma.isNone()):
raise newException(AutonatError, "Unexpected response")
else:
autonatMsg.get().response.get()
proc getResponseOrRaise(autonatMsg: Opt[AutonatMsg]): AutonatDialResponse {.raises: [AutonatError].} =
autonatMsg.withValue(msg):
if msg.msgType == DialResponse:
msg.response.withValue(res):
if not (res.status == Ok and res.ma.isNone()):
return res
raise newException(AutonatError, "Unexpected response")
let conn =
try:
@@ -71,7 +63,7 @@ method dialMe*(self: AutonatClient, switch: Switch, pid: PeerId, addrs: seq[Mult
let response = getResponseOrRaise(AutonatMsg.decode(await conn.readLp(1024)))
return case response.status:
of ResponseStatus.Ok:
response.ma.get()
response.ma.tryGet()
of ResponseStatus.DialError:
raise newException(AutonatUnreachableError, "Peer could not dial us back: " & response.text.get(""))
else:

View File

@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[options, sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import ../../../multiaddress,
@@ -42,26 +38,29 @@ type
InternalError = 300
AutonatPeerInfo* = object
id*: Option[PeerId]
id*: Opt[PeerId]
addrs*: seq[MultiAddress]
AutonatDial* = object
peerInfo*: Option[AutonatPeerInfo]
peerInfo*: Opt[AutonatPeerInfo]
AutonatDialResponse* = object
status*: ResponseStatus
text*: Option[string]
ma*: Option[MultiAddress]
text*: Opt[string]
ma*: Opt[MultiAddress]
AutonatMsg* = object
msgType*: MsgType
dial*: Option[AutonatDial]
response*: Option[AutonatDialResponse]
dial*: Opt[AutonatDial]
response*: Opt[AutonatDialResponse]
NetworkReachability* {.pure.} = enum
Unknown, NotReachable, Reachable
proc encode(p: AutonatPeerInfo): ProtoBuffer =
result = initProtoBuffer()
if p.id.isSome():
result.write(1, p.id.get())
p.id.withValue(id):
result.write(1, id)
for ma in p.addrs:
result.write(2, ma.data.buffer)
result.finish()
@@ -70,8 +69,8 @@ proc encode*(d: AutonatDial): ProtoBuffer =
result = initProtoBuffer()
result.write(1, MsgType.Dial.uint)
var dial = initProtoBuffer()
if d.peerInfo.isSome():
dial.write(1, encode(d.peerInfo.get()))
d.peerInfo.withValue(pinfo):
dial.write(1, encode(pinfo))
dial.finish()
result.write(2, dial.buffer)
result.finish()
@@ -81,72 +80,60 @@ proc encode*(r: AutonatDialResponse): ProtoBuffer =
result.write(1, MsgType.DialResponse.uint)
var bufferResponse = initProtoBuffer()
bufferResponse.write(1, r.status.uint)
if r.text.isSome():
bufferResponse.write(2, r.text.get())
if r.ma.isSome():
bufferResponse.write(3, r.ma.get())
r.text.withValue(text):
bufferResponse.write(2, text)
r.ma.withValue(ma):
bufferResponse.write(3, ma)
bufferResponse.finish()
result.write(3, bufferResponse.buffer)
result.finish()
proc encode*(msg: AutonatMsg): ProtoBuffer =
if msg.dial.isSome():
return encode(msg.dial.get())
if msg.response.isSome():
return encode(msg.response.get())
msg.dial.withValue(dial):
return encode(dial)
msg.response.withValue(res):
return encode(res)
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Option[AutonatMsg] =
proc decode*(_: typedesc[AutonatMsg], buf: seq[byte]): Opt[AutonatMsg] =
var
msgTypeOrd: uint32
pbDial: ProtoBuffer
pbResponse: ProtoBuffer
msg: AutonatMsg
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbDial)
r3 = pb.getField(3, pbResponse)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(AutonatMsg)
let pb = initProtoBuffer(buf)
if r1.get() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(AutonatMsg)
if r2.get():
if ? pb.getField(1, msgTypeOrd).toOpt() and not checkedEnumAssign(msg.msgType, msgTypeOrd):
return Opt.none(AutonatMsg)
if ? pb.getField(2, pbDial).toOpt():
var
pbPeerInfo: ProtoBuffer
dial: AutonatDial
let
r4 = pbDial.getField(1, pbPeerInfo)
if r4.isErr(): return none(AutonatMsg)
let r4 = ? pbDial.getField(1, pbPeerInfo).toOpt()
var peerInfo: AutonatPeerInfo
if r4.get():
if r4:
var pid: PeerId
let
r5 = pbPeerInfo.getField(1, pid)
r6 = pbPeerInfo.getRepeatedField(2, peerInfo.addrs)
if r5.isErr() or r6.isErr(): return none(AutonatMsg)
if r5.get(): peerInfo.id = some(pid)
dial.peerInfo = some(peerInfo)
msg.dial = some(dial)
r5 = ? pbPeerInfo.getField(1, pid).toOpt()
r6 = ? pbPeerInfo.getRepeatedField(2, peerInfo.addrs).toOpt()
if r5: peerInfo.id = Opt.some(pid)
dial.peerInfo = Opt.some(peerInfo)
msg.dial = Opt.some(dial)
if r3.get():
if ? pb.getField(3, pbResponse).toOpt():
var
statusOrd: uint
text: string
ma: MultiAddress
response: AutonatDialResponse
let
r4 = pbResponse.getField(1, statusOrd)
r5 = pbResponse.getField(2, text)
r6 = pbResponse.getField(3, ma)
if r4.isErr() or r5.isErr() or r6.isErr() or
(r4.get() and not checkedEnumAssign(response.status, statusOrd)):
return none(AutonatMsg)
if r5.get(): response.text = some(text)
if r6.get(): response.ma = some(ma)
msg.response = some(response)
return some(msg)
if ? pbResponse.getField(1, statusOrd).optValue():
if not checkedEnumAssign(response.status, statusOrd):
return Opt.none(AutonatMsg)
if ? pbResponse.getField(2, text).optValue():
response.text = Opt.some(text)
if ? pbResponse.getField(3, ma).optValue():
response.ma = Opt.some(ma)
msg.response = Opt.some(response)
return Opt.some(msg)
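decode above chains ? with toOpt, so any field error collapses the whole decode into Opt.none without per-field error plumbing. libp2p ships toOpt in its utility module; an illustrative stand-in showing the idea:

import stew/results

func toOpt[T, E](res: Result[T, E]): Opt[T] =
  # drop the error detail, keep only presence or absence
  if res.isOk(): Opt.some(res.get())
  else: Opt.none(T)

proc firstEven(xs: seq[int]): Result[int, string] =
  for x in xs:
    if x mod 2 == 0: return ok(x)
  err("no even element")

proc decodeLike(xs: seq[int]): Opt[int] =
  let x = ? firstEven(xs).toOpt()   # an err becomes Opt.none for our caller
  Opt.some(x * 10)

doAssert decodeLike(@[1, 2, 3]) == Opt.some(20)
doAssert decodeLike(@[1, 3]).isNone()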

View File

@@ -7,14 +7,11 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[options, sets, sequtils]
import std/[sets, sequtils]
import stew/results
import chronos, chronicles, stew/objects
import chronos, chronicles
import ../../protocol,
../../../switch,
../../../multiaddress,
@@ -36,8 +33,8 @@ type
dialTimeout: Duration
proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.} =
let pb = AutonatDial(peerInfo: some(AutonatPeerInfo(
id: some(pid),
let pb = AutonatDial(peerInfo: Opt.some(AutonatPeerInfo(
id: Opt.some(pid),
addrs: addrs
))).encode()
await conn.writeLp(pb.buffer)
@@ -45,16 +42,16 @@ proc sendDial(conn: Connection, pid: PeerId, addrs: seq[MultiAddress]) {.async.}
proc sendResponseError(conn: Connection, status: ResponseStatus, text: string = "") {.async.} =
let pb = AutonatDialResponse(
status: status,
text: if text == "": none(string) else: some(text),
ma: none(MultiAddress)
text: if text == "": Opt.none(string) else: Opt.some(text),
ma: Opt.none(MultiAddress)
).encode()
await conn.writeLp(pb.buffer)
proc sendResponseOk(conn: Connection, ma: MultiAddress) {.async.} =
let pb = AutonatDialResponse(
status: ResponseStatus.Ok,
text: some("Ok"),
ma: some(ma)
text: Opt.some("Ok"),
ma: Opt.some(ma)
).encode()
await conn.writeLp(pb.buffer)
@@ -73,8 +70,8 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
futs = addrs.mapIt(autonat.switch.dialer.tryDial(conn.peerId, @[it]))
let fut = await anyCompleted(futs).wait(autonat.dialTimeout)
let ma = await fut
if ma.isSome:
await conn.sendResponseOk(ma.get())
ma.withValue(maddr):
await conn.sendResponseOk(maddr)
else:
await conn.sendResponseError(DialError, "Missing observed address")
except CancelledError as exc:
@@ -95,42 +92,40 @@ proc tryDial(autonat: Autonat, conn: Connection, addrs: seq[MultiAddress]) {.asy
f.cancel()
proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[void] =
if msg.dial.isNone() or msg.dial.get().peerInfo.isNone():
let dial = msg.dial.valueOr:
return conn.sendResponseError(BadRequest, "Missing Dial")
let peerInfo = dial.peerInfo.valueOr:
return conn.sendResponseError(BadRequest, "Missing Peer Info")
let peerInfo = msg.dial.get().peerInfo.get()
if peerInfo.id.isSome() and peerInfo.id.get() != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
peerInfo.id.withValue(id):
if id != conn.peerId:
return conn.sendResponseError(BadRequest, "PeerId mismatch")
if conn.observedAddr.isNone:
let observedAddr = conn.observedAddr.valueOr:
return conn.sendResponseError(BadRequest, "Missing observed address")
let observedAddr = conn.observedAddr.get()
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
var isRelayed = observedAddr.contains(multiCodec("p2p-circuit")).valueOr:
return conn.sendResponseError(DialRefused, "Invalid observed address")
if isRelayed:
return conn.sendResponseError(DialRefused, "Refused to dial a relayed observed address")
let hostIp = observedAddr[0]
if hostIp.isErr() or not IP.match(hostIp.get()):
trace "wrong observed address", address=observedAddr
let hostIp = observedAddr[0].valueOr:
return conn.sendResponseError(InternalError, "Wrong observed address")
if not IP.match(hostIp):
return conn.sendResponseError(InternalError, "Expected an IP address")
var addrs = initHashSet[MultiAddress]()
addrs.incl(observedAddr)
trace "addrs received", addrs = peerInfo.addrs
for ma in peerInfo.addrs:
isRelayed = ma.contains(multiCodec("p2p-circuit"))
if isRelayed.isErr() or isRelayed.get():
continue
let maFirst = ma[0]
if maFirst.isErr() or not DNS_OR_IP.match(maFirst.get()):
continue
isRelayed = ma.contains(multiCodec("p2p-circuit")).valueOr: continue
let maFirst = ma[0].valueOr: continue
if not DNS_OR_IP.match(maFirst): continue
try:
addrs.incl(
if maFirst.get() == hostIp.get():
if maFirst == hostIp:
ma
else:
let maEnd = ma[1..^1]
if maEnd.isErr(): continue
hostIp.get() & maEnd.get()
let maEnd = ma[1..^1].valueOr: continue
hostIp & maEnd
)
except LPError as exc:
continue
@@ -145,12 +140,12 @@ proc handleDial(autonat: Autonat, conn: Connection, msg: AutonatMsg): Future[voi
proc new*(T: typedesc[Autonat], switch: Switch, semSize: int = 1, dialTimeout = 15.seconds): T =
let autonat = T(switch: switch, sem: newAsyncSemaphore(semSize), dialTimeout: dialTimeout)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let msgOpt = AutonatMsg.decode(await conn.readLp(1024))
if msgOpt.isNone() or msgOpt.get().msgType != MsgType.Dial:
let msg = AutonatMsg.decode(await conn.readLp(1024)).valueOr:
raise newException(AutonatError, "Received malformed message")
let msg = msgOpt.get()
if msg.msgType != MsgType.Dial:
raise newException(AutonatError, "Message type should be dial")
await autonat.handleDial(conn, msg)
except CancelledError as exc:
raise exc
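handleDial's filtering loop above pairs valueOr with continue: an address whose component lookup fails is simply skipped rather than failing the whole request. A self-contained sketch of that pattern:

import stew/results

proc firstChar(s: string): Result[char, string] =
  if s.len > 0: ok(s[0])
  else: err("empty")

var kept: seq[char]
for s in @["alpha", "", "beta"]:
  let c = firstChar(s).valueOr:
    continue               # skip this entry, keep scanning the rest
  kept.add(c)

doAssert kept == @['a', 'b']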

View File

@@ -7,18 +7,19 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[options, deques, sequtils]
import std/[deques, sequtils]
import chronos, metrics
import ../../../switch
import ../../../wire
import client
from core import NetworkReachability, AutonatUnreachableError
import ../../../utils/heartbeat
import ../../../crypto/crypto
export core.NetworkReachability
logScope:
topics = "libp2p autonatservice"
@@ -27,39 +28,39 @@ declarePublicGauge(libp2p_autonat_reachability_confidence, "autonat reachability
type
AutonatService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
addressMapper: AddressMapper
scheduleHandle: Future[void]
networkReachability: NetworkReachability
confidence: Option[float]
networkReachability*: NetworkReachability
confidence: Opt[float]
answers: Deque[NetworkReachability]
autonatClient: AutonatClient
statusAndConfidenceHandler: StatusAndConfidenceHandler
rng: ref HmacDrbgContext
scheduleInterval: Option[Duration]
scheduleInterval: Opt[Duration]
askNewConnectedPeers: bool
numPeersToAsk: int
maxQueueSize: int
minConfidence: float
dialTimeout: Duration
enableAddressMapper: bool
NetworkReachability* {.pure.} = enum
NotReachable, Reachable, Unknown
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Option[float]): Future[void] {.gcsafe, raises: [Defect].}
StatusAndConfidenceHandler* = proc (networkReachability: NetworkReachability, confidence: Opt[float]): Future[void] {.gcsafe, raises: [].}
proc new*(
T: typedesc[AutonatService],
autonatClient: AutonatClient,
rng: ref HmacDrbgContext,
scheduleInterval: Option[Duration] = none(Duration),
scheduleInterval: Opt[Duration] = Opt.none(Duration),
askNewConnectedPeers = true,
numPeersToAsk: int = 5,
maxQueueSize: int = 10,
minConfidence: float = 0.3,
dialTimeout = 30.seconds): T =
dialTimeout = 30.seconds,
enableAddressMapper = true): T =
return T(
scheduleInterval: scheduleInterval,
networkReachability: Unknown,
confidence: none(float),
confidence: Opt.none(float),
answers: initDeque[NetworkReachability](),
autonatClient: autonatClient,
rng: rng,
@@ -67,10 +68,8 @@ proc new*(
numPeersToAsk: numPeersToAsk,
maxQueueSize: maxQueueSize,
minConfidence: minConfidence,
dialTimeout: dialTimeout)
proc networkReachability*(self: AutonatService): NetworkReachability {.inline.} =
return self.networkReachability
dialTimeout: dialTimeout,
enableAddressMapper: enableAddressMapper)
proc callHandler(self: AutonatService) {.async.} =
if not isNil(self.statusAndConfidenceHandler):
@@ -83,27 +82,33 @@ proc hasEnoughIncomingSlots(switch: Switch): bool =
proc doesPeerHaveIncomingConn(switch: Switch, peerId: PeerId): bool =
return switch.connManager.selectMuxer(peerId, In) != nil
proc handleAnswer(self: AutonatService, ans: NetworkReachability) {.async.} =
proc handleAnswer(self: AutonatService, ans: NetworkReachability): Future[bool] {.async.} =
if ans == Unknown:
return
let oldNetworkReachability = self.networkReachability
let oldConfidence = self.confidence
if self.answers.len == self.maxQueueSize:
self.answers.popFirst()
self.answers.addLast(ans)
self.networkReachability = Unknown
self.confidence = none(float)
self.confidence = Opt.none(float)
const reachabilityPriority = [Reachable, NotReachable]
for reachability in reachabilityPriority:
let confidence = self.answers.countIt(it == reachability) / self.maxQueueSize
libp2p_autonat_reachability_confidence.set(value = confidence, labelValues = [$reachability])
if self.confidence.isNone and confidence >= self.minConfidence:
self.networkReachability = reachability
self.confidence = some(confidence)
self.confidence = Opt.some(confidence)
debug "Current status", currentStats = $self.networkReachability, confidence = $self.confidence, answers = self.answers
# Return whether anything has changed
return self.networkReachability != oldNetworkReachability or self.confidence != oldConfidence
proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[NetworkReachability] {.async.} =
logScope:
peerId = $peerId
@@ -130,9 +135,10 @@ proc askPeer(self: AutonatService, switch: Switch, peerId: PeerId): Future[Netwo
except CatchableError as error:
debug "dialMe unexpected error", msg = error.msg
Unknown
await self.handleAnswer(ans)
if not isNil(self.statusAndConfidenceHandler):
await self.statusAndConfidenceHandler(self.networkReachability, self.confidence)
let hasReachabilityOrConfidenceChanged = await self.handleAnswer(ans)
if hasReachabilityOrConfidenceChanged:
await self.callHandler()
await switch.peerInfo.update()
return ans
proc askConnectedPeers(self: AutonatService, switch: Switch) {.async.} =
@@ -153,7 +159,29 @@ proc schedule(service: AutonatService, switch: Switch, interval: Duration) {.asy
heartbeat "Scheduling AutonatService run", interval:
await service.run(switch)
proc addressMapper(
self: AutonatService,
peerStore: PeerStore,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
if self.networkReachability != NetworkReachability.Reachable:
return listenAddrs
var addrs = newSeq[MultiAddress]()
for listenAddr in listenAddrs:
var processedMA = listenAddr
try:
if not listenAddr.isPublicMA() and self.networkReachability == NetworkReachability.Reachable:
processedMA = peerStore.guessDialableAddr(listenAddr) # handle manual port forwarding
except CatchableError as exc:
debug "Error while handling address mapper", msg = exc.msg
addrs.add(processedMA)
return addrs
method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, switch.peerStore, listenAddrs)
info "Setting up AutonatService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
@@ -161,15 +189,15 @@ method setup*(self: AutonatService, switch: Switch): Future[bool] {.async.} =
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent): Future[void] {.async.} =
discard askPeer(self, switch, peerId)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.scheduleInterval.isSome():
self.scheduleHandle = schedule(self, switch, self.scheduleInterval.get())
self.scheduleInterval.withValue(interval):
self.scheduleHandle = schedule(self, switch, interval)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.add(self.addressMapper)
return hasBeenSetup
method run*(self: AutonatService, switch: Switch) {.async, public.} =
trace "Running AutonatService"
await askConnectedPeers(self, switch)
await self.callHandler()
method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public.} =
info "Stopping AutonatService"
@@ -180,6 +208,9 @@ method stop*(self: AutonatService, switch: Switch): Future[bool] {.async, public
self.scheduleHandle = nil
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
if self.enableAddressMapper:
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc statusAndConfidenceHandler*(self: AutonatService, statusAndConfidenceHandler: StatusAndConfidenceHandler) =
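handleAnswer above derives reachability from a sliding window of answers: confidence is the fraction of the window agreeing on a status, accepted once it reaches minConfidence, with Reachable checked before NotReachable. A minimal sketch of that computation (verdict and Reach are illustrative names):

import std/[deques, sequtils]

type Reach = enum Unknown, NotReachable, Reachable

proc verdict(answers: Deque[Reach], maxSize: int, minConf: float): (Reach, float) =
  # the highest-priority status whose share of the window reaches minConf wins
  for r in [Reachable, NotReachable]:
    let conf = answers.toSeq().countIt(it == r) / maxSize
    if conf >= minConf:
      return (r, conf)
  (Unknown, 0.0)

let window = [Reachable, Reachable, NotReachable, Reachable].toDeque()
doAssert verdict(window, 4, 0.3) == (Reachable, 0.75)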

View File

@@ -0,0 +1,89 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/sequtils
import stew/results
import chronos, chronicles
import core
import ../../protocol,
../../../stream/connection,
../../../switch,
../../../utils/future
export DcutrError
type
DcutrClient* = ref object
connectTimeout: Duration
maxDialableAddrs: int
logScope:
topics = "libp2p dcutrclient"
proc new*(T: typedesc[DcutrClient], connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
return T(connectTimeout: connectTimeout, maxDialableAddrs: maxDialableAddrs)
proc startSync*(self: DcutrClient, switch: Switch, remotePeerId: PeerId, addrs: seq[MultiAddress]) {.async.} =
logScope:
peerId = switch.peerInfo.peerId
var
peerDialableAddrs: seq[MultiAddress]
stream: Connection
try:
var ourDialableAddrs = getHolePunchableAddrs(addrs)
if ourDialableAddrs.len == 0:
debug "Dcutr initiator has no supported dialable addresses. Aborting Dcutr.", addrs
return
stream = await switch.dial(remotePeerId, DcutrCodec)
await stream.send(MsgType.Connect, addrs)
debug "Dcutr initiator has sent a Connect message."
let rttStart = Moment.now()
let connectAnswer = DcutrMsg.decode(await stream.readLp(1024))
peerDialableAddrs = getHolePunchableAddrs(connectAnswer.addrs)
if peerDialableAddrs.len == 0:
debug "Dcutr receiver has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectAnswer.addrs
return
let rttEnd = Moment.now()
debug "Dcutr initiator has received a Connect message back.", connectAnswer
let halfRtt = (rttEnd - rttStart) div 2'i64
await stream.send(MsgType.Sync, @[])
debug "Dcutr initiator has sent a Sync message."
await sleepAsync(halfRtt)
if peerDialableAddrs.len > self.maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<self.maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.In))
try:
discard await anyCompleted(futs).wait(self.connectTimeout)
debug "Dcutr initiator has directly connected to the remote peer."
finally:
for fut in futs: fut.cancel()
except CancelledError as err:
raise err
except AllFuturesFailedError as err:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts failed", err)
except AsyncTimeoutError as err:
debug "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr initiator could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
debug "Unexpected error when Dcutr initiator tried to connect to the remote peer", err = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr initiator tried to connect to the remote peer", err)
finally:
if stream != nil:
await stream.close()
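The initiator above measures the Connect round trip and, after sending Sync, waits half of it so both peers dial at roughly the same moment. A sketch of just the timing arithmetic with chronos, mirroring the calls used above:

import chronos

proc halfRttDemo() {.async.} =
  let rttStart = Moment.now()
  await sleepAsync(20.milliseconds)    # stands in for the Connect round trip
  let rttEnd = Moment.now()
  let halfRtt = (rttEnd - rttStart) div 2'i64
  await sleepAsync(halfRtt)            # simultaneous dialing would start here

waitFor halfRttDemo()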

View File

@@ -0,0 +1,65 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/sequtils
import chronos
import stew/objects
import ../../../multiaddress,
../../../errors,
../../../stream/connection
export multiaddress
const
DcutrCodec* = "/libp2p/dcutr"
type
MsgType* = enum
Connect = 100
Sync = 300
DcutrMsg* = object
msgType*: MsgType
addrs*: seq[MultiAddress]
DcutrError* = object of LPError
proc encode*(msg: DcutrMsg): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
for addr in msg.addrs:
result.write(2, addr)
result.finish()
proc decode*(_: typedesc[DcutrMsg], buf: seq[byte]): DcutrMsg {.raises: [DcutrError].} =
var
msgTypeOrd: uint32
dcutrMsg: DcutrMsg
var pb = initProtoBuffer(buf)
var r1 = pb.getField(1, msgTypeOrd)
let r2 = pb.getRepeatedField(2, dcutrMsg.addrs)
if r1.isErr or r2.isErr or not checkedEnumAssign(dcutrMsg.msgType, msgTypeOrd):
raise newException(DcutrError, "Received malformed message")
return dcutrMsg
proc send*(conn: Connection, msgType: MsgType, addrs: seq[MultiAddress]) {.async.} =
let pb = DcutrMsg(msgType: msgType, addrs: addrs).encode()
await conn.writeLp(pb.buffer)
proc getHolePunchableAddrs*(addrs: seq[MultiAddress]): seq[MultiAddress] {.raises: [LPError]} =
var result = newSeq[MultiAddress]()
for a in addrs:
# This is necessary to also accept addrs like /ip4/198.51.100/tcp/1234/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N
if [TCP, mapAnd(TCP_DNS, P2PPattern), mapAnd(TCP_IP, P2PPattern)].anyIt(it.match(a)):
result.add(a[0..1].tryGet())
return result
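decode above relies on checkedEnumAssign (stew/objects) because MsgType has holes: only ordinals that map to a declared member are assigned, everything else is rejected. A standalone sketch:

import stew/objects

type MsgKind = enum
  Connect = 100
  Sync = 300

var kind: MsgKind
doAssert checkedEnumAssign(kind, 100'u32)      # declared ordinal: assigned
doAssert kind == Connect
doAssert not checkedEnumAssign(kind, 200'u32)  # hole in the enum: rejected
doAssert kind == Connect                       # target left untouched on failure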

View File

@@ -0,0 +1,80 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[sets, sequtils]
import stew/[results, objects]
import chronos, chronicles
import core
import ../../protocol,
../../../stream/connection,
../../../switch,
../../../utils/future
export DcutrError
export chronicles
type Dcutr* = ref object of LPProtocol
logScope:
topics = "libp2p dcutr"
proc new*(T: typedesc[Dcutr], switch: Switch, connectTimeout = 15.seconds, maxDialableAddrs = 8): T =
proc handleStream(stream: Connection, proto: string) {.async.} =
var peerDialableAddrs: seq[MultiAddress]
try:
let connectMsg = DcutrMsg.decode(await stream.readLp(1024))
debug "Dcutr receiver received a Connect message.", connectMsg
var ourAddrs = switch.peerStore.getMostObservedProtosAndPorts() # likely empty when the peer is reachable
if ourAddrs.len == 0:
# this list should be the same as the peer's public addrs when it is reachable
ourAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
var ourDialableAddrs = getHolePunchableAddrs(ourAddrs)
if ourDialableAddrs.len == 0:
debug "Dcutr receiver has no supported dialable addresses. Aborting Dcutr.", ourAddrs
return
await stream.send(MsgType.Connect, ourAddrs)
debug "Dcutr receiver has sent a Connect message back."
let syncMsg = DcutrMsg.decode(await stream.readLp(1024))
debug "Dcutr receiver has received a Sync message.", syncMsg
peerDialableAddrs = getHolePunchableAddrs(connectMsg.addrs)
if peerDialableAddrs.len == 0:
debug "Dcutr initiator has no supported dialable addresses to connect to. Aborting Dcutr.", addrs=connectMsg.addrs
return
if peerDialableAddrs.len > maxDialableAddrs:
peerDialableAddrs = peerDialableAddrs[0..<maxDialableAddrs]
var futs = peerDialableAddrs.mapIt(switch.connect(stream.peerId, @[it], forceDial = true, reuseConnection = false, dir = Direction.Out))
try:
discard await anyCompleted(futs).wait(connectTimeout)
debug "Dcutr receiver has directly connected to the remote peer."
finally:
for fut in futs: fut.cancel()
except CancelledError as err:
raise err
except AllFuturesFailedError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts failed", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts failed", err)
except AsyncTimeoutError as err:
debug "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", peerDialableAddrs, msg = err.msg
raise newException(DcutrError, "Dcutr receiver could not connect to the remote peer, all connect attempts timed out", err)
except CatchableError as err:
warn "Unexpected error when Dcutr receiver tried to connect to the remote peer", msg = err.msg
raise newException(DcutrError, "Unexpected error when Dcutr receiver tried to connect to the remote peer", err)
let self = T()
self.handler = handleStream
self.codec = DcutrCodec
self

View File

@@ -7,15 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import times, options
{.push raises: [].}
import times
import chronos, chronicles
import ./relay,
./messages,
./rconn,
@@ -25,8 +20,6 @@ import ./relay,
../../../multiaddress,
../../../stream/connection
export options
logScope:
topics = "libp2p relay relay-client"
@@ -39,7 +32,7 @@ type
RelayV2DialError* = object of RelayClientError
RelayClientAddConn* = proc(conn: Connection,
duration: uint32,
data: uint64): Future[void] {.gcsafe, raises: [Defect].}
data: uint64): Future[void] {.gcsafe, raises: [].}
RelayClient* = ref object of Relay
onNewConnection*: RelayClientAddConn
canHop: bool
@@ -47,28 +40,27 @@ type
Rsvp* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
voucher*: Option[Voucher] # optional, reservation voucher
voucher*: Opt[Voucher] # optional, reservation voucher
limitDuration*: uint32 # seconds
limitData*: uint64 # bytes
proc sendStopError(conn: Connection, code: StatusV2) {.async.} =
trace "send stop status", status = $code & " (" & $ord(code) & ")"
let msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
let msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
await conn.writeLp(encode(msg).buffer)
proc handleRelayedConnect(cl: RelayClient, conn: Connection, msg: StopMessage) {.async.} =
if msg.peer.isNone():
await sendStopError(conn, MalformedMessage)
return
let
# TODO: check the go version to see in which way this could fail
# it's unclear in the spec
src = msg.peer.get()
src = msg.peer.valueOr:
await sendStopError(conn, MalformedMessage)
return
limitDuration = msg.limit.duration
limitData = msg.limit.data
msg = StopMessage(
msgType: StopMessageType.Status,
status: some(Ok))
status: Opt.some(Ok))
pb = encode(msg)
trace "incoming relay connection", src
@@ -92,7 +84,7 @@ proc reserve*(cl: RelayClient,
pb = encode(HopMessage(msgType: HopMessageType.Reserve))
msg = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -103,21 +95,21 @@ proc reserve*(cl: RelayClient,
raise newException(ReservationError, "Unexpected relay response type")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(ReservationError, "Reservation failed")
if msg.reservation.isNone():
raise newException(ReservationError, "Missing reservation information")
let reservation = msg.reservation.get()
let reservation = msg.reservation.valueOr:
raise newException(ReservationError, "Missing reservation information")
if reservation.expire > int64.high().uint64 or
now().utc > reservation.expire.int64.fromUnix.utc:
raise newException(ReservationError, "Bad expiration date")
result.expire = reservation.expire
result.addrs = reservation.addrs
if reservation.svoucher.isSome():
let svoucher = SignedVoucher.decode(reservation.svoucher.get())
if svoucher.isErr() or svoucher.get().data.relayPeerId != peerId:
reservation.svoucher.withValue(sv):
let svoucher = SignedVoucher.decode(sv).valueOr:
raise newException(ReservationError, "Invalid voucher")
result.voucher = some(svoucher.get().data)
if svoucher.data.relayPeerId != peerId:
raise newException(ReservationError, "Invalid voucher PeerId")
result.voucher = Opt.some(svoucher.data)
result.limitDuration = msg.limit.duration
result.limitData = msg.limit.data
@@ -129,9 +121,9 @@ proc dialPeerV1*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
var
msg = RelayMessage(
msgType: some(RelayType.Hop),
srcPeer: some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
dstPeer: some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
msgType: Opt.some(RelayType.Hop),
srcPeer: Opt.some(RelayPeer(peerId: cl.switch.peerInfo.peerId, addrs: cl.switch.peerInfo.addrs)),
dstPeer: Opt.some(RelayPeer(peerId: dstPeerId, addrs: dstAddrs)))
pb = encode(msg)
trace "Dial peer", msgSend=msg
@@ -154,16 +146,18 @@ proc dialPeerV1*(
raise exc
try:
if msgRcvFromRelayOpt.isNone:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
raise newException(RelayV1DialError, "Hop can't open destination stream")
let msgRcvFromRelay = msgRcvFromRelayOpt.get()
if msgRcvFromRelay.msgType.isNone or msgRcvFromRelay.msgType.get() != RelayType.Status:
if msgRcvFromRelay.msgType.tryGet() != RelayType.Status:
raise newException(RelayV1DialError, "Hop can't open destination stream: wrong message type")
if msgRcvFromRelay.status.isNone or msgRcvFromRelay.status.get() != StatusV1.Success:
if msgRcvFromRelay.status.tryGet() != StatusV1.Success:
raise newException(RelayV1DialError, "Hop can't open destination stream: status failed")
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
except ValueError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, exc.msg)
result = conn
proc dialPeerV2*(
@@ -173,13 +167,13 @@ proc dialPeerV2*(
dstAddrs: seq[MultiAddress]): Future[Connection] {.async.} =
let
p = Peer(peerId: dstPeerId, addrs: dstAddrs)
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: some(p)))
pb = encode(HopMessage(msgType: HopMessageType.Connect, peer: Opt.some(p)))
trace "Dial peer", p
let msgRcvFromRelay = try:
await conn.writeLp(pb.buffer)
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).get()
HopMessage.decode(await conn.readLp(RelayClientMsgSize)).tryGet()
except CancelledError as exc:
raise exc
except CatchableError as exc:
@@ -189,19 +183,17 @@ proc dialPeerV2*(
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")
if msgRcvFromRelay.status.get(UnexpectedMessage) != Ok:
trace "Relay stop failed", msg = msgRcvFromRelay.status.get()
trace "Relay stop failed", msg = msgRcvFromRelay.status
raise newException(RelayV2DialError, "Relay stop failure")
conn.limitDuration = msgRcvFromRelay.limit.duration
conn.limitData = msgRcvFromRelay.limit.data
return conn
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
let msgOpt = StopMessage.decode(await conn.readLp(RelayClientMsgSize))
if msgOpt.isNone():
proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async.} =
let msg = StopMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
trace "client circuit relay v2 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
trace "client circuit relay v2 handle stream", msg
if msg.msgType == StopMessageType.Connect:
await cl.handleRelayedConnect(conn, msg)
@@ -209,17 +201,15 @@ proc handleStopStreamV2(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
trace "Unexpected client / relayv2 handshake", msgType=msg.msgType
await sendStopError(conn, MalformedMessage)
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, gcsafe.} =
if msg.srcPeer.isNone:
proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async.} =
let src = msg.srcPeer.valueOr:
await sendStatus(conn, StatusV1.StopSrcMultiaddrInvalid)
return
let src = msg.srcPeer.get()
if msg.dstPeer.isNone:
let dst = msg.dstPeer.valueOr:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return
let dst = msg.dstPeer.get()
if dst.peerId != cl.switch.peerInfo.peerId:
await sendStatus(conn, StatusV1.StopDstMultiaddrInvalid)
return
@@ -236,14 +226,17 @@ proc handleStop(cl: RelayClient, conn: Connection, msg: RelayMessage) {.async, g
if cl.onNewConnection != nil: await cl.onNewConnection(conn, 0, 0)
else: await conn.close()
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async, gcsafe.} =
let msgOpt = RelayMessage.decode(await conn.readLp(RelayClientMsgSize))
if msgOpt.isNone:
proc handleStreamV1(cl: RelayClient, conn: Connection) {.async.} =
let msg = RelayMessage.decode(await conn.readLp(RelayClientMsgSize)).valueOr:
await sendStatus(conn, StatusV1.MalformedMessage)
return
trace "client circuit relay v1 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
case msg.msgType.get:
trace "client circuit relay v1 handle stream", msg
let typ = msg.msgType.valueOr:
trace "Message type not set"
await sendStatus(conn, StatusV1.MalformedMessage)
return
case typ:
of RelayType.Hop:
if cl.canHop: await cl.handleHop(conn, msg)
else: await sendStatus(conn, StatusV1.HopCantSpeakRelay)
@@ -273,7 +266,7 @@ proc new*(T: typedesc[RelayClient], canHop: bool = false,
maxCircuitPerPeer: maxCircuitPerPeer,
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV1Codec: await cl.handleStreamV1(conn)
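reserve above validates the relay's expiration by converting the Unix timestamp and comparing it with the current UTC time. A standalone sketch of that guard using std/times (expired is an illustrative helper):

import std/times

proc expired(expire: uint64): bool =
  # mirrors the check in reserve: out of int64 range, or already past
  expire > int64.high().uint64 or now().utc > expire.int64.fromUnix.utc

doAssert expired(0)   # 1970 is long gone
doAssert not expired(uint64(now().toTime().toUnix() + 3600))   # one hour ahead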

View File

@@ -7,13 +7,10 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import options, macros, sequtils
import stew/objects
import macros
import stew/[objects, results]
import ../../../peerinfo,
../../../signed_envelope
@@ -49,36 +46,36 @@ type
addrs*: seq[MultiAddress]
RelayMessage* = object
msgType*: Option[RelayType]
srcPeer*: Option[RelayPeer]
dstPeer*: Option[RelayPeer]
status*: Option[StatusV1]
msgType*: Opt[RelayType]
srcPeer*: Opt[RelayPeer]
dstPeer*: Opt[RelayPeer]
status*: Opt[StatusV1]
proc encode*(msg: RelayMessage): ProtoBuffer =
result = initProtoBuffer()
if isSome(msg.msgType):
result.write(1, msg.msgType.get().ord.uint)
if isSome(msg.srcPeer):
msg.msgType.withValue(typ):
result.write(1, typ.ord.uint)
msg.srcPeer.withValue(srcPeer):
var peer = initProtoBuffer()
peer.write(1, msg.srcPeer.get().peerId)
for ma in msg.srcPeer.get().addrs:
peer.write(1, srcPeer.peerId)
for ma in srcPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(2, peer.buffer)
if isSome(msg.dstPeer):
msg.dstPeer.withValue(dstPeer):
var peer = initProtoBuffer()
peer.write(1, msg.dstPeer.get().peerId)
for ma in msg.dstPeer.get().addrs:
peer.write(1, dstPeer.peerId)
for ma in dstPeer.addrs:
peer.write(2, ma.data.buffer)
peer.finish()
result.write(3, peer.buffer)
if isSome(msg.status):
result.write(4, msg.status.get().ord.uint)
msg.status.withValue(status):
result.write(4, status.ord.uint)
result.finish()
proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Opt[RelayMessage] =
var
rMsg: RelayMessage
msgTypeOrd: uint32
@@ -88,37 +85,29 @@ proc decode*(_: typedesc[RelayMessage], buf: seq[byte]): Option[RelayMessage] =
pbSrc: ProtoBuffer
pbDst: ProtoBuffer
let
pb = initProtoBuffer(buf)
r1 = pb.getField(1, msgTypeOrd)
r2 = pb.getField(2, pbSrc)
r3 = pb.getField(3, pbDst)
r4 = pb.getField(4, statusOrd)
let pb = initProtoBuffer(buf)
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
return none(RelayMessage)
if r2.get() and
(pbSrc.getField(1, src.peerId).isErr() or
pbSrc.getRepeatedField(2, src.addrs).isErr()):
return none(RelayMessage)
if r3.get() and
(pbDst.getField(1, dst.peerId).isErr() or
pbDst.getRepeatedField(2, dst.addrs).isErr()):
return none(RelayMessage)
if r1.get():
if ? pb.getField(1, msgTypeOrd).toOpt():
if msgTypeOrd.int notin RelayType:
return none(RelayMessage)
rMsg.msgType = some(RelayType(msgTypeOrd))
if r2.get(): rMsg.srcPeer = some(src)
if r3.get(): rMsg.dstPeer = some(dst)
if r4.get():
if statusOrd.int notin StatusV1:
return none(RelayMessage)
rMsg.status = some(StatusV1(statusOrd))
some(rMsg)
return Opt.none(RelayMessage)
rMsg.msgType = Opt.some(RelayType(msgTypeOrd))
if ? pb.getField(2, pbSrc).toOpt():
discard ? pbSrc.getField(1, src.peerId).toOpt()
discard ? pbSrc.getRepeatedField(2, src.addrs).toOpt()
rMsg.srcPeer = Opt.some(src)
if ? pb.getField(3, pbDst).toOpt():
discard ? pbDst.getField(1, dst.peerId).toOpt()
discard ? pbDst.getRepeatedField(2, dst.addrs).toOpt()
rMsg.dstPeer = Opt.some(dst)
if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV1
if not checkedEnumAssign(status, statusOrd):
return Opt.none(RelayMessage)
rMsg.status = Opt.some(status)
Opt.some(rMsg)
# Voucher
@@ -178,7 +167,7 @@ type
Reservation* = object
expire*: uint64 # required, Unix expiration time (UTC)
addrs*: seq[MultiAddress] # relay address for reserving peer
svoucher*: Option[seq[byte]] # optional, reservation voucher
svoucher*: Opt[seq[byte]] # optional, reservation voucher
Limit* = object
duration*: uint32 # seconds
data*: uint64 # bytes
@@ -198,30 +187,29 @@ type
Status = 2
HopMessage* = object
msgType*: HopMessageType
peer*: Option[Peer]
reservation*: Option[Reservation]
peer*: Opt[Peer]
reservation*: Opt[Reservation]
limit*: Limit
status*: Option[StatusV2]
status*: Opt[StatusV2]
proc encode*(msg: HopMessage): ProtoBuffer =
var pb = initProtoBuffer()
pb.write(1, msg.msgType.ord.uint)
if msg.peer.isSome():
msg.peer.withValue(peer):
var ppb = initProtoBuffer()
ppb.write(1, msg.peer.get().peerId)
for ma in msg.peer.get().addrs:
ppb.write(1, peer.peerId)
for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
if msg.reservation.isSome():
let rsrv = msg.reservation.get()
msg.reservation.withValue(rsrv):
var rpb = initProtoBuffer()
rpb.write(1, rsrv.expire)
for ma in rsrv.addrs:
rpb.write(2, ma.data.buffer)
if rsrv.svoucher.isSome():
rpb.write(3, rsrv.svoucher.get())
rsrv.svoucher.withValue(vouch):
rpb.write(3, vouch)
rpb.finish()
pb.write(3, rpb.buffer)
if msg.limit.duration > 0 or msg.limit.data > 0:
@@ -230,65 +218,51 @@ proc encode*(msg: HopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(4, lpb.buffer)
if msg.status.isSome():
pb.write(5, msg.status.get().ord.uint)
msg.status.withValue(status):
pb.write(5, status.ord.uint)
pb.finish()
pb
proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Option[HopMessage] =
var
msg: HopMessage
msgTypeOrd: uint32
pbPeer: ProtoBuffer
pbReservation: ProtoBuffer
pbLimit: ProtoBuffer
statusOrd: uint32
peer: Peer
reservation: Reservation
limit: Limit
res: bool
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, msgTypeOrd)
r2 = pb.getField(2, pbPeer)
r3 = pb.getField(3, pbReservation)
r4 = pb.getField(4, pbLimit)
r5 = pb.getField(5, statusOrd)
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr() or r5.isErr():
return none(HopMessage)
if r2.get() and
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
return none(HopMessage)
if r3.get():
var svoucher: seq[byte]
let rSVoucher = pbReservation.getField(3, svoucher)
if pbReservation.getRequiredField(1, reservation.expire).isErr() or
pbReservation.getRepeatedField(2, reservation.addrs).isErr() or
rSVoucher.isErr():
return none(HopMessage)
if rSVoucher.get(): reservation.svoucher = some(svoucher)
if r4.get() and
(pbLimit.getField(1, limit.duration).isErr() or
pbLimit.getField(2, limit.data).isErr()):
return none(HopMessage)
proc decode*(_: typedesc[HopMessage], buf: seq[byte]): Opt[HopMessage] =
var msg: HopMessage
let pb = initProtoBuffer(buf)
var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
if not checkedEnumAssign(msg.msgType, msgTypeOrd):
return none(HopMessage)
if r2.get(): msg.peer = some(peer)
if r3.get(): msg.reservation = some(reservation)
if r4.get(): msg.limit = limit
if r5.get():
if statusOrd.int notin StatusV2:
return none(HopMessage)
msg.status = some(StatusV2(statusOrd))
some(msg)
return Opt.none(HopMessage)
var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)
var pbReservation: ProtoBuffer
if ? pb.getField(3, pbReservation).toOpt():
var
svoucher: seq[byte]
reservation: Reservation
if ? pbReservation.getField(3, svoucher).toOpt():
reservation.svoucher = Opt.some(svoucher)
? pbReservation.getRequiredField(1, reservation.expire).toOpt()
discard ? pbReservation.getRepeatedField(2, reservation.addrs).toOpt()
msg.reservation = Opt.some(reservation)
var pbLimit: ProtoBuffer
if ? pb.getField(4, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
var statusOrd: uint32
if ? pb.getField(5, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return Opt.none(HopMessage)
msg.status = Opt.some(status)
Opt.some(msg)
# Circuit Relay V2 Stop Message
@@ -298,19 +272,19 @@ type
Status = 1
StopMessage* = object
msgType*: StopMessageType
peer*: Option[Peer]
peer*: Opt[Peer]
limit*: Limit
status*: Option[StatusV2]
status*: Opt[StatusV2]
proc encode*(msg: StopMessage): ProtoBuffer =
var pb = initProtoBuffer()
pb.write(1, msg.msgType.ord.uint)
if msg.peer.isSome():
msg.peer.withValue(peer):
var ppb = initProtoBuffer()
ppb.write(1, msg.peer.get().peerId)
for ma in msg.peer.get().addrs:
ppb.write(1, peer.peerId)
for ma in peer.addrs:
ppb.write(2, ma.data.buffer)
ppb.finish()
pb.write(2, ppb.buffer)
@@ -320,51 +294,40 @@ proc encode*(msg: StopMessage): ProtoBuffer =
if msg.limit.data > 0: lpb.write(2, msg.limit.data)
lpb.finish()
pb.write(3, lpb.buffer)
if msg.status.isSome():
pb.write(4, msg.status.get().ord.uint)
msg.status.withValue(status):
pb.write(4, status.ord.uint)
pb.finish()
pb
proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Option[StopMessage] =
var
msg: StopMessage
msgTypeOrd: uint32
pbPeer: ProtoBuffer
pbLimit: ProtoBuffer
statusOrd: uint32
peer: Peer
limit: Limit
rVoucher: ProtoResult[bool]
res: bool
proc decode*(_: typedesc[StopMessage], buf: seq[byte]): Opt[StopMessage] =
var msg: StopMessage
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, msgTypeOrd)
r2 = pb.getField(2, pbPeer)
r3 = pb.getField(3, pbLimit)
r4 = pb.getField(4, statusOrd)
let pb = initProtoBuffer(buf)
if r1.isErr() or r2.isErr() or r3.isErr() or r4.isErr():
return none(StopMessage)
if r2.get() and
(pbPeer.getRequiredField(1, peer.peerId).isErr() or
pbPeer.getRepeatedField(2, peer.addrs).isErr()):
return none(StopMessage)
if r3.get() and
(pbLimit.getField(1, limit.duration).isErr() or
pbLimit.getField(2, limit.data).isErr()):
return none(StopMessage)
if msgTypeOrd.int notin StopMessageType.low.ord .. StopMessageType.high.ord:
return none(StopMessage)
var msgTypeOrd: uint32
? pb.getRequiredField(1, msgTypeOrd).toOpt()
if msgTypeOrd.int notin StopMessageType:
return Opt.none(StopMessage)
msg.msgType = StopMessageType(msgTypeOrd)
if r2.get(): msg.peer = some(peer)
if r3.get(): msg.limit = limit
if r4.get():
if statusOrd.int notin StatusV2:
return none(StopMessage)
msg.status = some(StatusV2(statusOrd))
some(msg)
var pbPeer: ProtoBuffer
if ? pb.getField(2, pbPeer).toOpt():
var peer: Peer
? pbPeer.getRequiredField(1, peer.peerId).toOpt()
discard ? pbPeer.getRepeatedField(2, peer.addrs).toOpt()
msg.peer = Opt.some(peer)
var pbLimit: ProtoBuffer
if ? pb.getField(3, pbLimit).toOpt():
discard ? pbLimit.getField(1, msg.limit.duration).toOpt()
discard ? pbLimit.getField(2, msg.limit.data).toOpt()
var statusOrd: uint32
if ? pb.getField(4, statusOrd).toOpt():
var status: StatusV2
if not checkedEnumAssign(status, statusOrd):
return Opt.none(StopMessage)
msg.status = Opt.some(status)
Opt.some(msg)
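Every encoder above follows the same nesting shape: build an inner ProtoBuffer, finish it, then write its buffer into a parent field; decoding reverses it with getField into a ProtoBuffer. A hedged round trip reusing only calls visible in this diff (the import path is an assumption):

import libp2p/protobuf/minprotobuf   # assumed module path

var inner = initProtoBuffer()
inner.write(1, "hop ok")             # field 1: a text payload
inner.finish()

var outer = initProtoBuffer()
outer.write(1, 0'u)                  # field 1: a status ordinal
outer.write(2, inner.buffer)         # field 2: the embedded message
outer.finish()

var
  status: uint
  nested: ProtoBuffer
  text: string
let pb = initProtoBuffer(outer.buffer)
doAssert pb.getField(1, status).get()
doAssert pb.getField(2, nested).get()
doAssert nested.getField(1, text).get()
doAssert status == 0 and text == "hop ok"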

View File

@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos
@@ -50,6 +47,7 @@ proc new*(
limitDuration: uint32,
limitData: uint64): T =
let rc = T(conn: conn, limitDuration: limitDuration, limitData: limitData)
rc.dir = conn.dir
rc.initStream()
if limitDuration > 0:
proc checkDurationConnection() {.async.} =

View File

@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import options, sequtils, tables, sugar
import sequtils, tables
import chronos, chronicles
@@ -25,7 +22,6 @@ import ./messages,
../../../multicodec,
../../../stream/connection,
../../../protocols/protocol,
../../../transports/transport,
../../../errors,
../../../utils/heartbeat,
../../../signed_envelope
@@ -94,14 +90,14 @@ proc createReserveResponse(
rsrv = Reservation(expire: expireUnix,
addrs: r.switch.peerInfo.addrs.mapIt(
? it.concat(ma).orErr(CryptoError.KeyError)),
svoucher: some(? sv.encode))
svoucher: Opt.some(? sv.encode))
msg = HopMessage(msgType: HopMessageType.Status,
reservation: some(rsrv),
reservation: Opt.some(rsrv),
limit: r.limit,
status: some(Ok))
status: Opt.some(Ok))
return ok(msg)
proc isRelayed(conn: Connection): bool =
proc isRelayed*(conn: Connection): bool =
var wrappedConn = conn
while not isNil(wrappedConn):
if wrappedConn of RelayConnection:
@@ -109,7 +105,7 @@ proc isRelayed(conn: Connection): bool =
wrappedConn = wrappedConn.getWrapped()
return false
proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
proc handleReserve(r: Relay, conn: Connection) {.async.} =
if conn.isRelayed():
trace "reservation attempt over relay connection", pid = conn.peerId
await sendHopStatus(conn, PermissionDenied)
@@ -119,32 +115,30 @@ proc handleReserve(r: Relay, conn: Connection) {.async, gcsafe.} =
trace "Too many reservations", pid = conn.peerId
await sendHopStatus(conn, ReservationRefused)
return
trace "reserving relay slot for", pid = conn.peerId
let
pid = conn.peerId
expire = now().utc + r.reservationTTL
msg = r.createReserveResponse(pid, expire)
msg = r.createReserveResponse(pid, expire).valueOr:
trace "error signing the voucher", pid
return
trace "reserving relay slot for", pid
if msg.isErr():
trace "error signing the voucher", error = error(msg), pid
return
r.rsvp[pid] = expire
await conn.writeLp(encode(msg.get()).buffer)
await conn.writeLp(encode(msg).buffer)
proc handleConnect(r: Relay,
connSrc: Connection,
msg: HopMessage) {.async, gcsafe.} =
msg: HopMessage) {.async.} =
if connSrc.isRelayed():
trace "connection attempt over relay connection"
await sendHopStatus(connSrc, PermissionDenied)
return
if msg.peer.isNone():
await sendHopStatus(connSrc, MalformedMessage)
return
let
msgPeer = msg.peer.valueOr:
await sendHopStatus(connSrc, MalformedMessage)
return
src = connSrc.peerId
dst = msg.peer.get().peerId
dst = msgPeer.peerId
if dst notin r.rsvp:
trace "refusing connection, no reservation", src, dst
await sendHopStatus(connSrc, NoReservation)
@@ -177,16 +171,17 @@ proc handleConnect(r: Relay,
proc sendStopMsg() {.async.} =
let stopMsg = StopMessage(msgType: StopMessageType.Connect,
peer: some(Peer(peerId: src, addrs: @[])),
peer: Opt.some(Peer(peerId: src, addrs: @[])),
limit: r.limit)
await connDst.writeLp(encode(stopMsg).buffer)
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).get()
let msg = StopMessage.decode(await connDst.readLp(r.msgSize)).valueOr:
raise newException(SendStopError, "Malformed message")
if msg.msgType != StopMessageType.Status:
raise newException(SendStopError, "Unexpected stop response, not a status message")
if msg.status.get(UnexpectedMessage) != Ok:
raise newException(SendStopError, "Relay stop failure")
await connSrc.writeLp(encode(HopMessage(msgType: HopMessageType.Status,
status: some(Ok))).buffer)
status: Opt.some(Ok))).buffer)
try:
await sendStopMsg()
except CancelledError as exc:
@@ -205,13 +200,11 @@ proc handleConnect(r: Relay,
await rconnDst.close()
await bridge(rconnSrc, rconnDst)
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
let msgOpt = HopMessage.decode(await conn.readLp(r.msgSize))
if msgOpt.isNone():
proc handleHopStreamV2*(r: Relay, conn: Connection) {.async.} =
let msg = HopMessage.decode(await conn.readLp(r.msgSize)).valueOr:
await sendHopStatus(conn, MalformedMessage)
return
trace "relayv2 handle stream", msg = msgOpt.get()
let msg = msgOpt.get()
trace "relayv2 handle stream", msg = msg
case msg.msgType:
of HopMessageType.Reserve: await r.handleReserve(conn)
of HopMessageType.Connect: await r.handleConnect(conn, msg)
@@ -221,7 +214,7 @@ proc handleHopStreamV2*(r: Relay, conn: Connection) {.async, gcsafe.} =
# Relay V1
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsafe.} =
proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async.} =
r.streamCount.inc()
defer: r.streamCount.dec()
if r.streamCount + r.rsvp.len() >= r.maxCircuit:
@@ -229,15 +222,14 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantSpeakRelay)
return
+  var src, dst: RelayPeer
   proc checkMsg(): Result[RelayMessage, StatusV1] =
-    if msg.srcPeer.isNone:
+    src = msg.srcPeer.valueOr:
       return err(StatusV1.HopSrcMultiaddrInvalid)
-    let src = msg.srcPeer.get()
     if src.peerId != connSrc.peerId:
       return err(StatusV1.HopSrcMultiaddrInvalid)
-    if msg.dstPeer.isNone:
+    dst = msg.dstPeer.valueOr:
       return err(StatusV1.HopDstMultiaddrInvalid)
-    let dst = msg.dstPeer.get()
if dst.peerId == r.switch.peerInfo.peerId:
return err(StatusV1.HopCantRelayToSelf)
if not r.switch.isConnected(dst.peerId):
@@ -249,9 +241,6 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, check.error())
return
-  let
-    src = msg.srcPeer.get()
-    dst = msg.dstPeer.get()
if r.peerCount[src.peerId] >= r.maxCircuitPerPeer or
r.peerCount[dst.peerId] >= r.maxCircuitPerPeer:
trace "refusing connection; too many connection from src or to dst", src, dst
@@ -275,9 +264,9 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await connDst.close()
let msgToSend = RelayMessage(
-    msgType: some(RelayType.Stop),
-    srcPeer: some(src),
-    dstPeer: some(dst))
+    msgType: Opt.some(RelayType.Stop),
+    srcPeer: Opt.some(src),
+    dstPeer: Opt.some(dst))
let msgRcvFromDstOpt = try:
await connDst.writeLp(encode(msgToSend).buffer)
@@ -289,12 +278,11 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
return
-  if msgRcvFromDstOpt.isNone:
+  let msgRcvFromDst = msgRcvFromDstOpt.valueOr:
     trace "error reading stop response", msg = msgRcvFromDstOpt
     await sendStatus(connSrc, StatusV1.HopCantOpenDstStream)
     return
-  let msgRcvFromDst = msgRcvFromDstOpt.get()
if msgRcvFromDst.msgType.get(RelayType.Stop) != RelayType.Status or
msgRcvFromDst.status.get(StatusV1.StopRelayRefused) != StatusV1.Success:
trace "unexcepted relay stop response", msgRcvFromDst
@@ -305,14 +293,17 @@ proc handleHop*(r: Relay, connSrc: Connection, msg: RelayMessage) {.async, gcsaf
trace "relaying connection", src, dst
await bridge(connSrc, connDst)
-proc handleStreamV1(r: Relay, conn: Connection) {.async, gcsafe.} =
-  let msgOpt = RelayMessage.decode(await conn.readLp(r.msgSize))
-  if msgOpt.isNone:
+proc handleStreamV1(r: Relay, conn: Connection) {.async.} =
+  let msg = RelayMessage.decode(await conn.readLp(r.msgSize)).valueOr:
     await sendStatus(conn, StatusV1.MalformedMessage)
     return
-  trace "relay handle stream", msg = msgOpt.get()
-  let msg = msgOpt.get()
-  case msg.msgType.get:
+  trace "relay handle stream", msg
+  let typ = msg.msgType.valueOr:
+    trace "Message type not set"
+    await sendStatus(conn, StatusV1.MalformedMessage)
+    return
+  case typ:
of RelayType.Hop: await r.handleHop(conn, msg)
of RelayType.Stop: await sendStatus(conn, StatusV1.StopRelayRefused)
of RelayType.CanHop: await sendStatus(conn, StatusV1.Success)
@@ -345,7 +336,7 @@ proc new*(T: typedesc[Relay],
msgSize: msgSize,
isCircuitRelayV1: circuitRelayV1)
-  proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
+  proc handleStream(conn: Connection, proto: string) {.async.} =
try:
case proto:
of RelayV2HopCodec: await r.handleHopStreamV2(conn)


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
import sequtils, strutils
@@ -40,33 +37,33 @@ method start*(self: RelayTransport, ma: seq[MultiAddress]) {.async.} =
self.client.onNewConnection = proc(
conn: Connection,
duration: uint32 = 0,
-      data: uint64 = 0) {.async, gcsafe, raises: [Defect].} =
+      data: uint64 = 0) {.async.} =
await self.queue.addLast(RelayConnection.new(conn, duration, data))
await conn.join()
self.selfRunning = true
await procCall Transport(self).start(ma)
trace "Starting Relay transport"
-method stop*(self: RelayTransport) {.async, gcsafe.} =
+method stop*(self: RelayTransport) {.async.} =
   self.running = false
   self.selfRunning = false
   self.client.onNewConnection = nil
   while not self.queue.empty():
     await self.queue.popFirstNoWait().close()
-method accept*(self: RelayTransport): Future[Connection] {.async, gcsafe.} =
+method accept*(self: RelayTransport): Future[Connection] {.async.} =
   result = await self.queue.popFirst()
-proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async, gcsafe.} =
+proc dial*(self: RelayTransport, ma: MultiAddress): Future[Connection] {.async.} =
let
sma = toSeq(ma.items())
relayAddrs = sma[0..sma.len-4].mapIt(it.tryGet()).foldl(a & b)
var
relayPeerId: PeerId
dstPeerId: PeerId
-  if not relayPeerId.init(($(sma[^3].get())).split('/')[2]):
+  if not relayPeerId.init(($(sma[^3].tryGet())).split('/')[2]):
     raise newException(RelayV2DialError, "Relay doesn't exist")
-  if not dstPeerId.init(($(sma[^1].get())).split('/')[2]):
+  if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayV2DialError, "Destination doesn't exist")
trace "Dial", relayPeerId, dstPeerId
@@ -93,14 +90,18 @@ method dial*(
self: RelayTransport,
hostname: string,
ma: MultiAddress,
-    peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async, gcsafe.} =
-  let address = MultiAddress.init($ma & "/p2p/" & $peerId.get()).tryGet()
-  result = await self.dial(address)
+    peerId: Opt[PeerId] = Opt.none(PeerId)): Future[Connection] {.async.} =
+  peerId.withValue(pid):
+    let address = MultiAddress.init($ma & "/p2p/" & $pid).tryGet()
+    result = await self.dial(address)
-method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe} =
-  if ma.protocols.isOk():
-    let sma = toSeq(ma.items())
-    result = sma.len >= 2 and CircuitRelay.match(sma[^1].get())
+method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
+  try:
+    if ma.protocols.isOk():
+      let sma = toSeq(ma.items())
+      result = sma.len >= 2 and CircuitRelay.match(sma[^1].tryGet())
+  except CatchableError as exc:
+    result = false
trace "Handles return", ma, result
proc new*(T: typedesc[RelayTransport], cl: RelayClient, upgrader: Upgrade): T =


@@ -7,15 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
-import options
+{.push raises: [].}
import chronos, chronicles
import ./messages,
../../../stream/connection
@@ -27,24 +21,24 @@ const
RelayV2HopCodec* = "/libp2p/circuit/relay/0.2.0/hop"
RelayV2StopCodec* = "/libp2p/circuit/relay/0.2.0/stop"
-proc sendStatus*(conn: Connection, code: StatusV1) {.async, gcsafe.} =
+proc sendStatus*(conn: Connection, code: StatusV1) {.async.} =
   trace "send relay/v1 status", status = $code & "(" & $ord(code) & ")"
   let
-    msg = RelayMessage(msgType: some(RelayType.Status), status: some(code))
+    msg = RelayMessage(msgType: Opt.some(RelayType.Status), status: Opt.some(code))
     pb = encode(msg)
   await conn.writeLp(pb.buffer)
-proc sendHopStatus*(conn: Connection, code: StatusV2) {.async, gcsafe.} =
+proc sendHopStatus*(conn: Connection, code: StatusV2) {.async.} =
   trace "send hop relay/v2 status", status = $code & "(" & $ord(code) & ")"
   let
-    msg = HopMessage(msgType: HopMessageType.Status, status: some(code))
+    msg = HopMessage(msgType: HopMessageType.Status, status: Opt.some(code))
     pb = encode(msg)
   await conn.writeLp(pb.buffer)
 proc sendStopStatus*(conn: Connection, code: StatusV2) {.async.} =
   trace "send stop relay/v2 status", status = $code & " (" & $ord(code) & ")"
   let
-    msg = StopMessage(msgType: StopMessageType.Status, status: some(code))
+    msg = StopMessage(msgType: StopMessageType.Status, status: Opt.some(code))
pb = encode(msg)
await conn.writeLp(pb.buffer)


@@ -10,10 +10,7 @@
## `Identify <https://docs.libp2p.io/concepts/protocols/#identify>`_ and
## `Push Identify <https://docs.libp2p.io/concepts/protocols/#identify-push>`_ implementation
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
import std/[sequtils, options, strutils, sugar]
import stew/results
@@ -24,9 +21,13 @@ import ../protobuf/minprotobuf,
../peerid,
../crypto/crypto,
../multiaddress,
../multicodec,
../protocols/protocol,
../utility,
-       ../errors
+       ../errors,
+       ../observedaddrmanager
+export observedaddrmanager
logScope:
topics = "libp2p identify"
@@ -56,12 +57,13 @@ type
Identify* = ref object of LPProtocol
peerInfo*: PeerInfo
sendSignedPeerRecord*: bool
+    observedAddrManager*: ObservedAddrManager
IdentifyPushHandler* = proc (
peer: PeerId,
newInfo: IdentifyInfo):
Future[void]
-    {.gcsafe, raises: [Defect], public.}
+    {.gcsafe, raises: [], public.}
IdentifyPush* = ref object of LPProtocol
identifyHandler: IdentifyPushHandler
@@ -70,30 +72,28 @@ chronicles.expandIt(IdentifyInfo):
pubkey = ($it.pubkey).shortLog
addresses = it.addrs.map(x => $x).join(",")
protocols = it.protos.map(x => $x).join(",")
-  observable_address =
-    if it.observedAddr.isSome(): $it.observedAddr.get()
-    else: "None"
+  observable_address = $it.observedAddr
   proto_version = it.protoVersion.get("None")
   agent_version = it.agentVersion.get("None")
   signedPeerRecord =
     # The SPR contains the same data as the identify message
     # would be cumbersome to log
-    if iinfo.signedPeerRecord.isSome(): "Some"
+    if it.signedPeerRecord.isSome(): "Some"
else: "None"
proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: bool): ProtoBuffer
-    {.raises: [Defect].} =
+    {.raises: [].} =
   result = initProtoBuffer()
   let pkey = peerInfo.publicKey
-  result.write(1, pkey.getBytes().get())
+  result.write(1, pkey.getBytes().expect("valid key"))
   for ma in peerInfo.addrs:
     result.write(2, ma.data.buffer)
   for proto in peerInfo.protocols:
     result.write(3, proto)
-  if observedAddr.isSome:
-    result.write(4, observedAddr.get().data.buffer)
+  observedAddr.withValue(observed):
+    result.write(4, observed.data.buffer)
let protoVersion = ProtoVersion
result.write(5, protoVersion)
let agentVersion = if peerInfo.agentVersion.len <= 0:
@@ -105,13 +105,12 @@ proc encodeMsg(peerInfo: PeerInfo, observedAddr: Opt[MultiAddress], sendSpr: boo
## Optionally populate signedPeerRecord field.
## See https://github.com/libp2p/go-libp2p/blob/ddf96ce1cfa9e19564feb9bd3e8269958bbc0aba/p2p/protocol/identify/pb/identify.proto for reference.
if sendSpr:
-    let sprBuff = peerInfo.signedPeerRecord.envelope.encode()
-    if sprBuff.isOk():
-      result.write(8, sprBuff.get())
+    peerInfo.signedPeerRecord.envelope.encode().toOpt().withValue(sprBuff):
+      result.write(8, sprBuff)
result.finish()
-proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
+proc decodeMsg*(buf: seq[byte]): Opt[IdentifyInfo] =
var
iinfo: IdentifyInfo
pubkey: PublicKey
@@ -121,52 +120,38 @@ proc decodeMsg*(buf: seq[byte]): Option[IdentifyInfo] =
signedPeerRecord: SignedPeerRecord
var pb = initProtoBuffer(buf)
+  if ? pb.getField(1, pubkey).toOpt():
+    iinfo.pubkey = some(pubkey)
+    if ? pb.getField(8, signedPeerRecord).toOpt() and
+        pubkey == signedPeerRecord.envelope.publicKey:
+      iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
+  discard ? pb.getRepeatedField(2, iinfo.addrs).toOpt()
+  discard ? pb.getRepeatedField(3, iinfo.protos).toOpt()
+  if ? pb.getField(4, oaddr).toOpt():
+    iinfo.observedAddr = some(oaddr)
+  if ? pb.getField(5, protoVersion).toOpt():
+    iinfo.protoVersion = some(protoVersion)
+  if ? pb.getField(6, agentVersion).toOpt():
+    iinfo.agentVersion = some(agentVersion)
-  let r1 = pb.getField(1, pubkey)
-  let r2 = pb.getRepeatedField(2, iinfo.addrs)
-  let r3 = pb.getRepeatedField(3, iinfo.protos)
-  let r4 = pb.getField(4, oaddr)
-  let r5 = pb.getField(5, protoVersion)
-  let r6 = pb.getField(6, agentVersion)
-  let r8 = pb.getField(8, signedPeerRecord)
-  let res = r1.isOk() and r2.isOk() and r3.isOk() and
-            r4.isOk() and r5.isOk() and r6.isOk() and
-            r8.isOk()
-  if res:
-    if r1.get():
-      iinfo.pubkey = some(pubkey)
-    if r4.get():
-      iinfo.observedAddr = some(oaddr)
-    if r5.get():
-      iinfo.protoVersion = some(protoVersion)
-    if r6.get():
-      iinfo.agentVersion = some(agentVersion)
-    if r8.get() and r1.get():
-      if iinfo.pubkey.get() == signedPeerRecord.envelope.publicKey:
-        iinfo.signedPeerRecord = some(signedPeerRecord.envelope)
   debug "decodeMsg: decoded identify", iinfo
-    some(iinfo)
-  else:
-    trace "decodeMsg: failed to decode received message"
-    none[IdentifyInfo]()
+  Opt.some(iinfo)
proc new*(
T: typedesc[Identify],
peerInfo: PeerInfo,
-  sendSignedPeerRecord = false
+  sendSignedPeerRecord = false,
+  observedAddrManager = ObservedAddrManager.new(),
   ): T =
   let identify = T(
     peerInfo: peerInfo,
-    sendSignedPeerRecord: sendSignedPeerRecord
+    sendSignedPeerRecord: sendSignedPeerRecord,
+    observedAddrManager: observedAddrManager,
)
identify.init()
identify
method init*(p: Identify) =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling identify request", conn
var pb = encodeMsg(p.peerInfo, conn.observedAddr, p.sendSignedPeerRecord)
@@ -182,34 +167,34 @@ method init*(p: Identify) =
p.handler = handle
p.codec = IdentifyCodec
-proc identify*(p: Identify,
+proc identify*(self: Identify,
                conn: Connection,
-               remotePeerId: PeerId): Future[IdentifyInfo] {.async, gcsafe.} =
+               remotePeerId: PeerId): Future[IdentifyInfo] {.async.} =
trace "initiating identify", conn
var message = await conn.readLp(64*1024)
if len(message) == 0:
trace "identify: Empty message received!", conn
raise newException(IdentityInvalidMsgError, "Empty message received!")
-  let infoOpt = decodeMsg(message)
-  if infoOpt.isNone():
-    raise newException(IdentityInvalidMsgError, "Incorrect message received!")
-  result = infoOpt.get()
+  var info = decodeMsg(message).valueOr:
+    raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+  debug "identify: decoded message", conn, info
+  let
+    pubkey = info.pubkey.valueOr:
+      raise newException(IdentityInvalidMsgError, "No pubkey in identify")
+    peer = PeerId.init(pubkey).valueOr:
+      raise newException(IdentityInvalidMsgError, $error)
-  if result.pubkey.isSome:
-    let peer = PeerId.init(result.pubkey.get())
-    if peer.isErr:
-      raise newException(IdentityInvalidMsgError, $peer.error)
-    else:
-      result.peerId = peer.get()
-      if peer.get() != remotePeerId:
-        trace "Peer ids don't match",
-              remote = peer,
-              local = remotePeerId
-        raise newException(IdentityNoMatchError, "Peer ids don't match")
-  else:
-    raise newException(IdentityInvalidMsgError, "No pubkey in identify")
+  if peer != remotePeerId:
+    trace "Peer ids don't match", remote = peer, local = remotePeerId
+    raise newException(IdentityNoMatchError, "Peer ids don't match")
+  info.peerId = peer
+  info.observedAddr.withValue(observed):
+    # Currently, we use the ObservedAddrManager only to find our dialable external NAT address. Therefore, addresses
+    # like "...\p2p-circuit\p2p\..." and "\p2p\..." are not useful to us.
+    if observed.contains(multiCodec("p2p-circuit")).get(false) or P2PPattern.matchPartial(observed):
+      trace "Not adding address to ObservedAddrManager.", observed
+    elif not self.observedAddrManager.addObservation(observed):
+      trace "Observed address is not valid.", observedAddr = observed
+  return info
proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.public.} =
## Create a IdentifyPush protocol. `handler` will be called every time
@@ -219,26 +204,24 @@ proc new*(T: typedesc[IdentifyPush], handler: IdentifyPushHandler = nil): T {.pu
identifypush
proc init*(p: IdentifyPush) =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
trace "handling identify push", conn
try:
var message = await conn.readLp(64*1024)
-      let infoOpt = decodeMsg(message)
-      if infoOpt.isNone():
+      var identInfo = decodeMsg(message).valueOr:
         raise newException(IdentityInvalidMsgError, "Incorrect message received!")
+      debug "identify push: decoded message", conn, identInfo
-      var indentInfo = infoOpt.get()
-      if indentInfo.pubkey.isSome:
-        let receivedPeerId = PeerId.init(indentInfo.pubkey.get()).tryGet()
+      identInfo.pubkey.withValue(pubkey):
+        let receivedPeerId = PeerId.init(pubkey).tryGet()
         if receivedPeerId != conn.peerId:
           raise newException(IdentityNoMatchError, "Peer ids don't match")
-        indentInfo.peerId = receivedPeerId
+        identInfo.peerId = receivedPeerId
       trace "triggering peer event", peerInfo = conn.peerId
       if not isNil(p.identifyHandler):
-        await p.identifyHandler(conn.peerId, indentInfo)
+        await p.identifyHandler(conn.peerId, identInfo)
except CancelledError as exc:
raise exc
except CatchableError as exc:


@@ -0,0 +1,47 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
import chronos, chronicles, sequtils
import stew/endians2
import ./core, ../../stream/connection
logScope:
topics = "libp2p perf"
type PerfClient* = ref object of RootObj
proc perf*(_: typedesc[PerfClient], conn: Connection,
sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0):
Future[Duration] {.async, public.} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
let start = Moment.now()
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite
await conn.close()
size = sizeToRead
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead
let duration = Moment.now() - start
trace "finishing performance benchmark", duration
return duration


@@ -0,0 +1,14 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
const
PerfCodec* = "/perf/1.0.0"
PerfSize* = 65536


@@ -0,0 +1,60 @@
# Nim-LibP2P
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
## `Perf <https://github.com/libp2p/specs/blob/master/perf/perf.md>`_ protocol specification
{.push raises: [].}
import chronos, chronicles
import stew/endians2
import ./core,
../protocol,
../../stream/connection,
../../utility
export chronicles, connection
logScope:
topics = "libp2p perf"
type Perf* = ref object of LPProtocol
proc new*(T: typedesc[Perf]): T {.public.} =
var p = T()
proc handle(conn: Connection, proto: string) {.async.} =
var bytesRead = 0
try:
trace "Received benchmark performance check", conn
var
sizeBuffer: array[8, byte]
size: uint64
await conn.readExactly(addr sizeBuffer[0], 8)
size = uint64.fromBytesBE(sizeBuffer)
var toReadBuffer: array[PerfSize, byte]
try:
while true:
bytesRead += await conn.readOnce(addr toReadBuffer[0], PerfSize)
except CatchableError as exc:
discard
var buf: array[PerfSize, byte]
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0..<toWrite])
size -= toWrite
except CancelledError as exc:
raise exc
except CatchableError as exc:
trace "exception in perf handler", exc = exc.msg, conn
await conn.close()
p.handler = handle
p.codec = PerfCodec
return p


@@ -9,13 +9,10 @@
## `Ping <https://docs.libp2p.io/concepts/protocols/#ping>`_ protocol implementation
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
 import chronos, chronicles
-import bearssl/[rand, hash]
+import bearssl/rand
import ../protobuf/minprotobuf,
../peerinfo,
../stream/connection,
@@ -42,7 +39,7 @@ type
PingHandler* {.public.} = proc (
peer: PeerId):
Future[void]
-    {.gcsafe, raises: [Defect].}
+    {.gcsafe, raises: [].}
Ping* = ref object of LPProtocol
pingHandler*: PingHandler
@@ -54,7 +51,7 @@ proc new*(T: typedesc[Ping], handler: PingHandler = nil, rng: ref HmacDrbgContex
ping
method init*(p: Ping) =
-  proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} =
+  proc handle(conn: Connection, proto: string) {.async.} =
try:
trace "handling ping", conn
var buf: array[PingSize, byte]
@@ -74,7 +71,7 @@ method init*(p: Ping) =
proc ping*(
p: Ping,
conn: Connection,
-  ): Future[Duration] {.async, gcsafe, public.} =
+  ): Future[Duration] {.async, public.} =
## Sends ping to `conn`, returns the delay
trace "initiating ping", conn


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
import chronos, stew/results
import ../stream/connection
@@ -25,7 +22,7 @@ type
conn: Connection,
proto: string):
Future[void]
-    {.gcsafe, raises: [Defect].}
+    {.gcsafe, raises: [].}
LPProtocol* = ref object of RootObj
codecs*: seq[string]
@@ -55,8 +52,8 @@ func `codec=`*(p: LPProtocol, codec: string) =
proc new*(
T: type LPProtocol,
codecs: seq[string],
-    handler: LPProtoHandler, # default(Opt[int]) or Opt.none(int) don't work on 1.2
-    maxIncomingStreams: Opt[int] | int = Opt[int]()): T =
+    handler: LPProtoHandler,
+    maxIncomingStreams: Opt[int] | int = Opt.none(int)): T =
T(
codecs: codecs,
handler: handler,


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
import std/[sets, hashes, tables]
import chronos, chronicles, metrics
@@ -18,7 +15,7 @@ import ./pubsub,
./pubsubpeer,
./timedcache,
./peertable,
-       ./rpc/[message, messages],
+       ./rpc/[message, messages, protobuf],
../../crypto/crypto,
../../stream/connection,
../../peerid,
@@ -98,7 +95,16 @@ method unsubscribePeer*(f: FloodSub, peer: PeerId) =
method rpcHandler*(f: FloodSub,
peer: PubSubPeer,
-                   rpcMsg: RPCMsg) {.async.} =
+                   data: seq[byte]) {.async.} =
+  var rpcMsg = decodeRpcMsg(data).valueOr:
+    debug "failed to decode msg from peer", peer, err = error
+    raise newException(CatchableError, "")
+  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  # trigger hooks
+  peer.recvObservers(rpcMsg)
for i in 0..<min(f.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
f.handleSubscribe(peer, sub.topic, sub.subscribe)
@@ -151,7 +157,7 @@ method rpcHandler*(f: FloodSub,
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
-    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
+    f.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
trace "Forwared message to peers", peers = toSendPeers.len
f.updateMetrics(rpcMsg)
@@ -213,7 +219,7 @@ method publish*(f: FloodSub,
return 0
# Try to send to all peers that are known to be interested
-  f.broadcast(peers, RPCMsg(messages: @[msg]))
+  f.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
when defined(libp2p_expensive_metrics):
libp2p_pubsub_messages_published.inc(labelValues = [topic])
@@ -223,7 +229,7 @@ method publish*(f: FloodSub,
return peers.len
method initPubSub*(f: FloodSub)
-  {.raises: [Defect, InitializationError].} =
+  {.raises: [InitializationError].} =
procCall PubSub(f).initPubSub()
f.seen = TimedCache[MessageId].init(2.minutes)
f.seenSalt = newSeqUninitialized[byte](sizeof(Hash))


@@ -9,20 +9,18 @@
## Gossip based publishing
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
 import std/[sets, sequtils]
 import chronos, chronicles, metrics
+import chronos/ratelimit
import ./pubsub,
./floodsub,
./pubsubpeer,
./peertable,
./mcache,
./timedcache,
-       ./rpc/[messages, message],
+       ./rpc/[messages, message, protobuf],
../protocol,
../../stream/connection,
../../peerinfo,
@@ -43,9 +41,14 @@ logScope:
declareCounter(libp2p_gossipsub_failed_publish, "number of failed publish")
declareCounter(libp2p_gossipsub_invalid_topic_subscription, "number of invalid topic subscriptions that happened")
declareCounter(libp2p_gossipsub_duplicate_during_validation, "number of duplicates received during message validation")
+declareCounter(libp2p_gossipsub_idontwant_saved_messages, "number of duplicates avoided by idontwant")
+declareCounter(libp2p_gossipsub_saved_bytes, "bytes saved by gossipsub optimizations", labels=["kind"])
+declareCounter(libp2p_gossipsub_duplicate, "number of duplicates received")
+declareCounter(libp2p_gossipsub_received, "number of messages received (deduplicated)")
+when defined(libp2p_expensive_metrics):
+  declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
proc init*(_: type[GossipSubParams]): GossipSubParams =
GossipSubParams(
explicit: true,
@@ -77,7 +80,10 @@ proc init*(_: type[GossipSubParams]): GossipSubParams =
behaviourPenaltyWeight: -1.0,
behaviourPenaltyDecay: 0.999,
disconnectBadPeers: false,
-      enablePX: false
+      enablePX: false,
+      bandwidthEstimatebps: 100_000_000, # 100 Mbps or 12.5 MBps
+      overheadRateLimit: Opt.none(tuple[bytes: int, interval: Duration]),
+      disconnectPeerAboveRateLimit: false
)
proc validateParameters*(parameters: GossipSubParams): Result[void, cstring] =
@@ -150,7 +156,7 @@ method init*(g: GossipSub) =
g.codecs &= GossipSubCodec
g.codecs &= GossipSubCodec_10
-method onNewPeer(g: GossipSub, peer: PubSubPeer) =
+method onNewPeer*(g: GossipSub, peer: PubSubPeer) =
g.withPeerStats(peer.peerId) do (stats: var PeerStats):
# Make sure stats and peer information match, even when reloading peer stats
# from a previous connection
@@ -158,8 +164,11 @@ method onNewPeer(g: GossipSub, peer: PubSubPeer) =
peer.appScore = stats.appScore
peer.behaviourPenalty = stats.behaviourPenalty
-  peer.iWantBudget = IWantPeerBudget
-  peer.iHaveBudget = IHavePeerBudget
+    # Check if the score is below the threshold and disconnect the peer if necessary
+    g.disconnectIfBadScorePeer(peer, stats.score)
+  peer.iHaveBudget = IHavePeerBudget
+  peer.pingBudget = PingsPeerBudget
method onPubSubPeerEvent*(p: GossipSub, peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe.} =
case event.kind
@@ -188,11 +197,11 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
return
# remove from peer IPs collection too
-  if pubSubPeer.address.isSome():
-    g.peersInIP.withValue(pubSubPeer.address.get(), s):
+  pubSubPeer.address.withValue(address):
+    g.peersInIP.withValue(address, s):
       s[].excl(pubSubPeer.peerId)
       if s[].len == 0:
-        g.peersInIP.del(pubSubPeer.address.get())
+        g.peersInIP.del(address)
for t in toSeq(g.mesh.keys):
trace "pruning unsubscribing peer", pubSubPeer, score = pubSubPeer.score
@@ -201,8 +210,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
for t in toSeq(g.gossipsub.keys):
g.gossipsub.removePeer(t, pubSubPeer)
-    # also try to remove from explicit table here
-    g.explicit.removePeer(t, pubSubPeer)
+    # also try to remove from direct peers table here
+    g.subscribedDirectPeers.removePeer(t, pubSubPeer)
for t in toSeq(g.fanout.keys):
g.fanout.removePeer(t, pubSubPeer)
@@ -211,6 +220,8 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =
for topic, info in stats[].topicInfos.mpairs:
info.firstMessageDeliveries = 0
+  pubSubPeer.stopSendNonPriorityTask()
procCall FloodSub(g).unsubscribePeer(peer)
proc handleSubscribe*(g: GossipSub,
@@ -241,7 +252,7 @@ proc handleSubscribe*(g: GossipSub,
# subscribe remote peer to the topic
discard g.gossipsub.addPeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
-      discard g.explicit.addPeer(topic, peer)
+      discard g.subscribedDirectPeers.addPeer(topic, peer)
else:
trace "peer unsubscribed from topic"
@@ -255,7 +266,7 @@ proc handleSubscribe*(g: GossipSub,
g.fanout.removePeer(topic, peer)
if peer.peerId in g.parameters.directPeers:
-      g.explicit.removePeer(topic, peer)
+      g.subscribedDirectPeers.removePeer(topic, peer)
trace "gossip peers", peers = g.gossipsub.peers(topic), topic
@@ -263,18 +274,35 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
g.handlePrune(peer, control.prune)
var respControl: ControlMessage
+  g.handleIDontWant(peer, control.idontwant)
let iwant = g.handleIHave(peer, control.ihave)
if iwant.messageIds.len > 0:
respControl.iwant.add(iwant)
respControl.prune.add(g.handleGraft(peer, control.graft))
let messages = g.handleIWant(peer, control.iwant)
-  if
-    respControl.prune.len > 0 or
-    respControl.iwant.len > 0 or
-    messages.len > 0:
-    # iwant and prunes from here, also messages
+  let
+    isPruneNotEmpty = respControl.prune.len > 0
+    isIWantNotEmpty = respControl.iwant.len > 0
+  if isPruneNotEmpty or isIWantNotEmpty:
+    if isIWantNotEmpty:
+      libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
+    if isPruneNotEmpty:
+      for prune in respControl.prune:
+        if g.knownTopics.contains(prune.topicId):
+          libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
+        else:
+          libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
+    trace "sending control message", msg = shortLog(respControl), peer
+    g.send(
+      peer,
+      RPCMsg(control: some(respControl)), isHighPriority = true)
+  if messages.len > 0:
for smsg in messages:
for topic in smsg.topicIds:
if g.knownTopics.contains(topic):
@@ -282,18 +310,11 @@ proc handleControl(g: GossipSub, peer: PubSubPeer, control: ControlMessage) =
else:
libp2p_pubsub_broadcast_messages.inc(labelValues = ["generic"])
-    libp2p_pubsub_broadcast_iwant.inc(respControl.iwant.len.int64)
-    for prune in respControl.prune:
-      if g.knownTopics.contains(prune.topicId):
-        libp2p_pubsub_broadcast_prune.inc(labelValues = [prune.topicId])
-      else:
-        libp2p_pubsub_broadcast_prune.inc(labelValues = ["generic"])
-    trace "sending control message", msg = shortLog(respControl), peer
+    # iwant replies have lower priority
+    trace "sending iwant reply messages", peer
     g.send(
       peer,
-      RPCMsg(control: some(respControl), messages: messages))
+      RPCMsg(messages: messages), isHighPriority = false)
proc validateAndRelay(g: GossipSub,
msg: Message,
@@ -305,12 +326,13 @@ proc validateAndRelay(g: GossipSub,
var seenPeers: HashSet[PubSubPeer]
discard g.validationSeen.pop(msgIdSalted, seenPeers)
libp2p_gossipsub_duplicate_during_validation.inc(seenPeers.len.int64)
+    libp2p_gossipsub_saved_bytes.inc((msg.data.len * seenPeers.len).int64, labelValues = ["validation_duplicate"])
case validation
of ValidationResult.Reject:
debug "Dropping message after validation, reason: reject",
msgId = shortLog(msgId), peer
-    g.punishInvalidMessage(peer, msg.topicIds)
+    await g.punishInvalidMessage(peer, msg)
return
of ValidationResult.Ignore:
debug "Dropping message after validation, reason: ignore",
@@ -332,15 +354,35 @@ proc validateAndRelay(g: GossipSub,
g.floodsub.withValue(t, peers): toSendPeers.incl(peers[])
g.mesh.withValue(t, peers): toSendPeers.incl(peers[])
+    # add direct peers
+    toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(t))
# Don't send it to source peer, or peers that
# sent it during validation
toSendPeers.excl(peer)
toSendPeers.excl(seenPeers)
+  # IDontWant is only worth it if the message is substantially
+  # bigger than the messageId
+  if msg.data.len > msgId.len * 10:
+    g.broadcast(toSendPeers, RPCMsg(control: some(ControlMessage(
+      idontwant: @[ControlIWant(messageIds: @[msgId])]
+    ))), isHighPriority = true)
+  for peer in toSendPeers:
+    for heDontWant in peer.heDontWants:
+      if msgId in heDontWant:
+        seenPeers.incl(peer)
+        libp2p_gossipsub_idontwant_saved_messages.inc
+        libp2p_gossipsub_saved_bytes.inc(msg.data.len.int64, labelValues = ["idontwant"])
+        break
+  toSendPeers.excl(seenPeers)
# In theory, if topics are the same in all messages, we could batch - we'd
# also have to be careful to only include validated messages
-  g.broadcast(toSendPeers, RPCMsg(messages: @[msg]))
-  trace "forwared message to peers", peers = toSendPeers.len, msgId, peer
+  g.broadcast(toSendPeers, RPCMsg(messages: @[msg]), isHighPriority = false)
+  trace "forwarded message to peers", peers = toSendPeers.len, msgId, peer
for topic in msg.topicIds:
if topic notin g.topics: continue
@@ -353,9 +395,65 @@ proc validateAndRelay(g: GossipSub,
except CatchableError as exc:
info "validateAndRelay failed", msg=exc.msg
+proc dataAndTopicsIdSize(msgs: seq[Message]): int =
+  msgs.mapIt(it.data.len + it.topicIds.mapIt(it.len).foldl(a + b, 0)).foldl(a + b, 0)
+proc rateLimit*(g: GossipSub, peer: PubSubPeer, rpcMsgOpt: Opt[RPCMsg], msgSize: int) {.async.} =
+  # In this way we count even ignored fields by protobuf
+  var rmsg = rpcMsgOpt.valueOr:
+    peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+      if not overheadRateLimit.tryConsume(msgSize):
+        libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+        debug "Peer sent a msg that couldn't be decoded and it's above rate limit.", peer, uselessAppBytesNum = msgSize
+        if g.parameters.disconnectPeerAboveRateLimit:
+          await g.disconnectPeer(peer)
+          raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
+    raise newException(CatchableError, "Peer msg couldn't be decoded")
+  let usefulMsgBytesNum =
+    if g.verifySignature:
+      byteSize(rmsg.messages)
+    else:
+      dataAndTopicsIdSize(rmsg.messages)
+  var uselessAppBytesNum = msgSize - usefulMsgBytesNum
+  rmsg.control.withValue(control):
+    uselessAppBytesNum -= (byteSize(control.ihave) + byteSize(control.iwant))
+  peer.overheadRateLimitOpt.withValue(overheadRateLimit):
+    if not overheadRateLimit.tryConsume(uselessAppBytesNum):
+      libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
+      debug "Peer sent too much useless application data and it's above rate limit.", peer, msgSize, uselessAppBytesNum, rmsg
+      if g.parameters.disconnectPeerAboveRateLimit:
+        await g.disconnectPeer(peer)
+        raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
method rpcHandler*(g: GossipSub,
peer: PubSubPeer,
-                   rpcMsg: RPCMsg) {.async.} =
+                   data: seq[byte]) {.async.} =
+  let msgSize = data.len
+  var rpcMsg = decodeRpcMsg(data).valueOr:
+    debug "failed to decode msg from peer", peer, err = error
+    await rateLimit(g, peer, Opt.none(RPCMsg), msgSize)
+    return
+  when defined(libp2p_expensive_metrics):
+    for m in rpcMsg.messages:
+      for t in m.topicIds:
+        libp2p_pubsub_received_messages.inc(labelValues = [$peer.peerId, t])
+  trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
+  await rateLimit(g, peer, Opt.some(rpcMsg), msgSize)
+  # trigger hooks
+  peer.recvObservers(rpcMsg)
+  if rpcMsg.ping.len in 1..<64 and peer.pingBudget > 0:
+    g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
+    peer.pingBudget.dec
for i in 0..<min(g.topicsHigh, rpcMsg.subscriptions.len):
template sub: untyped = rpcMsg.subscriptions[i]
g.handleSubscribe(peer, sub.topic, sub.subscribe)
@@ -413,14 +511,14 @@ method rpcHandler*(g: GossipSub,
# always validate if signature is present or required
debug "Dropping message due to failed signature verification",
msgId = shortLog(msgId), peer
-      g.punishInvalidMessage(peer, msg.topicIds)
+      await g.punishInvalidMessage(peer, msg)
continue
if msg.seqno.len > 0 and msg.seqno.len != 8:
# if we have seqno should be 8 bytes long
debug "Dropping message due to invalid seqno length",
msgId = shortLog(msgId), peer
-      g.punishInvalidMessage(peer, msg.topicIds)
+      await g.punishInvalidMessage(peer, msg)
continue
# g.anonymize needs no evaluation when receiving messages
@@ -464,7 +562,7 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.unsubscribeBackoff.seconds.uint64)])))
-    g.broadcast(mpeers, msg)
+    g.broadcast(mpeers, msg, isHighPriority = true)
for peer in mpeers:
g.pruned(peer, topic, backoff = some(g.parameters.unsubscribeBackoff))
@@ -492,32 +590,38 @@ method publish*(g: GossipSub,
var peers: HashSet[PubSubPeer]
-  if g.parameters.floodPublish:
-    # With flood publishing enabled, the mesh is used when propagating messages from other peers,
-    # but a peer's own messages will always be published to all known peers in the topic.
-    for peer in g.gossipsub.getOrDefault(topic):
-      if peer.score >= g.parameters.publishThreshold:
-        trace "publish: including flood/high score peer", peer
-        peers.incl(peer)
   # add always direct peers
-  peers.incl(g.explicit.getOrDefault(topic))
+  peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
if topic in g.topics: # if we're subscribed use the mesh
peers.incl(g.mesh.getOrDefault(topic))
-  if peers.len < g.parameters.dLow and g.parameters.floodPublish == false:
-    # not subscribed or bad mesh, send to fanout peers
-    # disable for floodPublish, since we already sent to every good peer
-    #
+  if g.parameters.floodPublish:
+    # With flood publishing enabled, the mesh is used when propagating messages from other peers,
+    # but a peer's own messages will always be published to all known peers in the topic, limited
+    # to the amount of peers we can send it to in one heartbeat
+    var maxPeersToFlodOpt: Opt[int64]
+    if g.parameters.bandwidthEstimatebps > 0:
+      let
+        bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
+        msToTransmit = max(data.len div bandwidth, 1)
+      maxPeersToFlodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))
+    for peer in g.gossipsub.getOrDefault(topic):
+      maxPeersToFlodOpt.withValue(maxPeersToFlod):
+        if peers.len >= maxPeersToFlod: break
+      if peer.score >= g.parameters.publishThreshold:
+        trace "publish: including flood/high score peer", peer
+        peers.incl(peer)
+  if peers.len < g.parameters.dLow:
+    # not subscribed, or bad mesh, send to fanout peers
var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
-    if fanoutPeers.len == 0:
+    if fanoutPeers.len < g.parameters.dLow:
g.replenishFanout(topic)
fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
g.rng.shuffle(fanoutPeers)
+    if fanoutPeers.len + peers.len > g.parameters.d:
+      fanoutPeers.setLen(g.parameters.d - peers.len)
for fanPeer in fanoutPeers:
peers.incl(fanPeer)
@@ -535,7 +639,6 @@ method publish*(g: GossipSub,
debug "No peers for topic, skipping publish", peersOnTopic = topicPeers.len,
connectedPeers = topicPeers.filterIt(it.connected).len,
topic
-    # skipping topic as our metrics finds that heavy
libp2p_gossipsub_failed_publish.inc()
return 0
@@ -563,7 +666,7 @@ method publish*(g: GossipSub,
g.mcache.put(msgId, msg)
-  g.broadcast(peers, RPCMsg(messages: @[msg]))
+  g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
@@ -571,15 +674,16 @@ method publish*(g: GossipSub,
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = ["generic"])
trace "Published message to peers", peers=peers.len
return peers.len
proc maintainDirectPeer(g: GossipSub, id: PeerId, addrs: seq[MultiAddress]) {.async.} =
-  let peer = g.peers.getOrDefault(id)
-  if isNil(peer):
+  if id notin g.peers:
trace "Attempting to dial a direct peer", peer = id
if g.switch.isConnected(id):
warn "We are connected to a direct peer, but it isn't a GossipSub peer!", id
return
try:
-      await g.switch.connect(id, addrs)
+      await g.switch.connect(id, addrs, forceDial = true)
# populate the peer after it's connected
discard g.getOrCreatePeer(id, g.codecs)
except CancelledError as exc:
@@ -623,7 +727,7 @@ method stop*(g: GossipSub) {.async.} =
g.heartbeatFut = nil
method initPubSub*(g: GossipSub)
-  {.raises: [Defect, InitializationError].} =
+  {.raises: [InitializationError].} =
procCall FloodSub(g).initPubSub()
if not g.parameters.explicit:
@@ -638,3 +742,13 @@ method initPubSub*(g: GossipSub)
# init gossip stuff
g.mcache = MCache.init(g.parameters.historyGossip, g.parameters.historyLength)
+method getOrCreatePeer*(
+    g: GossipSub,
+    peerId: PeerId,
+    protos: seq[string]): PubSubPeer =
+  let peer = procCall PubSub(g).getOrCreatePeer(peerId, protos)
+  g.parameters.overheadRateLimit.withValue(overheadRateLimit):
+    peer.overheadRateLimitOpt = Opt.some(TokenBucket.new(overheadRateLimit.bytes, overheadRateLimit.interval))
+  return peer


@@ -7,15 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
-import std/[tables, sequtils, sets, algorithm]
+import std/[tables, sequtils, sets, algorithm, deques]
 import chronos, chronicles, metrics
 import "."/[types, scoring]
-import ".."/[pubsubpeer, peertable, timedcache, mcache, floodsub, pubsub]
+import ".."/[pubsubpeer, peertable, mcache, floodsub, pubsub]
import "../rpc"/[messages]
import "../../.."/[peerid, multiaddress, utility, switch, routing_record, signed_envelope, utils/heartbeat]
@@ -31,9 +28,9 @@ declareGauge(libp2p_gossipsub_no_peers_topics, "number of topics in mesh with no
declareGauge(libp2p_gossipsub_low_peers_topics, "number of topics in mesh with at least one but below dlow peers")
declareGauge(libp2p_gossipsub_healthy_peers_topics, "number of topics in mesh with at least dlow peers (but below dhigh)")
declareCounter(libp2p_gossipsub_above_dhigh_condition, "number of above dhigh pruning branches ran", labels = ["topic"])
-declareSummary(libp2p_gossipsub_mcache_hit, "ratio of successful IWANT message cache lookups")
+declareGauge(libp2p_gossipsub_received_iwants, "received iwants", labels = ["kind"])
-proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [Defect].} =
+proc grafted*(g: GossipSub, p: PubSubPeer, topic: string) {.raises: [].} =
g.withPeerStats(p.peerId) do (stats: var PeerStats):
var info = stats.topicInfos.getOrDefault(topic)
info.graftTime = Moment.now()
@@ -49,12 +46,10 @@ proc pruned*(g: GossipSub,
p: PubSubPeer,
topic: string,
setBackoff: bool = true,
-             backoff = none(Duration)) {.raises: [Defect].} =
+             backoff = none(Duration)) {.raises: [].} =
   if setBackoff:
     let
-      backoffDuration =
-        if isSome(backoff): backoff.get()
-        else: g.parameters.pruneBackoff
+      backoffDuration = backoff.get(g.parameters.pruneBackoff)
backoffMoment = Moment.fromNow(backoffDuration)
g.backingOff
@@ -75,7 +70,7 @@ proc pruned*(g: GossipSub,
trace "pruned", peer=p, topic
-proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].} =
+proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [].} =
let now = Moment.now()
var expired = toSeq(t.getOrDefault(topic).pairs())
expired.keepIf do (pair: tuple[peer: PeerId, expire: Moment]) -> bool:
@@ -84,7 +79,7 @@ proc handleBackingOff*(t: var BackoffTable, topic: string) {.raises: [Defect].}
t.withValue(topic, v):
v[].del(peer)
-proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [Defect].} =
+proc peerExchangeList*(g: GossipSub, topic: string): seq[PeerInfoMsg] {.raises: [].} =
if not g.parameters.enablePX:
return @[]
var peers = g.gossipsub.getOrDefault(topic, initHashSet[PubSubPeer]()).toSeq()
@@ -111,10 +106,11 @@ proc handleGraft*(g: GossipSub,
let topic = graft.topicId
trace "peer grafted topic", peer, topic
-    # It is an error to GRAFT on a explicit peer
+    # It is an error to GRAFT on a direct peer
     if peer.peerId in g.parameters.directPeers:
-      # receiving a graft from a direct peer should yield a more prominent warning (protocol violation)
-      warn "an explicit peer attempted to graft us, peering agreements should be reciprocal",
+      # we are trusting direct peer not to abuse this
+      warn "a direct peer attempted to graft us, peering agreements should be reciprocal",
peer, topic
# and such an attempt should be logged and rejected with a PRUNE
prunes.add(ControlPrune(
@@ -164,7 +160,8 @@ proc handleGraft*(g: GossipSub,
# If they send us a graft before they send us a subscribe, what should
# we do? For now, we add them to mesh but don't add them to gossipsub.
if topic in g.topics:
-      if g.mesh.peers(topic) < g.parameters.dHigh or peer.outbound:
+      if g.mesh.peers(topic) < g.parameters.dHigh or
+          (peer.outbound and g.mesh.outboundPeers(topic) < g.parameters.dOut):
# In the spec, there's no mention of DHi here, but implicitly, a
# peer will be removed from the mesh on next rebalance, so we don't want
# this peer to push someone else out
@@ -193,27 +190,22 @@ proc handleGraft*(g: GossipSub,
proc getPeers(prune: ControlPrune, peer: PubSubPeer): seq[(PeerId, Option[PeerRecord])] =
var routingRecords: seq[(PeerId, Option[PeerRecord])]
for record in prune.peers:
-    let peerRecord =
-      if record.signedPeerRecord.len == 0:
-        none(PeerRecord)
-      else:
-        let signedRecord = SignedPeerRecord.decode(record.signedPeerRecord)
-        if signedRecord.isErr:
-          trace "peer sent invalid SPR", peer, error=signedRecord.error
-          none(PeerRecord)
-        else:
-          if record.peerId != signedRecord.get().data.peerId:
-            trace "peer sent envelope with wrong public key", peer
-            none(PeerRecord)
-          else:
-            some(signedRecord.get().data)
+    var peerRecord = none(PeerRecord)
+    if record.signedPeerRecord.len > 0:
+      SignedPeerRecord.decode(record.signedPeerRecord).toOpt().withValue(spr):
+        if record.peerId != spr.data.peerId:
+          trace "peer sent envelope with wrong public key", peer
+        else:
+          peerRecord = some(spr.data)
+      else:
+        trace "peer sent invalid SPR", peer
routingRecords.add((record.peerId, peerRecord))
routingRecords
-proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [Defect].} =
+proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.raises: [].} =
for prune in prunes:
let topic = prune.topicId
@@ -247,64 +239,68 @@ proc handlePrune*(g: GossipSub, peer: PubSubPeer, prunes: seq[ControlPrune]) {.r
proc handleIHave*(g: GossipSub,
peer: PubSubPeer,
-                  ihaves: seq[ControlIHave]): ControlIWant {.raises: [Defect].} =
+                  ihaves: seq[ControlIHave]): ControlIWant {.raises: [].} =
var res: ControlIWant
if peer.score < g.parameters.gossipThreshold:
trace "ihave: ignoring low score peer", peer, score = peer.score
elif peer.iHaveBudget <= 0:
trace "ihave: ignoring out of budget peer", peer, score = peer.score
else:
-    # TODO review deduplicate algorithm
-    # * https://github.com/nim-lang/Nim/blob/5f46474555ee93306cce55342e81130c1da79a42/lib/pure/collections/sequtils.nim#L184
-    # * it's probably not efficient and might give preference to the first dupe
-    let deIhaves = ihaves.deduplicate()
-    for ihave in deIhaves:
+    for ihave in ihaves:
trace "peer sent ihave",
peer, topic = ihave.topicId, msgs = ihave.messageIds
-      if ihave.topicId in g.mesh:
-        # also avoid duplicates here!
-        let deIhavesMsgs = ihave.messageIds.deduplicate()
-        for msgId in deIhavesMsgs:
+      if ihave.topicId in g.topics:
+        for msgId in ihave.messageIds:
if not g.hasSeen(msgId):
-            if peer.iHaveBudget > 0:
+            if peer.iHaveBudget <= 0:
+              break
+            elif msgId notin res.messageIds:
res.messageIds.add(msgId)
dec peer.iHaveBudget
trace "requested message via ihave", messageID=msgId
-            else:
-              break
+    # shuffling res.messageIDs before sending it out to increase the likelihood
+    # of getting an answer if the peer truncates the list due to internal size restrictions.
+    g.rng.shuffle(res.messageIds)
return res
+proc handleIDontWant*(g: GossipSub,
+                      peer: PubSubPeer,
+                      iDontWants: seq[ControlIWant]) =
+  for dontWant in iDontWants:
+    for messageId in dontWant.messageIds:
+      if peer.heDontWants[^1].len > 1000: break
+      if messageId.len > 100: continue
+      peer.heDontWants[^1].incl(messageId)
proc handleIWant*(g: GossipSub,
peer: PubSubPeer,
-                  iwants: seq[ControlIWant]): seq[Message] {.raises: [Defect].} =
-  var messages: seq[Message]
+                  iwants: seq[ControlIWant]): seq[Message] {.raises: [].} =
+  var
+    messages: seq[Message]
+    invalidRequests = 0
if peer.score < g.parameters.gossipThreshold:
trace "iwant: ignoring low score peer", peer, score = peer.score
-  elif peer.iWantBudget <= 0:
-    trace "iwant: ignoring out of budget peer", peer, score = peer.score
   else:
-    let deIwants = iwants.deduplicate()
-    for iwant in deIwants:
-      let deIwantsMsgs = iwant.messageIds.deduplicate()
-      for mid in deIwantsMsgs:
+    for iwant in iwants:
+      for mid in iwant.messageIds:
trace "peer sent iwant", peer, messageID = mid
-        let msg = g.mcache.get(mid)
-        if msg.isSome:
-          libp2p_gossipsub_mcache_hit.observe(1)
-          # avoid spam
-          if peer.iWantBudget > 0:
-            messages.add(msg.get())
-            dec peer.iWantBudget
-          else:
-            break
-        else:
-          libp2p_gossipsub_mcache_hit.observe(0)
+        # canAskIWant will only return true once for a specific message
+        if not peer.canAskIWant(mid):
+          libp2p_gossipsub_received_iwants.inc(1, labelValues=["notsent"])
+          invalidRequests.inc()
+          if invalidRequests > 20:
+            libp2p_gossipsub_received_iwants.inc(1, labelValues=["skipped"])
+            return messages
+          continue
+        let msg = g.mcache.get(mid).valueOr:
+          libp2p_gossipsub_received_iwants.inc(1, labelValues=["unknown"])
+          continue
+        libp2p_gossipsub_received_iwants.inc(1, labelValues=["correct"])
+        messages.add(msg)
return messages
-proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
+proc commitMetrics(metrics: var MeshMetrics) {.raises: [].} =
libp2p_gossipsub_low_peers_topics.set(metrics.lowPeersTopics)
libp2p_gossipsub_no_peers_topics.set(metrics.noPeersTopics)
libp2p_gossipsub_under_dout_topics.set(metrics.underDoutTopics)
@@ -313,7 +309,7 @@ proc commitMetrics(metrics: var MeshMetrics) {.raises: [Defect].} =
libp2p_gossipsub_peers_per_topic_fanout.set(metrics.otherPeersPerTopicFanout, labelValues = ["other"])
libp2p_gossipsub_peers_per_topic_mesh.set(metrics.otherPeersPerTopicMesh, labelValues = ["other"])
-proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [Defect].} =
+proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil) {.raises: [].} =
logScope:
topic
mesh = g.mesh.peers(topic)
@@ -326,10 +322,11 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
var
prunes, grafts: seq[PubSubPeer]
npeers = g.mesh.peers(topic)
+    nOutPeers = g.mesh.outboundPeers(topic)
     defaultMesh: HashSet[PubSubPeer]
     backingOff = g.backingOff.getOrDefault(topic)
   if npeers < g.parameters.dLow:
trace "replenishing mesh", peers = npeers
# replenish the mesh if we're below Dlo
@@ -344,7 +341,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= 0.0 and
it notin currentMesh[] and
-          # don't pick explicit peers
+          # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -368,7 +365,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
g.fanout.removePeer(topic, peer)
grafts &= peer
-  else:
+  elif nOutPeers < g.parameters.dOut:
trace "replenishing mesh outbound quota", peers = g.mesh.peers(topic)
var
@@ -384,7 +381,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it notin currentMesh[] and
# avoid negative score peers
it.score >= 0.0 and
-          # don't pick explicit peers
+          # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -396,8 +393,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# sort peers by score, high score first, we are grafting
candidates.sort(byScore, SortOrder.Descending)
-    # Graft peers so we reach a count of D
-    candidates.setLen(min(candidates.len, g.parameters.dOut))
+    # Graft outgoing peers so we reach a count of dOut
+    candidates.setLen(min(candidates.len, g.parameters.dOut - nOutPeers))
trace "grafting outbound peers", topic, peers = candidates.len
@@ -486,7 +483,7 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# avoid negative score peers
it.score >= median.score and
it notin currentMesh[] and
-          # don't pick explicit peers
+          # don't pick direct peers
it.peerId notin g.parameters.directPeers and
# and avoid peers we are backing off
it.peerId notin backingOff:
@@ -533,16 +530,16 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
# Send changes to peers after table updates to avoid stale state
if grafts.len > 0:
let graft = RPCMsg(control: some(ControlMessage(graft: @[ControlGraft(topicID: topic)])))
-    g.broadcast(grafts, graft)
+    g.broadcast(grafts, graft, isHighPriority = true)
if prunes.len > 0:
let prune = RPCMsg(control: some(ControlMessage(
prune: @[ControlPrune(
topicID: topic,
peers: g.peerExchangeList(topic),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
-    g.broadcast(prunes, prune)
+    g.broadcast(prunes, prune, isHighPriority = true)
-proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
+proc dropFanoutPeers*(g: GossipSub) {.raises: [].} =
# drop peers that we haven't published to in
# GossipSubFanoutTTL seconds
let now = Moment.now()
@@ -555,13 +552,13 @@ proc dropFanoutPeers*(g: GossipSub) {.raises: [Defect].} =
for topic in drops:
g.lastFanoutPubSub.del topic
-proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
+proc replenishFanout*(g: GossipSub, topic: string) {.raises: [].} =
## get fanout peers for a topic
logScope: topic
trace "about to replenish fanout"
-  let currentMesh = g.mesh.getOrDefault(topic)
   if g.fanout.peers(topic) < g.parameters.dLow:
+    let currentMesh = g.mesh.getOrDefault(topic)
trace "replenishing fanout", peers = g.fanout.peers(topic)
for peer in g.gossipsub.getOrDefault(topic):
if peer in currentMesh: continue
@@ -571,7 +568,7 @@ proc replenishFanout*(g: GossipSub, topic: string) {.raises: [Defect].} =
trace "fanout replenished with peers", peers = g.fanout.peers(topic)
-proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [Defect].} =
+proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises: [].} =
## gossip iHave messages to peers
##
@@ -624,20 +621,29 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] {.raises:
g.rng.shuffle(allPeers)
allPeers.setLen(target)
+    let msgIdsAsSet = ihave.messageIds.toHashSet()
for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
+      peer.sentIHaves[^1].incl(msgIdsAsSet)
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)
return control
proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
proc onHeartbeat(g: GossipSub) {.raises: [].} =
# reset IWANT budget
# reset IHAVE cap
block:
for peer in g.peers.values:
peer.iWantBudget = IWantPeerBudget
+      peer.sentIHaves.addFirst(default(HashSet[MessageId]))
+      if peer.sentIHaves.len > g.parameters.historyLength:
+        discard peer.sentIHaves.popLast()
+      peer.heDontWants.addFirst(default(HashSet[MessageId]))
+      if peer.heDontWants.len > g.parameters.historyLength:
+        discard peer.heDontWants.popLast()
       peer.iHaveBudget = IHavePeerBudget
+      peer.pingBudget = PingsPeerBudget
var meshMetrics = MeshMetrics()
@@ -663,7 +669,7 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
topicID: t,
peers: g.peerExchangeList(t),
backoff: g.parameters.pruneBackoff.seconds.uint64)])))
-    g.broadcast(prunes, prune)
+    g.broadcast(prunes, prune, isHighPriority = true)
# pass by ptr in order to both signal we want to update metrics
# and as well update the struct for each topic during this iteration
@@ -685,11 +691,11 @@ proc onHeartbeat(g: GossipSub) {.raises: [Defect].} =
libp2p_pubsub_broadcast_ihave.inc(labelValues = [ihave.topicId])
else:
libp2p_pubsub_broadcast_ihave.inc(labelValues = ["generic"])
-    g.send(peer, RPCMsg(control: some(control)))
+    g.send(peer, RPCMsg(control: some(control)), isHighPriority = true)
   g.mcache.shift() # shift the cache
-# {.pop.} # raises [Defect]
+# {.pop.} # raises []
proc heartbeat*(g: GossipSub) {.async.} =
heartbeat "GossipSub", g.parameters.heartbeatInterval:


@@ -7,16 +7,16 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
-when (NimMajor, NimMinor) < (1, 4):
-  {.push raises: [Defect].}
-else:
-  {.push raises: [].}
+{.push raises: [].}
-import std/[tables, sets, options]
+import std/[tables, sets]
import chronos, chronicles, metrics
+import chronos/ratelimit
import "."/[types]
import ".."/[pubsubpeer]
import "../../.."/[peerid, multiaddress, utility, switch, utils/heartbeat]
import ../rpc/messages
import "../../.."/[peerid, multiaddress, switch, utils/heartbeat]
import ../pubsub
logScope:
topics = "libp2p gossipsub"
@@ -30,6 +30,7 @@ declareGauge(libp2p_gossipsub_peers_score_invalidMessageDeliveries, "Detailed go
declareGauge(libp2p_gossipsub_peers_score_appScore, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_behaviourPenalty, "Detailed gossipsub scoring metric", labels = ["agent"])
declareGauge(libp2p_gossipsub_peers_score_colocationFactor, "Detailed gossipsub scoring metric", labels = ["agent"])
+declarePublicCounter(libp2p_gossipsub_peers_rate_limit_hits, "The number of times peers were above their rate limit", labels = ["agent"])
proc init*(_: type[TopicParams]): TopicParams =
TopicParams(
@@ -55,7 +56,7 @@ proc init*(_: type[TopicParams]): TopicParams =
proc withPeerStats*(
g: GossipSub,
peerId: PeerId,
-    action: proc (stats: var PeerStats) {.gcsafe, raises: [Defect].}) =
+    action: proc (stats: var PeerStats) {.gcsafe, raises: [].}) =
## Add or update peer statistics for a particular peer id - the statistics
## are retained across multiple connections until they expire
g.peerStats.withValue(peerId, stats) do:
@@ -74,39 +75,32 @@ func `/`(a, b: Duration): float64 =
func byScore*(x,y: PubSubPeer): int = system.cmp(x.score, y.score)
proc colocationFactor(g: GossipSub, peer: PubSubPeer): float64 =
if peer.address.isNone():
0.0
else:
let
address = peer.address.get()
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
trace "colocationFactor over threshold", peer, address, ipPeers
let over = ipPeers - g.parameters.ipColocationFactorThreshold
over * over
else:
0.0
let address = peer.address.valueOr: return 0.0
g.peersInIP.mgetOrPut(address, initHashSet[PeerId]()).incl(peer.peerId)
let
ipPeers = g.peersInIP.getOrDefault(address).len().float64
if ipPeers > g.parameters.ipColocationFactorThreshold:
trace "colocationFactor over threshold", peer, address, ipPeers
let over = ipPeers - g.parameters.ipColocationFactorThreshold
over * over
else:
0.0
{.pop.}
proc disconnectPeer(g: GossipSub, peer: PubSubPeer) {.async.} =
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [agent])
proc disconnectPeer*(g: GossipSub, peer: PubSubPeer) {.async.} =
try:
await g.switch.disconnect(peer.peerId)
except CatchableError as exc: # Never cancelled
trace "Failed to close connection", peer, error = exc.name, msg = exc.msg
proc disconnectIfBadScorePeer*(g: GossipSub, peer: PubSubPeer, score: float64) =
if g.parameters.disconnectBadPeers and score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(g.disconnectPeer(peer))
libp2p_gossipsub_bad_score_disconnection.inc(labelValues = [peer.getAgent()])
proc updateScores*(g: GossipSub) = # avoid async
## https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#the-score-function
@@ -176,14 +170,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += topicScore * topicParams.topicWeight
# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_firstMessageDeliveries.inc(info.firstMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshMessageDeliveries.inc(info.meshMessageDeliveries, labelValues = [agent])
libp2p_gossipsub_peers_score_meshFailurePenalty.inc(info.meshFailurePenalty, labelValues = [agent])
@@ -220,14 +207,7 @@ proc updateScores*(g: GossipSub) = # avoid async
score += colocationFactor * g.parameters.ipColocationFactorWeight
# Score metrics
let agent =
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
let agent = peer.getAgent()
libp2p_gossipsub_peers_score_appScore.inc(peer.appScore, labelValues = [agent])
libp2p_gossipsub_peers_score_behaviourPenalty.inc(peer.behaviourPenalty, labelValues = [agent])
libp2p_gossipsub_peers_score_colocationFactor.inc(colocationFactor, labelValues = [agent])
@@ -247,11 +227,7 @@ proc updateScores*(g: GossipSub) = # avoid async
trace "updated peer's score", peer, score = peer.score, n_topics, is_grafted
if g.parameters.disconnectBadPeers and stats.score < g.parameters.graylistThreshold and
peer.peerId notin g.parameters.directPeers:
debug "disconnecting bad score peer", peer, score = peer.score
asyncSpawn(try: g.disconnectPeer(peer) except Exception as exc: raiseAssert exc.msg)
g.disconnectIfBadScorePeer(peer, stats.score)
libp2p_gossipsub_peers_scores.inc(peer.score, labelValues = [agent])
for peer in evicting:
@@ -264,8 +240,18 @@ proc scoringHeartbeat*(g: GossipSub) {.async.} =
trace "running scoring heartbeat", instance = cast[int](g)
g.updateScores()
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, topics: seq[string]) =
for tt in topics:
proc punishInvalidMessage*(g: GossipSub, peer: PubSubPeer, msg: Message) {.async.} =
let uselessAppBytesNum = msg.data.len
peer.overheadRateLimitOpt.withValue(overheadRateLimit):
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
debug "Peer sent invalid message and it's above rate limit", peer, uselessAppBytesNum
libp2p_gossipsub_peers_rate_limit_hits.inc(labelValues = [peer.getAgent()]) # let's just measure at the beginning for test purposes.
if g.parameters.disconnectPeerAboveRateLimit:
await g.disconnectPeer(peer)
raise newException(PeerRateLimitError, "Peer disconnected because it's above rate limit.")
for tt in msg.topicIds:
let t = tt
if t notin g.topics:
continue
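punishInvalidMessage now charges the size of the useless payload against the peer's overhead bucket and escalates when the bucket runs dry. A standalone sketch of that consumption rule, assuming the chronos/ratelimit TokenBucket API used above (TokenBucket.new(budget, interval), tryConsume(tokens)); the numbers are illustrative:

import chronos
import chronos/ratelimit

# 32 KiB of tolerated overhead, refilled every second
let overheadRateLimit = TokenBucket.new(32768, 1.seconds)

let uselessAppBytesNum = 5000
if not overheadRateLimit.tryConsume(uselessAppBytesNum):
  echo "above rate limit - a node could now penalize or disconnect the peer"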


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos
import std/[options, tables, sets]
@@ -48,7 +45,7 @@ const
const
BackoffSlackTime* = 2 # seconds
IWantPeerBudget* = 25 # 25 messages per second ( reset every heartbeat )
PingsPeerBudget* = 100 # maximum of 6.4kb/heartbeat (6.4kb/s with default 1 second/hb)
IHavePeerBudget* = 10
# the max amount of IHave to expose; not defined by the spec, but the go implementation serves as an example
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
@@ -145,6 +142,11 @@ type
disconnectBadPeers*: bool
enablePX*: bool
bandwidthEstimatebps*: int # This is currently used only for limting flood publishing. 0 disables flood-limiting completely
overheadRateLimit*: Opt[tuple[bytes: int, interval: Duration]]
disconnectPeerAboveRateLimit*: bool
BackoffTable* = Table[string, Table[PeerId, Moment]]
ValidationSeenTable* = Table[MessageId, HashSet[PubSubPeer]]
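A hedged configuration sketch for the two new fields, assuming the GossipSubParams.init constructor (mirroring the TopicParams.init shown earlier) and the usual chronos/stew imports; values are illustrative:

import chronos
import stew/results
import libp2p/protocols/pubsub/gossipsub

var params = GossipSubParams.init()
# tolerate 128 KiB/s of non-useful bytes per peer before counting rate-limit hits
params.overheadRateLimit = Opt.some((bytes: 128 * 1024, interval: 1.seconds))
params.disconnectPeerAboveRateLimit = false # record hits only, do not kick peers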
@@ -153,13 +155,13 @@ type
proc(peer: PeerId,
tag: string, # For gossipsub, the topic
peers: seq[RoutingRecordsPair])
{.gcsafe, raises: [Defect].}
{.gcsafe, raises: [].}
GossipSub* = ref object of FloodSub
mesh*: PeerTable # peers that we send messages to when we are subscribed to the topic
fanout*: PeerTable # peers that we send messages to when we're not subscribed to the topic
gossipsub*: PeerTable # peers that are subscribed to a topic
explicit*: PeerTable # directpeers that we keep alive explicitly
subscribedDirectPeers*: PeerTable # directpeers that we keep alive
backingOff*: BackoffTable # peers to backoff from when replenishing the mesh
lastFanoutPubSub*: Table[string, Moment] # last publish time for fanout topics
gossip*: Table[string, seq[ControlIHave]] # pending gossip


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[sets, tables, options]
import rpc/[messages]


@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[tables, sets]
import std/[tables, sets, sequtils]
import ./pubsubpeer, ../../peerid
export tables, sets
@@ -53,3 +50,10 @@ func peers*(table: PeerTable, topic: string): int =
except KeyError: raiseAssert "checked with in"
else:
0
func outboundPeers*(table: PeerTable, topic: string): int =
if topic in table:
try: table[topic].countIt(it.outbound)
except KeyError: raiseAssert "checked with in"
else:
0


@@ -13,13 +13,11 @@
## `publish<#publish.e%2CPubSub%2Cstring%2Cseq%5Bbyte%5D>`_ something on it,
## and eventually `unsubscribe<#unsubscribe%2CPubSub%2Cstring%2CTopicHandler>`_ from it.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[tables, sequtils, sets, strutils]
import chronos, chronicles, metrics
import chronos/ratelimit
import ./errors as pubsub_errors,
./pubsubpeer,
./rpc/[message, messages, protobuf],
@@ -86,18 +84,18 @@ type
InitializationError* = object of LPError
TopicHandler* {.public.} = proc(topic: string,
data: seq[byte]): Future[void] {.gcsafe, raises: [Defect].}
data: seq[byte]): Future[void] {.gcsafe, raises: [].}
ValidatorHandler* {.public.} = proc(topic: string,
message: Message): Future[ValidationResult] {.gcsafe, raises: [Defect].}
message: Message): Future[ValidationResult] {.gcsafe, raises: [].}
TopicPair* = tuple[topic: string, handler: TopicHandler]
MsgIdProvider* {.public.} =
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [Defect], gcsafe.}
proc(m: Message): Result[MessageId, ValidationResult] {.noSideEffect, raises: [], gcsafe.}
SubscriptionValidator* {.public.} =
proc(topic: string): bool {.raises: [Defect], gcsafe.}
proc(topic: string): bool {.raises: [], gcsafe.}
## Every time a peer send us a subscription (even to an unknown topic),
## we have to store it, which may be an attack vector.
## This callback can be used to reject topic we're not interested in
@@ -140,18 +138,34 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg) {.raises: [Defect].} =
## Attempt to send `msg` to remote peer
proc send*(p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `peer`: An instance of `PubSubPeer` representing the peer to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be sent.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
trace "sending pubsub message to peer", peer, msg = shortLog(msg)
peer.send(msg, p.anonymize)
asyncSpawn peer.send(msg, p.anonymize, isHighPriority)
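Note the dispatch change: send no longer writes inline but detaches the returned future with asyncSpawn. A standalone chronos sketch of that fire-and-forget pattern:

import chronos

proc sendOne(id: int) {.async.} =
  await sleepAsync(10.milliseconds)
  echo "sent ", id

proc main() {.async.} =
  asyncSpawn sendOne(1) # detached: the caller never awaits, failures become Defect
  await sleepAsync(50.milliseconds)

waitFor main()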
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg) {.raises: [Defect].} =
## Attempt to send `msg` to the given peers
msg: RPCMsg,
isHighPriority: bool) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
## Parameters:
## - `p`: The `PubSub` instance.
## - `sendPeers`: An iterable of `PubSubPeer` instances representing the peers to whom the message should be sent.
## - `msg`: The `RPCMsg` instance that contains the message to be broadcast.
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
let npeers = sendPeers.len.int64
for sub in msg.subscriptions:
@@ -173,10 +187,9 @@ proc broadcast*(
else:
libp2p_pubsub_broadcast_messages.inc(npeers, labelValues = ["generic"])
if msg.control.isSome():
libp2p_pubsub_broadcast_iwant.inc(npeers * msg.control.get().iwant.len.int64)
msg.control.withValue(control):
libp2p_pubsub_broadcast_iwant.inc(npeers * control.iwant.len.int64)
let control = msg.control.get()
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_broadcast_ihave.inc(npeers, labelValues = [ihave.topicId])
@@ -198,19 +211,19 @@ proc broadcast*(
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg)
p.send(peer, msg, isHighPriority)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded)
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
proc sendSubs*(p: PubSub,
peer: PubSubPeer,
topics: openArray[string],
subscribe: bool) =
## send subscriptions to remote peer
p.send(peer, RPCMsg.withSubs(topics, subscribe))
p.send(peer, RPCMsg.withSubs(topics, subscribe), isHighPriority = true)
for topic in topics:
if subscribe:
@@ -247,9 +260,8 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
else:
libp2p_pubsub_received_messages.inc(labelValues = ["generic"])
if rpcMsg.control.isSome():
libp2p_pubsub_received_iwant.inc(rpcMsg.control.get().iwant.len.int64)
template control: untyped = rpcMsg.control.unsafeGet()
rpcMsg.control.withValue(control):
libp2p_pubsub_received_iwant.inc(control.iwant.len.int64)
for ihave in control.ihave:
if p.knownTopics.contains(ihave.topicId):
libp2p_pubsub_received_ihave.inc(labelValues = [ihave.topicId])
@@ -268,7 +280,7 @@ proc updateMetrics*(p: PubSub, rpcMsg: RPCMsg) =
method rpcHandler*(p: PubSub,
peer: PubSubPeer,
rpcMsg: RPCMsg): Future[void] {.base, async.} =
data: seq[byte]): Future[void] {.base, async.} =
## Handler that must be overridden by concrete implementation
raiseAssert "Unimplemented"
@@ -283,10 +295,11 @@ method onPubSubPeerEvent*(p: PubSub, peer: PubSubPeer, event: PubSubPeerEvent) {
of PubSubPeerEventKind.Disconnected:
discard
proc getOrCreatePeer*(
method getOrCreatePeer*(
p: PubSub,
peerId: PeerId,
protos: seq[string]): PubSubPeer =
protos: seq[string]): PubSubPeer {.base, gcsafe.} =
p.peers.withValue(peerId, peer):
return peer[]
@@ -359,9 +372,9 @@ method handleConn*(p: PubSub,
## that we're interested in
##
proc handler(peer: PubSubPeer, msg: RPCMsg): Future[void] =
proc handler(peer: PubSubPeer, data: seq[byte]): Future[void] =
# call pubsub rpc handler
p.rpcHandler(peer, msg)
p.rpcHandler(peer, data)
let peer = p.getOrCreatePeer(conn.peerId, @[proto])
@@ -491,7 +504,7 @@ method publish*(p: PubSub,
return 0
method initPubSub*(p: PubSub)
{.base, raises: [Defect, InitializationError].} =
{.base, raises: [InitializationError].} =
## perform pubsub initialization
p.observers = new(seq[PubSubObserver])
if p.msgIdProvider == nil:
@@ -559,7 +572,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false): P
{.raises: [Defect, InitializationError], public.} =
{.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
P(switch: switch,


@@ -7,14 +7,12 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[sequtils, strutils, tables, hashes, options]
import std/[sequtils, strutils, tables, hashes, options, sets, deques]
import stew/results
import chronos, chronicles, nimcrypto/sha2, metrics
import chronos/ratelimit
import rpc/[messages, message, protobuf],
../../peerid,
../../peerinfo,
@@ -23,21 +21,25 @@ import rpc/[messages, message, protobuf],
../../protobuf/minprotobuf,
../../utility
export peerid, connection
export peerid, connection, deques
logScope:
topics = "libp2p pubsubpeer"
when defined(libp2p_expensive_metrics):
declareCounter(libp2p_pubsub_sent_messages, "number of messages sent", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_received_messages, "number of messages received", labels = ["id", "topic"])
declareCounter(libp2p_pubsub_skipped_received_messages, "number of received skipped messages", labels = ["id"])
declareCounter(libp2p_pubsub_skipped_sent_messages, "number of sent skipped messages", labels = ["id"])
declareGauge(libp2p_gossipsub_priority_queue_size, "the number of messages in the priority queue", labels = ["id"])
declareGauge(libp2p_gossipsub_non_priority_queue_size, "the number of messages in the non-priority queue", labels = ["id"])
type
PeerRateLimitError* = object of CatchableError
PubSubObserver* = ref object
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [Defect].}
onRecv*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
onSend*: proc(peer: PubSubPeer; msgs: var RPCMsg) {.gcsafe, raises: [].}
PubSubPeerEventKind* {.pure.} = enum
Connected
@@ -46,9 +48,17 @@ type
PubSubPeerEvent* = object
kind*: PubSubPeerEventKind
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [Defect].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [Defect].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [Defect].}
GetConn* = proc(): Future[Connection] {.gcsafe, raises: [].}
DropConn* = proc(peer: PubSubPeer) {.gcsafe, raises: [].} # have to pass peer as it's unknown during init
OnEvent* = proc(peer: PubSubPeer, event: PubSubPeerEvent) {.gcsafe, raises: [].}
RpcMessageQueue* = ref object
# Tracks async tasks for sending high-priority peer-published messages.
sendPriorityQueue: Deque[Future[void]]
# Queue for lower-priority messages, like "IWANT" replies and relay messages.
nonPriorityQueue: AsyncQueue[seq[byte]]
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
@@ -62,17 +72,38 @@ type
observers*: ref seq[PubSubObserver] # ref as in smart_ptr
score*: float64
iWantBudget*: int
sentIHaves*: Deque[HashSet[MessageId]]
heDontWants*: Deque[HashSet[MessageId]]
iHaveBudget*: int
pingBudget*: int
maxMessageSize: int
appScore*: float64 # application specific score
behaviourPenalty*: float64 # the eventual penalty score
overheadRateLimitOpt*: Opt[TokenBucket]
rpcmessagequeue: RpcMessageQueue
when defined(libp2p_agents_metrics):
shortAgent*: string
RPCHandler* = proc(peer: PubSubPeer, msg: RPCMsg): Future[void]
{.gcsafe, raises: [Defect].}
RPCHandler* = proc(peer: PubSubPeer, data: seq[byte]): Future[void]
{.gcsafe, raises: [].}
when defined(libp2p_agents_metrics):
func shortAgent*(p: PubSubPeer): string =
if p.sendConn.isNil or p.sendConn.getWrapped().isNil:
"unknown"
else:
#TODO the sendConn is setup before identify,
#so we have to read the parents short agent..
p.sendConn.getWrapped().shortAgent
proc getAgent*(peer: PubSubPeer): string =
return
when defined(libp2p_agents_metrics):
if peer.shortAgent.len > 0:
peer.shortAgent
else:
"unknown"
else:
"unknown"
func hash*(p: PubSubPeer): Hash =
p.peerId.hash
@@ -102,7 +133,7 @@ func outbound*(p: PubSubPeer): bool =
else:
false
proc recvObservers(p: PubSubPeer, msg: var RPCMsg) =
proc recvObservers*(p: PubSubPeer, msg: var RPCMsg) =
# trigger hooks
if not(isNil(p.observers)) and p.observers[].len > 0:
for obs in p.observers[]:
@@ -122,43 +153,28 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async.} =
try:
try:
while not conn.atEof:
trace "waiting for data", conn, peer = p, closed = conn.closed
debug "waiting for data", conn, peer = p, closed = conn.closed
var data = await conn.readLp(p.maxMessageSize)
trace "read data from peer",
debug "read data from peer",
conn, peer = p, closed = conn.closed,
data = data.shortLog
var rmsg = decodeRpcMsg(data)
await p.handler(p, data)
data = newSeq[byte]() # Release memory
if rmsg.isErr():
notice "failed to decode msg from peer",
conn, peer = p, closed = conn.closed,
err = rmsg.error()
break
trace "decoded msg from peer",
conn, peer = p, closed = conn.closed,
msg = rmsg.get().shortLog
# trigger hooks
p.recvObservers(rmsg.get())
when defined(libp2p_expensive_metrics):
for m in rmsg.get().messages:
for t in m.topicIDs:
# metrics
libp2p_pubsub_received_messages.inc(labelValues = [$p.peerId, t])
await p.handler(p, rmsg.get())
except PeerRateLimitError as exc:
debug "Peer rate limit exceeded, exiting read while", conn, peer = p, error = exc.msg
except CatchableError as exc:
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
await conn.close()
except CancelledError:
# This is a top-level procedure which will run as a separate task, so it
# does not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
debug "Unexpected cancellation in PubSubPeer.handle"
except CatchableError as exc:
trace "Exception occurred in PubSubPeer.handle",
debug "Exception occurred in PubSubPeer.handle",
conn, peer = p, closed = conn.closed, exc = exc.msg
finally:
debug "exiting pubsub read loop",
@@ -168,7 +184,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
try:
if p.connectedFut.finished:
p.connectedFut = newFuture[void]()
let newConn = await p.getConn()
let newConn = await p.getConn().wait(5.seconds)
if newConn.isNil:
raise (ref LPError)(msg: "Cannot establish send connection")
@@ -176,7 +192,7 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
# remote peer - if we had multiple channels up and one goes down, all
# stop working so we make an effort to only keep a single channel alive
trace "Get new send connection", p, newConn
debug "Get new send connection", p, newConn
# Careful of race conditions here.
# Topic subscription relies on either connectedFut
@@ -191,10 +207,13 @@ proc connectOnce(p: PubSubPeer): Future[void] {.async.} =
await handle(p, newConn)
finally:
if p.sendConn != nil:
trace "Removing send connection", p, conn = p.sendConn
debug "Removing send connection", p, conn = p.sendConn
await p.sendConn.close()
p.sendConn = nil
if not p.connectedFut.finished:
p.connectedFut.complete()
try:
if p.onEvent != nil:
p.onEvent(p, PubSubPeerEvent(kind: PubSubPeerEventKind.Disconnected))
@@ -227,11 +246,62 @@ proc hasSendConn*(p: PubSubPeer): bool =
template sendMetrics(msg: RPCMsg): untyped =
when defined(libp2p_expensive_metrics):
for x in msg.messages:
for t in x.topicIDs:
for t in x.topicIds:
# metrics
libp2p_pubsub_sent_messages.inc(labelValues = [$p.peerId, t])
proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
proc clearSendPriorityQueue(p: PubSubPeer) =
while p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[0].finished:
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.dec(labelValues = [$p.peerId])
let f = p.rpcmessagequeue.sendPriorityQueue.popFirst()
debug "Finished", p, f = repr(cast[pointer](f))
if p.rpcmessagequeue.sendPriorityQueue.len > 0 and p.rpcmessagequeue.sendPriorityQueue[^1].finished:
for f in p.rpcmessagequeue.sendPriorityQueue:
if f.failed():
debug "Broken failed", p, f = repr(cast[pointer](f)), err= f.error().msg
elif f.completed:
debug "Broken completed", p, f = repr(cast[pointer](f))
else:
debug "Broken pending", p, f = repr(cast[pointer](f))
quit 1
proc sendMsg(p: PubSubPeer, msg: seq[byte]) {.async.} =
if p.sendConn == nil:
# Wait for a send conn to be setup. `connectOnce` will
# complete this even if the sendConn setup failed
await p.connectedFut
var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection", p, msg = shortLog(msg)
return
debug "sending encoded msgs to peer", conn, encoded = shortLog(msg)
try:
await conn.writeLp(msg)
debug "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
debug "Unable to send to remote", conn, msg = exc.msg
# Next time sendConn is used, it will have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool) {.async.} =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The message to be sent, encoded as a sequence of bytes (`seq[byte]`).
## - `isHighPriority`: A boolean indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
doAssert(not isNil(p), "pubsubpeer nil!")
if msg.len <= 0:
@@ -239,34 +309,68 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte]) {.raises: [Defect], async.} =
return
if msg.len > p.maxMessageSize:
info "trying to send a too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
info "trying to send a msg too big for pubsub", maxSize=p.maxMessageSize, msgSize=msg.len
return
if p.sendConn == nil:
discard await p.connectedFut.withTimeout(1.seconds)
if isHighPriority:
p.clearSendPriorityQueue()
let f = p.sendMsg(msg)
if not f.finished:
debug "Unfinished", p, msg = msg.len, f = repr(cast[pointer](f))
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.inc(labelValues = [$p.peerId])
else:
await p.rpcmessagequeue.nonPriorityQueue.addLast(msg)
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.inc(labelValues = [$p.peerId])
debug "message queued", p, msg = shortLog(msg)
var conn = p.sendConn
if conn == nil or conn.closed():
debug "No send connection, skipping message", p, msg = shortLog(msg)
return
trace "sending encoded msgs to peer", conn, encoded = shortLog(msg)
try:
await conn.writeLp(msg)
trace "sent pubsub message to remote", conn
except CatchableError as exc: # never cancelled
# Because we detach the send call from the currently executing task using
# asyncSpawn, no exceptions may leak out of it
trace "Unable to send to remote", conn, msg = exc.msg
# Next time sendConn is used, it will have its close flag set and thus
# will be recycled
await conn.close() # This will clean up the send connection
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
iterator splitRPCMsg(peer: PubSubPeer, rpcMsg: RPCMsg, maxSize: int, anonymize: bool): seq[byte] =
## This iterator takes an `RPCMsg` and sequentially repackages its Messages into new `RPCMsg` instances.
## Each new `RPCMsg` accumulates Messages until reaching the specified `maxSize`. If a single Message
## exceeds the `maxSize` when trying to fit into an empty `RPCMsg`, that Message is skipped as too large to send.
## Every constructed `RPCMsg` is then encoded, optionally anonymized, and yielded as a sequence of bytes.
var currentRPCMsg = rpcMsg
currentRPCMsg.messages = newSeq[Message]()
var currentSize = byteSize(currentRPCMsg)
for msg in rpcMsg.messages:
let msgSize = byteSize(msg)
# Check if adding the next message will exceed maxSize
if float(currentSize + msgSize) * 1.1 > float(maxSize): # Guessing 10% protobuf overhead
if currentRPCMsg.messages.len == 0:
debug "message too big to send", peer, rpcMsg = shortLog(currentRPCMsg)
continue # Skip this message
debug "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
currentRPCMsg = RPCMsg()
currentSize = 0
currentRPCMsg.messages.add(msg)
currentSize += msgSize
# Check if there is a non-empty currentRPCMsg left to be added
if currentSize > 0 and currentRPCMsg.messages.len > 0:
debug "sending msg to peer", peer, rpcMsg = shortLog(currentRPCMsg)
yield encodeRpcMsg(currentRPCMsg, anonymize)
else:
debug "message too big to send", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool) {.async.} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
## Parameters:
## - `p`: The `PubSubPeer` instance to which the message is to be sent.
## - `msg`: The `RPCMsg` instance representing the message to be sent.
## - `anonymize`: A boolean flag indicating whether the message should be sent with anonymization.
## - `isHighPriority`: A boolean flag indicating whether the message should be treated as high priority.
## High priority messages are sent immediately, while low priority messages are queued and sent only after all high
## priority messages have been sent.
# When sending messages, we take care to re-encode them with the right
# anonymization flag to ensure that we're not penalized for sending invalid
# or malicious data on the wire - in particular, re-encoding protects against
@@ -284,7 +388,57 @@ proc send*(p: PubSubPeer, msg: RPCMsg, anonymize: bool) {.raises: [Defect].} =
sendMetrics(msg)
encodeRpcMsg(msg, anonymize)
asyncSpawn p.sendEncoded(encoded)
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
await p.sendEncoded(encodedSplitMsg, isHighPriority)
else:
# If the message size is within limits, send it as is
debug "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
await p.sendEncoded(encoded, isHighPriority)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
if msgId in sentIHave:
sentIHave.excl(msgId)
return true
return false
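canAskIWant enforces one IWANT per advertised IHAVE: the first request consumes the id from the window, later requests are refused. A standalone sketch of the same bookkeeping:

import std/[deques, sets]

var sentIHaves = initDeque[HashSet[string]]()
sentIHaves.addFirst(toHashSet(["m1", "m2"]))

proc canAsk(msgId: string): bool =
  for slot in sentIHaves.mitems():
    if msgId in slot:
      slot.excl(msgId) # consume: each advertised id may be requested once
      return true
  false

doAssert canAsk("m1")
doAssert not canAsk("m1") # a second request for the same id is refused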
proc sendNonPriorityTask(p: PubSubPeer) {.async.} =
while true:
# we send non-priority messages only if there are no pending priority messages
let msg = await p.rpcmessagequeue.nonPriorityQueue.popFirst()
while p.rpcmessagequeue.sendPriorityQueue.len > 0:
p.clearSendPriorityQueue()
# this minimizes the number of times we have to wait for something (each wait = performance cost)
# we will never wait for a finished future and by waiting for the last one, all that come before it are guaranteed
# to be finished already (since sends are processed in order).
if p.rpcmessagequeue.sendPriorityQueue.len > 0:
await p.rpcmessagequeue.sendPriorityQueue[^1]
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_non_priority_queue_size.dec(labelValues = [$p.peerId])
await p.sendMsg(msg)
proc startSendNonPriorityTask(p: PubSubPeer) =
debug "starting sendNonPriorityTask", p
if p.rpcmessagequeue.sendNonPriorityTask.isNil:
p.rpcmessagequeue.sendNonPriorityTask = p.sendNonPriorityTask()
proc stopSendNonPriorityTask*(p: PubSubPeer) =
if not p.rpcmessagequeue.sendNonPriorityTask.isNil:
debug "stopping sendNonPriorityTask", p
p.rpcmessagequeue.sendNonPriorityTask.cancel()
p.rpcmessagequeue.sendNonPriorityTask = nil
p.rpcmessagequeue.sendPriorityQueue.clear()
p.rpcmessagequeue.nonPriorityQueue.clear()
when defined(libp2p_expensive_metrics):
libp2p_gossipsub_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
libp2p_gossipsub_non_priority_queue_size.set(labelValues = [$p.peerId], value = 0)
proc new(T: typedesc[RpcMessageQueue]): T =
return T(
sendPriorityQueue: initDeque[Future[void]](),
nonPriorityQueue: newAsyncQueue[seq[byte]](),
)
proc new*(
T: typedesc[PubSubPeer],
@@ -292,13 +446,19 @@ proc new*(
getConn: GetConn,
onEvent: OnEvent,
codec: string,
maxMessageSize: int): T =
maxMessageSize: int,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket)): T =
T(
result = T(
getConn: getConn,
onEvent: onEvent,
codec: codec,
peerId: peerId,
connectedFut: newFuture[void](),
maxMessageSize: maxMessageSize
maxMessageSize: maxMessageSize,
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.heDontWants.addFirst(default(HashSet[MessageId]))
result.startSendNonPriorityTask()


@@ -7,12 +7,8 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import hashes
import chronicles, metrics, stew/[byteutils, endians2]
import ./messages,
./protobuf,
@@ -66,22 +62,21 @@ proc init*(
topic: string,
seqno: Option[uint64],
sign: bool = true): Message
{.gcsafe, raises: [Defect, LPError].} =
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
# order matters, we want to include seqno in the signature
if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())
if peer.isSome:
let peer = peer.get()
peer.withValue(peer):
msg.fromPeer = peer.peerId
if sign:
msg.signature = sign(msg, peer.privateKey).expect("Couldn't sign message!")
msg.key = peer.privateKey.getPublicKey().expect("Invalid private key!")
.getBytes().expect("Couldn't get public key bytes!")
elif sign:
raise (ref LPError)(msg: "Cannot sign message without peer info")
else:
if sign: raise (ref LPError)(msg: "Cannot sign message without peer info")
msg
@@ -91,10 +86,10 @@ proc init*(
data: seq[byte],
topic: string,
seqno: Option[uint64]): Message
{.gcsafe, raises: [Defect, LPError].} =
{.gcsafe, raises: [LPError].} =
var msg = Message(data: data, topicIDs: @[topic])
msg.fromPeer = peerId
if seqno.isSome:
msg.seqno = @(seqno.get().toBytesBE())
seqno.withValue(seqn):
msg.seqno = @(seqn.toBytesBE())
msg


@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import options, sequtils
import options, sequtils, sugar
import "../../.."/[
peerid,
routing_record,
@@ -21,6 +18,14 @@ import "../../.."/[
export options
proc expectedFields[T](t: typedesc[T], existingFieldNames: seq[string]) {.raises: [CatchableError].} =
var fieldNames: seq[string]
for name, _ in fieldPairs(T()):
fieldNames &= name
if fieldNames != existingFieldNames:
fieldNames.keepIf(proc(it: string): bool = it notin existingFieldNames)
raise newException(CatchableError, $T & " fields changed, please search for and revise all relevant procs. New fields: " & $fieldNames)
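A short sketch of how this guard trips, usable from the same module (Demo is illustrative): the static evaluation raises, and therefore fails compilation, as soon as a type gains a field that the byteSize procs below do not account for:

type Demo = object
  a: int
  b: string

static: expectedFields(Demo, @["a", "b"]) # passes
# static: expectedFields(Demo, @["a"])    # fails to compile, reporting New fields: @["b"]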
type
PeerInfoMsg* = object
peerId*: PeerId
@@ -45,6 +50,7 @@ type
iwant*: seq[ControlIWant]
graft*: seq[ControlGraft]
prune*: seq[ControlPrune]
idontwant*: seq[ControlIWant]
ControlIHave* = object
topicId*: string
@@ -65,6 +71,8 @@ type
subscriptions*: seq[SubOpts]
messages*: seq[Message]
control*: Option[ControlMessage]
ping*: seq[byte]
pong*: seq[byte]
func withSubs*(
T: type RPCMsg, topics: openArray[string], subscribe: bool): T =
@@ -111,15 +119,59 @@ func shortLog*(msg: Message): auto =
)
func shortLog*(m: RPCMsg): auto =
if m.control.isSome:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get().shortLog
)
else:
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: ControlMessage().shortLog
)
(
subscriptions: m.subscriptions,
messages: mapIt(m.messages, it.shortLog),
control: m.control.get(ControlMessage()).shortLog
)
static: expectedFields(PeerInfoMsg, @["peerId", "signedPeerRecord"])
proc byteSize(peerInfo: PeerInfoMsg): int =
peerInfo.peerId.len + peerInfo.signedPeerRecord.len
static: expectedFields(SubOpts, @["subscribe", "topic"])
proc byteSize(subOpts: SubOpts): int =
1 + subOpts.topic.len # 1 byte for the bool
static: expectedFields(Message, @["fromPeer", "data", "seqno", "topicIds", "signature", "key"])
proc byteSize*(msg: Message): int =
msg.fromPeer.len + msg.data.len + msg.seqno.len +
msg.signature.len + msg.key.len + msg.topicIds.foldl(a + b.len, 0)
proc byteSize*(msgs: seq[Message]): int =
msgs.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIHave, @["topicId", "messageIds"])
proc byteSize(controlIHave: ControlIHave): int =
controlIHave.topicId.len + controlIHave.messageIds.foldl(a + b.len, 0)
proc byteSize*(ihaves: seq[ControlIHave]): int =
ihaves.foldl(a + b.byteSize, 0)
static: expectedFields(ControlIWant, @["messageIds"])
proc byteSize(controlIWant: ControlIWant): int =
controlIWant.messageIds.foldl(a + b.len, 0)
proc byteSize*(iwants: seq[ControlIWant]): int =
iwants.foldl(a + b.byteSize, 0)
static: expectedFields(ControlGraft, @["topicId"])
proc byteSize(controlGraft: ControlGraft): int =
controlGraft.topicId.len
static: expectedFields(ControlPrune, @["topicId", "peers", "backoff"])
proc byteSize(controlPrune: ControlPrune): int =
controlPrune.topicId.len + controlPrune.peers.foldl(a + b.byteSize, 0) + 8 # 8 bytes for uint64
static: expectedFields(ControlMessage, @["ihave", "iwant", "graft", "prune", "idontwant"])
proc byteSize(control: ControlMessage): int =
control.ihave.foldl(a + b.byteSize, 0) + control.iwant.foldl(a + b.byteSize, 0) +
control.graft.foldl(a + b.byteSize, 0) + control.prune.foldl(a + b.byteSize, 0) +
control.idontwant.foldl(a + b.byteSize, 0)
static: expectedFields(RPCMsg, @["subscriptions", "messages", "control", "ping", "pong"])
proc byteSize*(rpc: RPCMsg): int =
result = rpc.subscriptions.foldl(a + b.byteSize, 0) + byteSize(rpc.messages) +
rpc.ping.len + rpc.pong.len
rpc.control.withValue(ctrl):
result += ctrl.byteSize
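A rough worked example of the accounting above, which counts field payloads only, with no protobuf framing: a Message carrying 3 data bytes, a 1-byte seqno and one 5-character topic id weighs 3 + 1 + 5 = 9 bytes (fromPeer, signature and key are empty here). A sketch assuming libp2p's rpc/messages module:

import libp2p/protocols/pubsub/rpc/messages

let m = Message(data: @[1'u8, 2, 3], seqno: @[7'u8], topicIds: @["topic"])
echo byteSize(m) # 9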


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import options
import stew/assign2
@@ -90,6 +87,8 @@ proc write*(pb: var ProtoBuffer, field: int, control: ControlMessage) =
ipb.write(3, graft)
for prune in control.prune:
ipb.write(4, prune)
for idontwant in control.idontwant:
ipb.write(5, idontwant)
if len(ipb.buffer) > 0:
ipb.finish()
pb.write(field, ipb)
@@ -213,6 +212,7 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
var iwantpbs: seq[seq[byte]]
var graftpbs: seq[seq[byte]]
var prunepbs: seq[seq[byte]]
var idontwant: seq[seq[byte]]
if ? cpb.getRepeatedField(1, ihavepbs):
for item in ihavepbs:
control.ihave.add(? decodeIHave(initProtoBuffer(item)))
@@ -225,6 +225,9 @@ proc decodeControl*(pb: ProtoBuffer): ProtoResult[Option[ControlMessage]] {.
if ? cpb.getRepeatedField(4, prunepbs):
for item in prunepbs:
control.prune.add(? decodePrune(initProtoBuffer(item)))
if ? cpb.getRepeatedField(5, idontwant):
for item in idontwant:
control.idontwant.add(? decodeIWant(initProtoBuffer(item)))
trace "decodeControl: message statistics", graft_count = len(control.graft),
prune_count = len(control.prune),
ihave_count = len(control.ihave),
@@ -317,8 +320,14 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
pb.write(1, item)
for item in msg.messages:
pb.write(2, item, anonymize)
if msg.control.isSome():
pb.write(3, msg.control.get())
msg.control.withValue(control):
pb.write(3, control)
# nim-libp2p extension, using fields which are unlikely to be used
# by other extensions
if msg.ping.len > 0:
pb.write(60, msg.ping)
if msg.pong.len > 0:
pb.write(61, msg.pong)
if len(pb.buffer) > 0:
pb.finish()
pb.buffer
@@ -326,8 +335,10 @@ proc encodeRpcMsg*(msg: RPCMsg, anonymize: bool): seq[byte] =
proc decodeRpcMsg*(msg: seq[byte]): ProtoResult[RPCMsg] {.inline.} =
trace "decodeRpcMsg: decoding message", msg = msg.shortLog()
var pb = initProtoBuffer(msg, maxSize = uint.high)
var rpcMsg = ok(RPCMsg())
assign(rpcMsg.get().messages, ? pb.decodeMessages())
assign(rpcMsg.get().subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.get().control, ? pb.decodeControl())
rpcMsg
var rpcMsg = RPCMsg()
assign(rpcMsg.messages, ? pb.decodeMessages())
assign(rpcMsg.subscriptions, ? pb.decodeSubscriptions())
assign(rpcMsg.control, ? pb.decodeControl())
discard ? pb.getField(60, rpcMsg.ping)
discard ? pb.getField(61, rpcMsg.pong)
ok(rpcMsg)


@@ -7,14 +7,13 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[tables]
import chronos/timer
import chronos/timer, stew/results
import ../../utility
const Timeout* = 10.seconds # default timeout
@@ -22,6 +21,7 @@ type
TimedEntry*[K] = ref object of RootObj
key: K
addedAt: Moment
expiresAt: Moment
next, prev: TimedEntry[K]
TimedCache*[K] = object of RootObj
@@ -30,15 +30,14 @@ type
timeout: Duration
func expire*(t: var TimedCache, now: Moment = Moment.now()) =
let expirationLimit = now - t.timeout
while t.head != nil and t.head.addedAt < expirationLimit:
while t.head != nil and t.head.expiresAt < now:
t.entries.del(t.head.key)
t.head.prev = nil
t.head = t.head.next
if t.head == nil: t.tail = nil
func del*[K](t: var TimedCache[K], key: K): bool =
# Removes existing key from cache, returning false if it was not present
func del*[K](t: var TimedCache[K], key: K): Opt[TimedEntry[K]] =
# Removes existing key from cache, returning the previous value if present
var item: TimedEntry[K]
if t.entries.pop(key, item):
if t.head == item: t.head = item.next
@@ -46,9 +45,9 @@ func del*[K](t: var TimedCache[K], key: K): bool =
if item.next != nil: item.next.prev = item.prev
if item.prev != nil: item.prev.next = item.next
true
Opt.some(item)
else:
false
Opt.none(TimedEntry[K])
func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# Puts k in cache, returning true if the item was already present and false
@@ -56,9 +55,13 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
# refreshed.
t.expire(now)
var res = t.del(k) # Refresh existing item
var previous = t.del(k) # Refresh existing item
let node = TimedEntry[K](key: k, addedAt: now)
var addedAt = now
previous.withValue(previous):
addedAt = previous.addedAt
let node = TimedEntry[K](key: k, addedAt: addedAt, expiresAt: now + t.timeout)
if t.head == nil:
t.tail = node
@@ -66,7 +69,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
else:
# search from tail because typically that's where we add when now grows
var cur = t.tail
while cur != nil and node.addedAt < cur.addedAt:
while cur != nil and node.expiresAt < cur.expiresAt:
cur = cur.prev
if cur == nil:
@@ -82,7 +85,7 @@ func put*[K](t: var TimedCache[K], k: K, now = Moment.now()): bool =
t.entries[k] = node
res
previous.isSome()
func contains*[K](t: TimedCache[K], k: K): bool =
k in t.entries
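A hedged usage sketch of the refresh semantics above, assuming this module's TimedCache.init constructor: re-putting a key reports it as already present and pushes its expiry out, while addedAt is carried over from the first insertion:

import chronos/timer
import libp2p/protocols/pubsub/timedcache

var cache = TimedCache[string].init(10.seconds)
let t0 = Moment.now()
doAssert cache.put("a", t0) == false             # new entry, expires at t0 + 10s
doAssert cache.put("a", t0 + 5.seconds) == true  # refreshed, now expires at t0 + 15s
doAssert "a" in cache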


@@ -7,16 +7,14 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import tables, sequtils, sugar, sets, options
import tables, sequtils, sugar, sets
import metrics except collect
import chronos,
chronicles,
bearssl/rand,
stew/[byteutils, objects]
stew/[byteutils, objects, results]
import ./protocol,
../switch,
../routing_record,
@@ -30,6 +28,11 @@ export chronicles
logScope:
topics = "libp2p discovery rendezvous"
declareCounter(libp2p_rendezvous_register, "number of advertise requests")
declareCounter(libp2p_rendezvous_discover, "number of discovery requests")
declareGauge(libp2p_rendezvous_registered, "number of registered peers")
declareGauge(libp2p_rendezvous_namespaces, "number of registered namespaces")
const
RendezVousCodec* = "/rendezvous/1.0.0"
MinimumDuration* = 2.hours
@@ -65,34 +68,34 @@ type
Register = object
ns : string
signedPeerRecord: seq[byte]
ttl: Option[uint64] # in seconds
ttl: Opt[uint64] # in seconds
RegisterResponse = object
status: ResponseStatus
text: Option[string]
ttl: Option[uint64] # in seconds
text: Opt[string]
ttl: Opt[uint64] # in seconds
Unregister = object
ns: string
Discover = object
ns: string
limit: Option[uint64]
cookie: Option[seq[byte]]
limit: Opt[uint64]
cookie: Opt[seq[byte]]
DiscoverResponse = object
registrations: seq[Register]
cookie: Option[seq[byte]]
cookie: Opt[seq[byte]]
status: ResponseStatus
text: Option[string]
text: Opt[string]
Message = object
msgType: MessageType
register: Option[Register]
registerResponse: Option[RegisterResponse]
unregister: Option[Unregister]
discover: Option[Discover]
discoverResponse: Option[DiscoverResponse]
register: Opt[Register]
registerResponse: Opt[RegisterResponse]
unregister: Opt[Unregister]
discover: Opt[Discover]
discoverResponse: Opt[DiscoverResponse]
proc encode(c: Cookie): ProtoBuffer =
result = initProtoBuffer()
@@ -104,17 +107,17 @@ proc encode(r: Register): ProtoBuffer =
result = initProtoBuffer()
result.write(1, r.ns)
result.write(2, r.signedPeerRecord)
if r.ttl.isSome():
result.write(3, r.ttl.get())
r.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode(rr: RegisterResponse): ProtoBuffer =
result = initProtoBuffer()
result.write(1, rr.status.uint)
if rr.text.isSome():
result.write(2, rr.text.get())
if rr.ttl.isSome():
result.write(3, rr.ttl.get())
rr.text.withValue(text):
result.write(2, text)
rr.ttl.withValue(ttl):
result.write(3, ttl)
result.finish()
proc encode(u: Unregister): ProtoBuffer =
@@ -125,48 +128,48 @@ proc encode(u: Unregister): ProtoBuffer =
proc encode(d: Discover): ProtoBuffer =
result = initProtoBuffer()
result.write(1, d.ns)
if d.limit.isSome():
result.write(2, d.limit.get())
if d.cookie.isSome():
result.write(3, d.cookie.get())
d.limit.withValue(limit):
result.write(2, limit)
d.cookie.withValue(cookie):
result.write(3, cookie)
result.finish()
proc encode(d: DiscoverResponse): ProtoBuffer =
proc encode(dr: DiscoverResponse): ProtoBuffer =
result = initProtoBuffer()
for reg in d.registrations:
for reg in dr.registrations:
result.write(1, reg.encode())
if d.cookie.isSome():
result.write(2, d.cookie.get())
result.write(3, d.status.uint)
if d.text.isSome():
result.write(4, d.text.get())
dr.cookie.withValue(cookie):
result.write(2, cookie)
result.write(3, dr.status.uint)
dr.text.withValue(text):
result.write(4, text)
result.finish()
proc encode(msg: Message): ProtoBuffer =
result = initProtoBuffer()
result.write(1, msg.msgType.uint)
if msg.register.isSome():
result.write(2, msg.register.get().encode())
if msg.registerResponse.isSome():
result.write(3, msg.registerResponse.get().encode())
if msg.unregister.isSome():
result.write(4, msg.unregister.get().encode())
if msg.discover.isSome():
result.write(5, msg.discover.get().encode())
if msg.discoverResponse.isSome():
result.write(6, msg.discoverResponse.get().encode())
msg.register.withValue(register):
result.write(2, register.encode())
msg.registerResponse.withValue(registerResponse):
result.write(3, registerResponse.encode())
msg.unregister.withValue(unregister):
result.write(4, unregister.encode())
msg.discover.withValue(discover):
result.write(5, discover.encode())
msg.discoverResponse.withValue(discoverResponse):
result.write(6, discoverResponse.encode())
result.finish()
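All encoders above share one pattern: withValue runs its body only when the Opt holds a value, so absent optional fields are simply not written. A standalone sketch; withValue is assumed to be the Opt helper from libp2p/utility, and get-with-default comes from stew/results:

import stew/results
import libp2p/utility

var ttl = Opt.some(120'u64)
ttl.withValue(v):
  echo "write field 3: ", v # body runs only because a value is present
ttl = Opt.none(uint64)
echo ttl.get(300'u64) # absent: falls back to the supplied default, 300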
proc decode(_: typedesc[Cookie], buf: seq[byte]): Option[Cookie] =
proc decode(_: typedesc[Cookie], buf: seq[byte]): Opt[Cookie] =
var c: Cookie
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, c.offset)
r2 = pb.getRequiredField(2, c.ns)
if r1.isErr() or r2.isErr(): return none(Cookie)
some(c)
if r1.isErr() or r2.isErr(): return Opt.none(Cookie)
Opt.some(c)
proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
proc decode(_: typedesc[Register], buf: seq[byte]): Opt[Register] =
var
r: Register
ttl: uint64
@@ -175,11 +178,11 @@ proc decode(_: typedesc[Register], buf: seq[byte]): Option[Register] =
r1 = pb.getRequiredField(1, r.ns)
r2 = pb.getRequiredField(2, r.signedPeerRecord)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr(): return none(Register)
if r3.get(): r.ttl = some(ttl)
some(r)
if r1.isErr() or r2.isErr() or r3.isErr(): return Opt.none(Register)
if r3.get(false): r.ttl = Opt.some(ttl)
Opt.some(r)
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterResponse] =
proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Opt[RegisterResponse] =
var
rr: RegisterResponse
statusOrd: uint
@@ -191,20 +194,20 @@ proc decode(_: typedesc[RegisterResponse], buf: seq[byte]): Option[RegisterRespo
r2 = pb.getField(2, text)
r3 = pb.getField(3, ttl)
if r1.isErr() or r2.isErr() or r3.isErr() or
not checkedEnumAssign(rr.status, statusOrd): return none(RegisterResponse)
if r2.get(): rr.text = some(text)
if r3.get(): rr.ttl = some(ttl)
some(rr)
not checkedEnumAssign(rr.status, statusOrd): return Opt.none(RegisterResponse)
if r2.get(false): rr.text = Opt.some(text)
if r3.get(false): rr.ttl = Opt.some(ttl)
Opt.some(rr)
proc decode(_: typedesc[Unregister], buf: seq[byte]): Option[Unregister] =
proc decode(_: typedesc[Unregister], buf: seq[byte]): Opt[Unregister] =
var u: Unregister
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, u.ns)
if r1.isErr(): return none(Unregister)
some(u)
if r1.isErr(): return Opt.none(Unregister)
Opt.some(u)
proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
proc decode(_: typedesc[Discover], buf: seq[byte]): Opt[Discover] =
var
d: Discover
limit: uint64
@@ -214,12 +217,12 @@ proc decode(_: typedesc[Discover], buf: seq[byte]): Option[Discover] =
r1 = pb.getRequiredField(1, d.ns)
r2 = pb.getField(2, limit)
r3 = pb.getField(3, cookie)
if r1.isErr() or r2.isErr() or r3.isErr: return none(Discover)
if r2.get(): d.limit = some(limit)
if r3.get(): d.cookie = some(cookie)
some(d)
if r1.isErr() or r2.isErr() or r3.isErr: return Opt.none(Discover)
if r2.get(false): d.limit = Opt.some(limit)
if r3.get(false): d.cookie = Opt.some(cookie)
Opt.some(d)
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverResponse] =
proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Opt[DiscoverResponse] =
var
dr: DiscoverResponse
registrations: seq[seq[byte]]
@@ -233,48 +236,47 @@ proc decode(_: typedesc[DiscoverResponse], buf: seq[byte]): Option[DiscoverRespo
r3 = pb.getRequiredField(3, statusOrd)
r4 = pb.getField(4, text)
if r1.isErr() or r2.isErr() or r3.isErr or r4.isErr() or
not checkedEnumAssign(dr.status, statusOrd): return none(DiscoverResponse)
not checkedEnumAssign(dr.status, statusOrd): return Opt.none(DiscoverResponse)
for reg in registrations:
var r: Register
let regOpt = Register.decode(reg)
if regOpt.isNone(): return none(DiscoverResponse)
dr.registrations.add(regOpt.get())
if r2.get(): dr.cookie = some(cookie)
if r4.get(): dr.text = some(text)
some(dr)
let regOpt = Register.decode(reg).valueOr:
return
dr.registrations.add(regOpt)
if r2.get(false): dr.cookie = Opt.some(cookie)
if r4.get(false): dr.text = Opt.some(text)
Opt.some(dr)
proc decode(_: typedesc[Message], buf: seq[byte]): Option[Message] =
proc decode(_: typedesc[Message], buf: seq[byte]): Opt[Message] =
var
msg: Message
statusOrd: uint
pbr, pbrr, pbu, pbd, pbdr: ProtoBuffer
let
pb = initProtoBuffer(buf)
r1 = pb.getRequiredField(1, statusOrd)
r2 = pb.getField(2, pbr)
r3 = pb.getField(3, pbrr)
r4 = pb.getField(4, pbu)
r5 = pb.getField(5, pbd)
r6 = pb.getField(6, pbdr)
if r1.isErr() or r2.isErr() or r3.isErr() or
r4.isErr() or r5.isErr() or r6.isErr() or
not checkedEnumAssign(msg.msgType, statusOrd): return none(Message)
if r2.get():
let pb = initProtoBuffer(buf)
? pb.getRequiredField(1, statusOrd).toOpt
if not checkedEnumAssign(msg.msgType, statusOrd): return Opt.none(Message)
if ? pb.getField(2, pbr).optValue:
msg.register = Register.decode(pbr.buffer)
if msg.register.isNone(): return none(Message)
if r3.get():
if msg.register.isNone(): return Opt.none(Message)
if ? pb.getField(3, pbrr).optValue:
msg.registerResponse = RegisterResponse.decode(pbrr.buffer)
if msg.registerResponse.isNone(): return none(Message)
if r4.get():
if msg.registerResponse.isNone(): return Opt.none(Message)
if ? pb.getField(4, pbu).optValue:
msg.unregister = Unregister.decode(pbu.buffer)
if msg.unregister.isNone(): return none(Message)
if r5.get():
if msg.unregister.isNone(): return Opt.none(Message)
if ? pb.getField(5, pbd).optValue:
msg.discover = Discover.decode(pbd.buffer)
if msg.discover.isNone(): return none(Message)
if r6.get():
if msg.discover.isNone(): return Opt.none(Message)
if ? pb.getField(6, pbdr).optValue:
msg.discoverResponse = DiscoverResponse.decode(pbdr.buffer)
if msg.discoverResponse.isNone(): return none(Message)
some(msg)
if msg.discoverResponse.isNone(): return Opt.none(Message)
Opt.some(msg)
type
@@ -314,7 +316,7 @@ proc sendRegisterResponse(conn: Connection,
ttl: uint64) {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: Ok, ttl: some(ttl)))))
registerResponse: Opt.some(RegisterResponse(status: Ok, ttl: Opt.some(ttl)))))
await conn.writeLp(msg.buffer)
proc sendRegisterResponseError(conn: Connection,
@@ -322,7 +324,7 @@ proc sendRegisterResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.RegisterResponse,
registerResponse: some(RegisterResponse(status: status, text: some(text)))))
registerResponse: Opt.some(RegisterResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)
proc sendDiscoverResponse(conn: Connection,
@@ -330,10 +332,10 @@ proc sendDiscoverResponse(conn: Connection,
cookie: Cookie) {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(
discoverResponse: Opt.some(DiscoverResponse(
status: Ok,
registrations: s,
cookie: some(cookie.encode().buffer)
cookie: Opt.some(cookie.encode().buffer)
))
))
await conn.writeLp(msg.buffer)
@@ -343,7 +345,7 @@ proc sendDiscoverResponseError(conn: Connection,
text: string = "") {.async.} =
let msg = encode(Message(
msgType: MessageType.DiscoverResponse,
discoverResponse: some(DiscoverResponse(status: status, text: some(text)))))
discoverResponse: Opt.some(DiscoverResponse(status: status, text: Opt.some(text)))))
await conn.writeLp(msg.buffer)
proc countRegister(rdv: RendezVous, peerId: PeerId): int =
@@ -378,6 +380,7 @@ proc save(rdv: RendezVous,
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns
libp2p_rendezvous_register.inc()
if r.ns.len notin 1..255:
return conn.sendRegisterResponseError(InvalidNamespace)
let ttl = r.ttl.get(MinimumTTL)
@@ -389,6 +392,8 @@ proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
if rdv.countRegister(conn.peerId) >= RegistrationLimitPerPeer:
return conn.sendRegisterResponseError(NotAuthorized, "Registration limit reached")
rdv.save(r.ns, conn.peerId, r)
libp2p_rendezvous_registered.inc()
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
conn.sendRegisterResponse(ttl)
proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
@@ -398,11 +403,13 @@ proc unregister(rdv: RendezVous, conn: Connection, u: Unregister) =
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].peerId == conn.peerId:
rdv.registered[index].expiration = rdv.defaultDT
libp2p_rendezvous_registered.dec()
except KeyError:
return
proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
trace "Received Discover", peerId = conn.peerId, ns = d.ns
libp2p_rendezvous_discover.inc()
if d.ns.len notin 0..255:
await conn.sendDiscoverResponseError(InvalidNamespace)
return
@@ -411,7 +418,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
cookie =
if d.cookie.isSome():
try:
Cookie.decode(d.cookie.get()).get()
Cookie.decode(d.cookie.tryGet()).tryGet()
except CatchableError:
await conn.sendDiscoverResponseError(InvalidCookie)
return
@@ -442,7 +449,7 @@ proc discover(rdv: RendezVous, conn: Connection, d: Discover) {.async.} =
break
if reg.expiration < n or index.uint64 <= cookie.offset: continue
limit.dec()
reg.data.ttl = some((reg.expiration - Moment.now()).seconds.uint64)
reg.data.ttl = Opt.some((reg.expiration - Moment.now()).seconds.uint64)
reg.data
rdv.rng.shuffle(s)
await conn.sendDiscoverResponse(s, Cookie(offset: offset.uint64, ns: d.ns))
@@ -457,12 +464,13 @@ proc advertisePeer(rdv: RendezVous,
await conn.writeLp(msg)
let
buf = await conn.readLp(4096)
msgRecv = Message.decode(buf).get()
msgRecv = Message.decode(buf).tryGet()
if msgRecv.msgType != MessageType.RegisterResponse:
trace "Unexpected register response", peer, msgType = msgRecv.msgType
elif msgRecv.registerResponse.isNone() or
msgRecv.registerResponse.get().status != ResponseStatus.Ok:
elif msgRecv.registerResponse.tryGet().status != ResponseStatus.Ok:
trace "Refuse to register", peer, response = msgRecv.registerResponse
else:
trace "Successfully registered", peer, response = msgRecv.registerResponse
except CatchableError as exc:
trace "exception in the advertise", error = exc.msg
finally:
@@ -470,19 +478,18 @@ proc advertisePeer(rdv: RendezVous,
await rdv.sema.acquire()
discard await advertiseWrap().withTimeout(5.seconds)
proc advertise*(rdv: RendezVous,
method advertise*(rdv: RendezVous,
ns: string,
ttl: Duration = MinimumDuration) {.async.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode()
if sprBuff.isErr():
ttl: Duration = MinimumDuration) {.async, base.} =
let sprBuff = rdv.switch.peerInfo.signedPeerRecord.encode().valueOr:
raise newException(RendezVousError, "Wrong Signed Peer Record")
if ns.len notin 1..255:
raise newException(RendezVousError, "Invalid namespace")
if ttl notin MinimumDuration..MaximumDuration:
raise newException(RendezVousError, "Invalid time to live")
let
r = Register(ns: ns, signedPeerRecord: sprBuff.get(), ttl: some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: some(r)))
r = Register(ns: ns, signedPeerRecord: sprBuff, ttl: Opt.some(ttl.seconds.uint64))
msg = encode(Message(msgType: MessageType.Register, register: Opt.some(r)))
rdv.save(ns, rdv.switch.peerInfo.peerId, r)
let fut = collect(newSeq()):
for peer in rdv.peers:
@@ -498,7 +505,9 @@ proc requestLocally*(rdv: RendezVous, ns: string): seq[PeerRecord] =
collect(newSeq()):
for index in rdv.namespaces[nsSalted]:
if rdv.registered[index].expiration > n:
SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).get().data
let res = SignedPeerRecord.decode(rdv.registered[index].data.signedPeerRecord).valueOr:
continue
res.data
except KeyError as exc:
@[]
@@ -519,38 +528,42 @@ proc request*(rdv: RendezVous,
proc requestPeer(peer: PeerId) {.async.} =
let conn = await rdv.switch.dial(peer, RendezVousCodec)
defer: await conn.close()
d.limit = some(limit)
d.limit = Opt.some(limit)
d.cookie =
try:
some(rdv.cookiesSaved[peer][ns])
Opt.some(rdv.cookiesSaved[peer][ns])
except KeyError as exc:
none(seq[byte])
Opt.none(seq[byte])
await conn.writeLp(encode(Message(
msgType: MessageType.Discover,
discover: some(d))).buffer)
discover: Opt.some(d))).buffer)
let
buf = await conn.readLp(65536)
msgRcv = Message.decode(buf).get()
if msgRcv.msgType != MessageType.DiscoverResponse or
msgRcv.discoverResponse.isNone():
msgRcv = Message.decode(buf).valueOr:
debug "Message undecodable"
return
if msgRcv.msgType != MessageType.DiscoverResponse:
debug "Unexpected discover response", msgType = msgRcv.msgType
return
let resp = msgRcv.discoverResponse.get()
let resp = msgRcv.discoverResponse.valueOr:
debug "Discover response is empty"
return
if resp.status != ResponseStatus.Ok:
trace "Cannot discover", ns, status = resp.status, text = resp.text
return
if resp.cookie.isSome() and resp.cookie.get().len < 1000:
if rdv.cookiesSaved.hasKeyOrPut(peer, {ns: resp.cookie.get()}.toTable):
rdv.cookiesSaved[peer][ns] = resp.cookie.get()
resp.cookie.withValue(cookie):
if cookie.len() < 1000 and rdv.cookiesSaved.hasKeyOrPut(peer, {ns: cookie}.toTable()):
rdv.cookiesSaved[peer][ns] = cookie
for r in resp.registrations:
if limit == 0: return
if r.ttl.isNone() or r.ttl.get() > MaximumTTL: continue
let sprRes = SignedPeerRecord.decode(r.signedPeerRecord)
if sprRes.isErr(): continue
let pr = sprRes.get().data
let ttl = r.ttl.get(MaximumTTL + 1)
if ttl > MaximumTTL: continue
let
spr = SignedPeerRecord.decode(r.signedPeerRecord).valueOr: continue
pr = spr.data
if s.hasKey(pr.peerId):
let (prSaved, rSaved) = s[pr.peerId]
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get() < r.ttl.get()) or
if (prSaved.seqNo == pr.seqNo and rSaved.ttl.get(MaximumTTL) < ttl) or
prSaved.seqNo < pr.seqNo:
s[pr.peerId] = (pr, r)
else:
@@ -589,7 +602,7 @@ proc unsubscribe*(rdv: RendezVous, ns: string) {.async.} =
rdv.unsubscribeLocally(ns)
let msg = encode(Message(
msgType: MessageType.Unregister,
unregister: some(Unregister(ns: ns))))
unregister: Opt.some(Unregister(ns: ns))))
proc unsubscribePeer(rdv: RendezVous, peerId: PeerId) {.async.} =
try:
@@ -623,17 +636,17 @@ proc new*(T: typedesc[RendezVous],
sema: newAsyncSemaphore(SemaphoreDefaultSize)
)
logScope: topics = "libp2p discovery rendezvous"
proc handleStream(conn: Connection, proto: string) {.async, gcsafe.} =
proc handleStream(conn: Connection, proto: string) {.async.} =
try:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).get()
msg = Message.decode(buf).tryGet()
case msg.msgType:
of MessageType.Register: await rdv.register(conn, msg.register.get())
of MessageType.Register: await rdv.register(conn, msg.register.tryGet())
of MessageType.RegisterResponse:
trace "Got an unexpected Register Response", response = msg.registerResponse
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.get())
of MessageType.Discover: await rdv.discover(conn, msg.discover.get())
of MessageType.Unregister: rdv.unregister(conn, msg.unregister.tryGet())
of MessageType.Discover: await rdv.discover(conn, msg.discover.tryGet())
of MessageType.DiscoverResponse:
trace "Got an unexpected Discover Response", response = msg.discoverResponse
except CancelledError as exc:
@@ -657,9 +670,13 @@ proc new*(T: typedesc[RendezVous],
proc deletesRegister(rdv: RendezVous) {.async.} =
heartbeat "Register timeout", 1.minutes:
let n = Moment.now()
var total = 0
rdv.registered.flushIfIt(it.expiration < n)
for data in rdv.namespaces.mvalues():
data.keepItIf(it >= rdv.registered.offset)
total += data.len
libp2p_rendezvous_registered.set(int64(total))
libp2p_rendezvous_namespaces.set(int64(rdv.namespaces.len))
method start*(rdv: RendezVous) {.async.} =
if not rdv.registerDeletionLoop.isNil:
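
Most of the churn in this file is the migration from std/options to stew/results' Opt type: `some(x)`/`none(T)` become `Opt.some(x)`/`Opt.none(T)`, and bare `get()` calls give way to `valueOr` (unwrap, or run an else-branch) and `tryGet` (unwrap, or raise a catchable error). A minimal sketch of the two accessors, using a toy decoder rather than the real rendezvous types:

import stew/results

type Cookie = object
  offset: uint64

proc decode(buf: seq[byte]): Opt[Cookie] =
  # toy stand-in for the Cookie.decode used above
  if buf.len > 0: Opt.some(Cookie(offset: 1)) else: Opt.none(Cookie)

proc handle(buf: seq[byte]) =
  let c = decode(buf).valueOr:    # unwrap, or take the early exit
    echo "undecodable cookie"
    return
  echo "offset: ", c.offset
  try:
    discard decode(@[]).tryGet()  # unwrap, or raise (catchable)
  except CatchableError:
    echo "no cookie saved"

handle(@[1'u8])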


@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[oids, strformat]
import std/strformat
import chronos
import chronicles
import bearssl/[rand, hash]
@@ -136,7 +133,7 @@ proc encrypt(
state: var CipherState,
data: var openArray[byte],
ad: openArray[byte]): ChaChaPolyTag
{.noinit, raises: [Defect, NoiseNonceMaxError].} =
{.noinit, raises: [NoiseNonceMaxError].} =
var nonce: ChaChaPolyNonce
nonce[4..<12] = toBytesLE(state.n)
@@ -148,7 +145,7 @@ proc encrypt(
raise newException(NoiseNonceMaxError, "Noise max nonce value reached")
proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
result = newSeqOfCap[byte](data.len + sizeof(ChaChaPolyTag))
result.add(data)
@@ -160,7 +157,7 @@ proc encryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
tag = byteutils.toHex(tag), data = result.shortLog, nonce = state.n - 1
proc decryptWithAd(state: var CipherState, ad, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
var
tagIn = data.toOpenArray(data.len - ChaChaPolyTag.len, data.high).intoChaChaPolyTag
tagOut: ChaChaPolyTag
@@ -209,7 +206,7 @@ proc mixKeyAndHash(ss: var SymmetricState, ikm: openArray[byte]) {.used.} =
ss.cs = CipherState(k: temp_keys[2])
proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey:
result = ss.cs.encryptWithAd(ss.h.data, data)
@@ -218,7 +215,7 @@ proc encryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
ss.mixHash(result)
proc decryptAndHash(ss: var SymmetricState, data: openArray[byte]): seq[byte]
{.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} =
{.raises: [NoiseDecryptTagError, NoiseNonceMaxError].} =
# according to spec if key is empty leave plaintext
if ss.cs.hasKey and data.len > ChaChaPolyTag.len:
result = ss.cs.decryptWithAd(ss.h.data, data)
@@ -448,7 +445,7 @@ proc encryptFrame(
sconn: NoiseConnection,
cipherFrame: var openArray[byte],
src: openArray[byte])
{.raises: [Defect, NoiseNonceMaxError].} =
{.raises: [NoiseNonceMaxError].} =
# Frame consists of length + cipher data + tag
doAssert src.len <= MaxPlainSize
doAssert cipherFrame.len == 2 + src.len + sizeof(ChaChaPolyTag)
@@ -557,8 +554,7 @@ method handshake*(p: Noise, conn: Connection, initiator: bool, peerId: Opt[PeerI
trace "Remote peer id", pid = $pid
if peerId.isSome():
let targetPid = peerId.get()
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(NoiseHandshakeError, "Failed to validate expected peerId.")
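
`withValue` replaces the `isSome()`/`get()` pair here: it runs its block only when the Opt holds a value, with that value bound to the given name. A simplified, self-contained stand-in for the template (the real one lives in libp2p's utility module):

import stew/results

template withValue[T](opt: Opt[T], name, body: untyped) =
  if opt.isSome():
    let name = opt.get()
    body

let peerId = Opt.some("expected-peer-id")
peerId.withValue(targetPid):
  echo "validating ", targetPid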


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos
import secure, ../../stream/connection
@@ -22,7 +19,7 @@ type
method init(p: PlainText) {.gcsafe.} =
proc handle(conn: Connection, proto: string)
{.async, gcsafe.} = discard
{.async.} = discard
## plain text doesn't do anything
p.codec = PlainTextCodec
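
This file's diff is purely the mechanical cleanup applied across the codebase: the `when (NimMajor, NimMinor) < (1, 4)` guard collapses to a plain `{.push raises: [].}` now that Nim 1.2 (where `Defect` still had to be listed for exception tracking) is out of support, and the explicit `{.gcsafe.}` annotations on async procs go away because the async transformation already enforces gcsafe-ness. What the raises pragma buys, in a tiny standalone example:

import std/strutils

{.push raises: [].}

proc double(x: int): int =
  x * 2                 # fine: nothing catchable can escape

proc parsePort(s: string): int {.raises: [ValueError].} =
  parseInt(s)           # catchable errors must now be declared explicitly

{.pop.}

echo double(parsePort("4001"))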


@@ -7,10 +7,7 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[oids, strformat]
import bearssl/rand
@@ -262,7 +259,7 @@ proc newSecioConn(conn: Connection,
secrets: Secret,
order: int,
remotePubKey: PublicKey): SecioConn
{.raises: [Defect, LPError].} =
{.raises: [LPError].} =
## Create new secure stream/lpstream, using specified hash algorithm ``hash``,
## cipher algorithm ``cipher``, stretched keys ``secrets`` and order
## ``order``.
@@ -342,8 +339,7 @@ method handshake*(s: Secio, conn: Connection, initiator: bool, peerId: Opt[PeerI
remotePeerId = PeerId.init(remotePubkey).tryGet()
if peerId.isSome():
let targetPid = peerId.get()
peerId.withValue(targetPid):
if not targetPid.validate():
raise newException(SecioError, "Failed to validate expected peerId.")
@@ -439,14 +435,10 @@ proc new*(
T: typedesc[Secio],
rng: ref HmacDrbgContext,
localPrivateKey: PrivateKey): T =
let pkRes = localPrivateKey.getPublicKey()
if pkRes.isErr:
raise newException(Defect, "Invalid private key")
let secio = Secio(
rng: rng,
localPrivateKey: localPrivateKey,
localPublicKey: pkRes.get(),
localPublicKey: localPrivateKey.getPublicKey().expect("Invalid private key"),
)
secio.init()
secio
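
The `expect` call collapses the old four-line `isErr`/`raise` dance: it unwraps the Result or fails hard with the given message, which fits here because an invalid local private key is a programming error rather than a recoverable condition. A sketch with a toy Result in place of the real key types:

import stew/results

proc getPublicKey(valid: bool): Result[string, string] =
  # toy stand-in for PrivateKey.getPublicKey
  if valid: ok("public-key-bytes") else: err("bad key")

let key = getPublicKey(true).expect("Invalid private key")
echo key   # getPublicKey(false).expect(...) would raise a Defect instead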


@@ -8,10 +8,7 @@
# those terms.
{.push gcsafe.}
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[strformat]
import stew/results
@@ -138,10 +135,9 @@ method init*(s: Secure) =
method secure*(s: Secure,
conn: Connection,
initiator: bool,
peerId: Opt[PeerId]):
Future[Connection] {.base.} =
s.handleConn(conn, initiator, peerId)
s.handleConn(conn, conn.dir == Direction.Out, peerId)
method readOnce*(s: SecureConn,
pbytes: pointer,
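
Note the behavioural tweak hidden in `secure`: the caller-supplied `initiator` flag is now ignored and the handshake role is derived from the connection's own transport direction, so whoever dialed (`Direction.Out`) always initiates. The rule itself is one line; a trivial illustration with placeholder types:

type
  Direction = enum In, Out
  Connection = object
    dir: Direction

proc isInitiator(conn: Connection): bool =
  # the secure layer trusts the transport direction, not the caller
  conn.dir == Out

assert isInitiator(Connection(dir: Out))
assert not isInitiator(Connection(dir: In))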


@@ -9,10 +9,7 @@
## This module implements Routing Records.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import std/[sequtils, times]
import pkg/stew/results
@@ -45,16 +42,16 @@ proc decode*(
? pb.getRequiredField(2, record.seqNo)
var addressInfos: seq[seq[byte]]
let pb3 = ? pb.getRepeatedField(3, addressInfos)
if pb3:
if ? pb.getRepeatedField(3, addressInfos):
for address in addressInfos:
var addressInfo = AddressInfo()
let subProto = initProtoBuffer(address)
if ? subProto.getField(1, addressInfo.address) == false:
return err(ProtoError.RequiredFieldMissing)
let f = subProto.getField(1, addressInfo.address)
if f.get(false):
record.addresses &= addressInfo
record.addresses &= addressInfo
if record.addresses.len == 0:
return err(ProtoError.RequiredFieldMissing)
ok(record)
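
The rewritten decoder leans on the `?` operator from stew/results, which unwraps a successful Result or makes the enclosing proc return the error immediately; and a malformed or absent address field (`f.get(false)`) now skips that entry instead of failing the whole record, with the error raised only when no address survives. The early-return behaviour of `?` in isolation:

import stew/results

proc getField(present: bool): Result[int, string] =
  if present: ok(42) else: err("RequiredFieldMissing")

proc decode(present: bool): Result[int, string] =
  let v = ? getField(present)   # on err, decode returns that err right here
  ok(v + 1)

assert decode(true).get() == 43
assert decode(false).error == "RequiredFieldMissing"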


@@ -7,12 +7,9 @@
# This file may not be copied, modified, or distributed except according to
# those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
{.push raises: [].}
import chronos, chronicles, times, tables, sequtils, options
import chronos, chronicles, times, tables, sequtils
import ../switch,
../protocols/connectivity/relay/[client, utils]
@@ -20,7 +17,7 @@ logScope:
topics = "libp2p autorelay"
type
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [Defect].}
OnReservationHandler = proc (addresses: seq[MultiAddress]) {.gcsafe, raises: [].}
AutoRelayService* = ref object of Service
running: bool
@@ -32,9 +29,18 @@ type
backingOff: seq[PeerId]
peerAvailable: AsyncEvent
onReservation: OnReservationHandler
addressMapper: AddressMapper
rng: ref HmacDrbgContext
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, selfPid: PeerId) {.async.} =
proc isRunning*(self: AutoRelayService): bool =
return self.running
proc addressMapper(
self: AutoRelayService,
listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return concat(toSeq(self.relayAddresses.values))
proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, switch: Switch) {.async.} =
while self.running:
let
rsvp = await self.client.reserve(relayPid).wait(chronos.seconds(5))
@@ -46,11 +52,16 @@ proc reserveAndUpdate(self: AutoRelayService, relayPid: PeerId, selfPid: PeerId)
break
if relayPid notin self.relayAddresses or self.relayAddresses[relayPid] != relayedAddr:
self.relayAddresses[relayPid] = relayedAddr
await switch.peerInfo.update()
debug "Updated relay addresses", relayPid, relayedAddr
if not self.onReservation.isNil():
self.onReservation(concat(toSeq(self.relayAddresses.values)))
await sleepAsync chronos.seconds(ttl - 30)
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
self.addressMapper = proc (listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
return await addressMapper(self, listenAddrs)
let hasBeenSetUp = await procCall Service(self).setup(switch)
if hasBeenSetUp:
proc handlePeerJoined(peerId: PeerId, event: PeerEvent) {.async.} =
@@ -63,6 +74,7 @@ method setup*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcs
future[].cancel()
switch.addPeerEventHandler(handlePeerJoined, Joined)
switch.addPeerEventHandler(handlePeerLeft, Left)
switch.peerInfo.addressMappers.add(self.addressMapper)
await self.run(switch)
return hasBeenSetUp
@@ -71,7 +83,7 @@ proc manageBackedOff(self: AutoRelayService, pid: PeerId) {.async.} =
self.backingOff.keepItIf(it != pid)
self.peerAvailable.fire()
proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
proc innerRun(self: AutoRelayService, switch: Switch) {.async.} =
while true:
# Remove relayPeers that failed
let peers = toSeq(self.relayPeers.keys())
@@ -96,7 +108,7 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
for relayPid in connectedPeers:
if self.relayPeers.len() >= self.numRelays:
break
self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch.peerInfo.peerId)
self.relayPeers[relayPid] = self.reserveAndUpdate(relayPid, switch)
if self.relayPeers.len() > 0:
await one(toSeq(self.relayPeers.values())) or self.peerAvailable.wait()
@@ -104,18 +116,20 @@ proc innerRun(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
await self.peerAvailable.wait()
await sleepAsync(200.millis)
method run*(self: AutoRelayService, switch: Switch) {.async, gcsafe.} =
method run*(self: AutoRelayService, switch: Switch) {.async.} =
if self.running:
trace "Autorelay is already running"
return
self.running = true
self.runner = self.innerRun(switch)
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async, gcsafe.} =
method stop*(self: AutoRelayService, switch: Switch): Future[bool] {.async.} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
self.running = false
self.runner.cancel()
switch.peerInfo.addressMappers.keepItIf(it != self.addressMapper)
await switch.peerInfo.update()
return hasBeenStopped
proc getAddresses*(self: AutoRelayService): seq[MultiAddress] =
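
The new `addressMapper` is how the relayed addresses actually reach the peer's advertised address set: the service registers the mapper during `setup`, triggers `peerInfo.update()` whenever a reservation changes, and unregisters it again in `stop`. A stripped-down model of that lifecycle, with placeholder types instead of the real Switch machinery:

import std/sequtils
import chronos

type
  MultiAddress = string   # placeholder for the real type
  AddressMapper = proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.gcsafe.}
  PeerInfo = ref object
    addrs: seq[MultiAddress]
    addressMappers: seq[AddressMapper]

proc update(pi: PeerInfo, listenAddrs: seq[MultiAddress]) {.async.} =
  # recompute the advertised addresses through every registered mapper
  pi.addrs = listenAddrs
  for mapper in pi.addressMappers:
    pi.addrs = await mapper(pi.addrs)

proc main() {.async.} =
  let relayAddresses = @["relayed-addr-1", "relayed-addr-2"]
  let pi = PeerInfo()
  let mapper: AddressMapper =
    proc(listenAddrs: seq[MultiAddress]): Future[seq[MultiAddress]] {.async.} =
      return relayAddresses               # advertise only the relayed addresses
  pi.addressMappers.add(mapper)           # setup
  await pi.update(@["local-tcp-addr"])    # a reservation changed
  echo pi.addrs                           # -> relayed addresses only
  pi.addressMappers.keepItIf(it != mapper)  # stop

waitFor main()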


@@ -0,0 +1,116 @@
# Nim-LibP2P
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import std/[tables, sequtils]
import chronos, chronicles
import ../switch, ../wire
import ../protocols/rendezvous
import ../services/autorelayservice
import ../protocols/connectivity/relay/relay
import ../protocols/connectivity/autonat/service
import ../protocols/connectivity/dcutr/[client, server]
import ../multicodec
logScope:
topics = "libp2p hpservice"
type
HPService* = ref object of Service
newConnectedPeerHandler: PeerEventHandler
onNewStatusHandler: StatusAndConfidenceHandler
autoRelayService: AutoRelayService
autonatService: AutonatService
proc new*(T: typedesc[HPService], autonatService: AutonatService, autoRelayService: AutoRelayService): T =
return T(autonatService: autonatService, autoRelayService: autoRelayService)
proc tryStartingDirectConn(self: HPService, switch: Switch, peerId: PeerId): Future[bool] {.async.} =
proc tryConnect(address: MultiAddress): Future[bool] {.async.} =
debug "Trying to create direct connection", peerId, address
await switch.connect(peerId, @[address], true, false)
debug "Direct connection created."
return true
await sleepAsync(500.milliseconds) # wait for AddressBook to be populated
for address in switch.peerStore[AddressBook][peerId]:
try:
let isRelayed = address.contains(multiCodec("p2p-circuit"))
if not isRelayed.get(false) and address.isPublicMA():
return await tryConnect(address)
except CatchableError as err:
debug "Failed to create direct connection.", err = err.msg
continue
return false
proc closeRelayConn(relayedConn: Connection) {.async.} =
await sleepAsync(2000.milliseconds) # grace period before closing relayed connection
await relayedConn.close()
proc newConnectedPeerHandler(self: HPService, switch: Switch, peerId: PeerId, event: PeerEvent) {.async.} =
try:
# Get all connections to the peer. If there is at least one non-relayed connection, return.
let connections = switch.connManager.getConnections()[peerId].mapIt(it.connection)
if connections.anyIt(not isRelayed(it)):
return
let incomingRelays = connections.filterIt(it.transportDir == Direction.In)
if incomingRelays.len == 0:
return
let relayedConn = incomingRelays[0]
if await self.tryStartingDirectConn(switch, peerId):
await closeRelayConn(relayedConn)
return
let dcutrClient = DcutrClient.new()
var natAddrs = switch.peerStore.getMostObservedProtosAndPorts()
if natAddrs.len == 0:
natAddrs = switch.peerInfo.listenAddrs.mapIt(switch.peerStore.guessDialableAddr(it))
await dcutrClient.startSync(switch, peerId, natAddrs)
await closeRelayConn(relayedConn)
except CatchableError as err:
debug "Hole punching failed during dcutr", err = err.msg
method setup*(self: HPService, switch: Switch): Future[bool] {.async.} =
var hasBeenSetup = await procCall Service(self).setup(switch)
hasBeenSetup = hasBeenSetup and await self.autonatService.setup(switch)
if hasBeenSetup:
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
self.newConnectedPeerHandler = proc (peerId: PeerId, event: PeerEvent) {.async.} =
await newConnectedPeerHandler(self, switch, peerId, event)
switch.connManager.addPeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
self.onNewStatusHandler = proc (networkReachability: NetworkReachability, confidence: Opt[float]) {.async.} =
if networkReachability == NetworkReachability.NotReachable and not self.autoRelayService.isRunning():
discard await self.autoRelayService.setup(switch)
elif networkReachability == NetworkReachability.Reachable and self.autoRelayService.isRunning():
discard await self.autoRelayService.stop(switch)
# We do it here instead of in the AutonatService because this is useful only when hole punching.
for t in switch.transports:
t.networkReachability = networkReachability
self.autonatService.statusAndConfidenceHandler(self.onNewStatusHandler)
return hasBeenSetup
method run*(self: HPService, switch: Switch) {.async, public.} =
await self.autonatService.run(switch)
method stop*(self: HPService, switch: Switch): Future[bool] {.async, public.} =
discard await self.autonatService.stop(switch)
if not isNil(self.newConnectedPeerHandler):
switch.connManager.removePeerEventHandler(self.newConnectedPeerHandler, PeerEventKind.Joined)
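
The core of the new hole-punching service is the status handler registered in `setup`: a relay reservation is only needed while AutoNAT judges the node unreachable, so autorelay is started and stopped as the verdict flips. A minimal model of that toggle, with placeholder types standing in for the real services:

import chronos

type
  NetworkReachability = enum Unknown, NotReachable, Reachable
  RelayToggle = ref object
    running: bool

proc setup(t: RelayToggle) {.async.} =
  t.running = true    # stands in for autoRelayService.setup(switch)

proc stop(t: RelayToggle) {.async.} =
  t.running = false   # stands in for autoRelayService.stop(switch)

proc onNewStatus(t: RelayToggle, status: NetworkReachability) {.async.} =
  # mirrors onNewStatusHandler above: hold a relay slot only while
  # we are not reachable from the outside
  if status == NetworkReachability.NotReachable and not t.running:
    await t.setup()
  elif status == NetworkReachability.Reachable and t.running:
    await t.stop()

let toggle = RelayToggle()
waitFor toggle.onNewStatus(NetworkReachability.NotReachable)
assert toggle.running
waitFor toggle.onNewStatus(NetworkReachability.Reachable)
assert not toggle.running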
