Compare commits

...

68 Commits

Author SHA1 Message Date
Gabriel Cruz
74ab07d40e chore(autotls): remove tcp,yamux,noise from integration test 2025-07-04 16:29:51 -03:00
Gabriel Cruz
5ad656bf26 fix(ci): move testintegration to new job (#1499) 2025-07-02 15:43:50 -04:00
vladopajic
cfd631457a ci(daily): fix latest deps install (#1498) 2025-07-02 14:24:05 +01:00
Radosław Kamiński
4f8597609b test(gossipsub): Signature flags tests (#1496) 2025-07-02 13:09:39 +01:00
Gabriel Cruz
4ed72a753c chore(autotls): make autotls manager into service (#1486) 2025-07-01 17:51:45 +00:00
Gabriel Cruz
2a9abbe925 fix(daily): testintegration only used on amd64 (#1494) 2025-07-01 16:53:00 +00:00
Radosław Kamiński
ee61e234ac test(gossipsub): scoring integration tests (#1487) 2025-07-01 15:41:25 +00:00
Radosław Kamiński
8f54367e3a test(gossipsub): refactor compatibility related tests (#1495) 2025-07-01 15:19:44 +00:00
vladopajic
61826a20e4 refactor(bufferstream): utilize ZeroQueue (#1491) 2025-07-01 09:51:52 +00:00
vladopajic
32951e1a68 refactor(secure): utilize ZeroQueue (#1492) 2025-07-01 09:00:52 +00:00
vladopajic
1d13e405e4 chore(ci): disable hole punching tests (#1493)
Co-authored-by: Gabriel Cruz <8129788+gmelodie@users.noreply.github.com>
2025-07-01 10:40:15 +02:00
vladopajic
729e879c1c chore: remove unused import (#1490) 2025-06-30 15:53:01 +02:00
AkshayaMani
64c9cf1b9e feat(gossipsub): Add support for skipping message insertion into the message cache (Mix protocol integration) (#1485) 2025-06-27 17:37:37 -04:00
vladopajic
4d94892eb0 chore(yamux): improve performance with zero allocation queue (#1488) 2025-06-27 16:49:51 +00:00
Gabriel Cruz
3ecb1744ce fix(autotls): private checkedGetPrimaryIPAddr (#1489) 2025-06-27 08:42:22 -04:00
vladopajic
2f9c3fb3e2 chore(perf): add quic test (#1483) 2025-06-25 11:15:07 -04:00
Gabriel Cruz
2609c270b8 feat(autotls): add AutoTLSManager (#1472) 2025-06-25 14:19:59 +00:00
Radosław Kamiński
48b3e34cd3 test(gossipsub): updateScores tests (#1471) 2025-06-24 16:01:49 +00:00
Radosław Kamiński
abb2c43667 test(gossipsub): behaviourPenalty tests (#1469) 2025-06-24 15:07:14 +00:00
Radosław Kamiński
d1cfbb35d3 test(gossipsub): organise Behavior tests (#1468) 2025-06-24 14:18:54 +00:00
Radosław Kamiński
38a630eee0 test(gossipsub): rpcHandler - rateLimit and punishInvalidMessage tests (#1467) 2025-06-24 14:43:44 +01:00
richΛrd
be1a2023ce fix: mark channel as reset to not read after closed (#1479) 2025-06-23 20:05:41 -04:00
Gabriel Cruz
021d0c1700 chore(acme): add ACMEClient to hold information about an ACME account (#1470) 2025-06-23 19:24:33 +00:00
Gabriel Cruz
f49cd377ce fix(peeridauth): fix peeridauth_integration import (#1478) 2025-06-23 15:13:22 +00:00
richΛrd
fc80840784 feat(kad-dht): handler (#1455) 2025-06-20 21:08:06 +00:00
richΛrd
7742d06a58 feat(kad-dht): routing table (#1454) 2025-06-20 16:47:48 -04:00
richΛrd
e0ea1d48a4 fix: make quic test optional (#1475) 2025-06-20 15:39:40 -04:00
richΛrd
f028ad8c12 fix: force close of streams instead of reset when closing connection (#1466) 2025-06-20 12:57:20 +00:00
richΛrd
9c153c822b chore(version): update libp2p.nimble to 1.11.0 (#1433) 2025-06-18 16:39:45 -04:00
Radosław Kamiński
d803352bd6 test(gossipsub): split unit and integration tests (#1465) 2025-06-16 15:18:18 +00:00
Radosław Kamiński
2eafac47e8 test(gossipsub): GossipThreshold and PublishThreshold tests (#1464) 2025-06-16 14:46:25 +00:00
vladopajic
848fdde0a8 feat(perf): add stats (#1452) 2025-06-13 10:16:45 +00:00
Gabriel Cruz
31e7dc68e2 chore(peeridauth): add mocked client (#1458) 2025-06-12 21:11:36 +00:00
Ivan FB
08299a2059 chore: Add some more context when an exception is caught (#1432)
Co-authored-by: richΛrd <info@richardramos.me>
2025-06-12 14:38:25 +00:00
Gabriel Cruz
2f3156eafb fix(daily): fix typo in testintegration (#1463) 2025-06-12 09:26:46 -03:00
Radosław Kamiński
72e85101b0 test(gossipsub): refactor and unify scoring tests (#1461) 2025-06-12 08:18:01 +00:00
Gabriel Cruz
d205260a3e chore(acme): add MockACMEApi for testing (#1457) 2025-06-11 18:59:29 +00:00
Radosław Kamiński
97e576d146 test: increase timeout (#1460) 2025-06-11 14:19:33 +00:00
richΛrd
888cb78331 feat(kad-dht): protobuffers (#1453) 2025-06-11 12:56:02 +00:00
richΛrd
1d4c261d2a feat: withWsTransport (#1398) 2025-06-10 22:32:55 +00:00
Gabriel Cruz
83de0c0abd feat(peeridauth): add peeridauth (#1445) 2025-06-10 10:25:34 -03:00
AkshayaMani
c501adc9ab feat(gossipsub): Add support for custom connection handling (Mix protocol integration) (#1420)
Co-authored-by: Ben-PH <benphawke@gmail.com>
2025-06-09 13:36:06 -04:00
Radosław Kamiński
f9fc24cc08 test(gossipsub): flaky tests (#1451) 2025-06-09 17:20:49 +01:00
richΛrd
cd26244ccc chore(quic): add libp2p_network_bytes metric (#1439)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-09 09:42:52 -03:00
vladopajic
cabab6aafe chore(gossipsub): add consts (#1447)
Co-authored-by: Radoslaw Kaminski <radoslaw@status.im>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-06-06 14:33:38 +00:00
Radosław Kamiński
fb42a9b4aa test(gossipsub): parameters (#1442)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-06-06 14:09:55 +00:00
Radosław Kamiński
141f4d9116 fix(GossipSub): save sent iHave in first element (#1405) 2025-06-06 10:27:59 +00:00
Gabriel Cruz
cb31152b53 feat(autotls): add acme client (#1436) 2025-06-05 17:47:02 +00:00
Radosław Kamiński
3a7745f920 test(gossipsub): message cache (#1431) 2025-06-03 15:18:29 +01:00
Radosław Kamiński
a89916fb1a test: checkUntilTimeout refactor (#1437) 2025-06-03 13:31:34 +01:00
vladopajic
c6cf46c904 fix(ci-daily): delete cache action will continue on error (#1435) 2025-06-02 17:08:31 +02:00
Gabriel Cruz
b28a71ab13 chore(readme): improve README's development section (#1427) 2025-05-29 17:51:29 +00:00
vladopajic
95b9859bcd chore(interop): move interop code to separate folder (#1413) 2025-05-29 16:14:12 +00:00
vladopajic
9e599753af ci(daily): add pinned dependencies variant (#1418) 2025-05-29 15:27:06 +00:00
richΛrd
2e924906bb chore: bump quic (#1428) 2025-05-29 14:25:02 +00:00
Radosław Kamiński
e811c1ad32 fix(gossipsub): save iDontWants messages in the first element of history (#1393) 2025-05-29 13:33:51 +01:00
Radosław Kamiński
86695b55bb test(gossipsub): include missing test files and handle flaky tests (#1416)
Co-authored-by: vladopajic <vladopajic@users.noreply.github.com>
2025-05-29 12:44:21 +01:00
vladopajic
8c3a4d882a ci(dependencies): fix access to tokens (#1421) 2025-05-29 00:27:36 +00:00
richΛrd
4bad343ddc fix: limit chronicles version to < 0.11.0 (#1423) 2025-05-28 21:00:41 -03:00
vladopajic
47b8a05c32 ci(daily): improvements (#1404) 2025-05-27 14:41:53 +00:00
Radosław Kamiński
4e6f4af601 test(gossipsub): heartbeat tests (#1391) 2025-05-27 10:28:12 +01:00
Miran
7275f6f9c3 chore: unused imports are now errors (#1399) 2025-05-26 21:36:08 +02:00
richΛrd
c3dae6a7d4 fix(quic): reset and mm for interop tests (#1397) 2025-05-26 12:16:17 -04:00
vladopajic
bb404eda4a fix(ci-daily): remove --solver flag (#1400) 2025-05-26 16:48:51 +02:00
richΛrd
584710bd80 chore: move -d:libp2p_quic_support flag to .nimble (#1392) 2025-05-26 08:57:26 -04:00
Radosław Kamiński
ad5eae9adf test(gossipsub): move and refactor control messages tests (#1380) 2025-05-22 15:10:37 +00:00
richΛrd
26fae7cd2d chore: bump quic (#1387) 2025-05-21 22:30:35 +00:00
Miran
87d6655368 chore: update more dependencies (#1374) 2025-05-21 21:46:09 +00:00
125 changed files with 8841 additions and 2763 deletions

View File

@@ -118,5 +118,5 @@ jobs:
nimble --version
gcc --version
export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test

View File

@@ -51,7 +51,7 @@ jobs:
- name: Run test suite with coverage flags
run: |
export NIMFLAGS="-d:libp2p_quic_support --lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
export NIMFLAGS="--lineDir:on --passC:-fprofile-arcs --passC:-ftest-coverage --passL:-fprofile-arcs --passL:-ftest-coverage"
nimble testnative
nimble testpubsub
nimble testfilter

View File

@@ -6,9 +6,26 @@ on:
workflow_dispatch:
jobs:
test_amd64:
name: Daily amd64
test_amd64_latest:
name: Daily amd64 (latest dependencies)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"
test_amd64_pinned:
name: Daily amd64 (pinned dependencies)
uses: ./.github/workflows/daily_common.yml
with:
pinned_deps: true
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['amd64']"

View File

@@ -4,6 +4,11 @@ name: Daily Common
on:
workflow_call:
inputs:
pinned_deps:
description: 'Install dependencies from the pinned file instead of the latest versions'
required: false
type: boolean
default: false
nim:
description: 'Nim Configuration'
required: true
@@ -17,20 +22,12 @@ on:
required: false
type: string
default: "[]"
use_sat_solver:
description: 'Install dependencies with SAT Solver'
required: false
type: boolean
default: false
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
jobs:
delete_cache:
name: Delete github action's branch cache
runs-on: ubuntu-latest
continue-on-error: true
steps:
- uses: snnaplab/delete-branch-cache-action@v1
@@ -81,8 +78,14 @@ jobs:
- name: Install p2pd
run: |
V=1 bash scripts/build_p2pd.sh p2pdCache 124530a3
- name: Install dependencies
- name: Install dependencies (pinned)
if: ${{ inputs.pinned_deps }}
run: |
nimble install_pinned
- name: Install dependencies (latest)
if: ${{ inputs.pinned_deps == false }}
run: |
nimble install -y --depsOnly
@@ -91,11 +94,14 @@ jobs:
nim --version
nimble --version
if [[ "${{ inputs.use_sat_solver }}" == "true" ]]; then
dependency_solver="sat"
else
dependency_solver="legacy"
fi
export NIMFLAGS="${NIMFLAGS} -d:libp2p_quic_support --mm:${{ matrix.nim.memory_management }} --solver:${dependency_solver}"
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble test
- name: Run integration tests
if: ${{ matrix.platform.os == 'linux' && matrix.cpu == 'amd64' }}
run: |
nim --version
nimble --version
export NIMFLAGS="${NIMFLAGS} --mm:${{ matrix.nim.memory_management }}"
nimble testintegration

View File

@@ -1,14 +0,0 @@
name: Daily Nim Devel
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_nim_devel:
name: Daily Nim Devel
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'devel', 'memory_management': 'orc'}]"
cpu: "['amd64']"

View File

@@ -10,6 +10,14 @@ jobs:
name: Daily i386 (Linux)
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-1-6', 'memory_management': 'refc'}, {'ref': 'version-2-0', 'memory_management': 'refc'}, {'ref': 'devel', 'memory_management': 'orc'}]"
nim: "[
{'ref': 'version-1-6', 'memory_management': 'refc'},
{'ref': 'version-2-0', 'memory_management': 'refc'},
{'ref': 'version-2-2', 'memory_management': 'refc'},
{'ref': 'devel', 'memory_management': 'refc'},
]"
cpu: "['i386']"
exclude: "[{'platform': {'os':'macos'}}, {'platform': {'os':'windows'}}]"
exclude: "[
{'platform': {'os':'macos'}},
{'platform': {'os':'windows'}},
]"

View File

@@ -1,15 +0,0 @@
name: Daily SAT
on:
schedule:
- cron: "30 6 * * *"
workflow_dispatch:
jobs:
test_amd64:
name: Daily SAT
uses: ./.github/workflows/daily_common.yml
with:
nim: "[{'ref': 'version-2-0', 'memory_management': 'refc'}]"
cpu: "['amd64']"
use_sat_solver: true

View File

@@ -17,13 +17,13 @@ jobs:
target:
- repository: status-im/nimbus-eth2
ref: unstable
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2 }}
secret: ACTIONS_GITHUB_TOKEN_NIMBUS_ETH2
- repository: waku-org/nwaku
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NWAKU }}
secret: ACTIONS_GITHUB_TOKEN_NWAKU
- repository: codex-storage/nim-codex
ref: master
token: ${{ secrets.ACTIONS_GITHUB_TOKEN_NIM_CODEX }}
secret: ACTIONS_GITHUB_TOKEN_NIM_CODEX
steps:
- name: Clone target repository
uses: actions/checkout@v4
@@ -32,7 +32,7 @@ jobs:
ref: ${{ matrix.target.ref}}
path: nbc
fetch-depth: 0
token: ${{ matrix.target.token }}
token: ${{ secrets[matrix.target.secret] }}
- name: Checkout this ref in target repository
run: |

View File

@@ -27,7 +27,7 @@ jobs:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/transport-interop/Dockerfile .
run: docker buildx build --load -t nim-libp2p-head -f interop/transport/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-transport-interop-test@master
with:
@@ -35,26 +35,28 @@ jobs:
# without suffix action fails because "hole-punching-interop" artifacts have
# the same name as "transport-interop" artifacts
test-results-suffix: transport-interop
extra-versions: ${{ github.workspace }}/tests/transport-interop/version.json
extra-versions: ${{ github.workspace }}/interop/transport/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
run-hole-punching-interop:
name: Run hole-punching interoperability tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: docker/setup-buildx-action@v3
- name: Build image
run: docker buildx build --load -t nim-libp2p-head -f tests/hole-punching-interop/Dockerfile .
- name: Run tests
uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
with:
test-filter: nim-libp2p-head
extra-versions: ${{ github.workspace }}/tests/hole-punching-interop/version.json
s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}
# nim-libp2p#1367: hole punching tests are temporary disabled as they keep failing
# and issue does not seem to be on nim-libp2p side
# run-hole-punching-interop:
# name: Run hole-punching interoperability tests
# runs-on: ubuntu-22.04
# steps:
# - uses: actions/checkout@v4
# - uses: docker/setup-buildx-action@v3
# - name: Build image
# run: docker buildx build --load -t nim-libp2p-head -f interop/hole-punching/Dockerfile .
# - name: Run tests
# uses: libp2p/test-plans/.github/actions/run-interop-hole-punch-test@master
# with:
# test-filter: nim-libp2p-head
# extra-versions: ${{ github.workspace }}/interop/hole-punching/version.json
# s3-cache-bucket: ${{ vars.S3_LIBP2P_BUILD_CACHE_BUCKET_NAME }}
# s3-access-key-id: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_ACCESS_KEY_ID }}
# s3-secret-access-key: ${{ secrets.S3_LIBP2P_BUILD_CACHE_AWS_SECRET_ACCESS_KEY }}
# aws-region: ${{ vars.S3_LIBP2P_BUILD_CACHE_AWS_REGION }}

View File

@@ -22,6 +22,6 @@ jobs:
uses: arnetheduck/nph-action@v1
with:
version: 0.6.1
options: "examples libp2p tests tools *.nim*"
options: "examples libp2p tests interop tools *.nim*"
fail: true
suggest: true

.pinned
View File

@@ -1,6 +1,6 @@
bearssl;https://github.com/status-im/nim-bearssl@#34d712933a4e0f91f5e66bc848594a581504a215
chronicles;https://github.com/status-im/nim-chronicles@#32ac8679680ea699f7dbc046e8e0131cac97d41a
chronos;https://github.com/status-im/nim-chronos@#c04576d829b8a0a1b12baaa8bc92037501b3a4a0
chronicles;https://github.com/status-im/nim-chronicles@#81a4a7a360c78be9c80c8f735c76b6d4a1517304
chronos;https://github.com/status-im/nim-chronos@#b55e2816eb45f698ddaca8d8473e401502562db2
dnsclient;https://github.com/ba0f3/dnsclient.nim@#23214235d4784d24aceed99bbfe153379ea557c8
faststreams;https://github.com/status-im/nim-faststreams@#c51315d0ae5eb2594d0bf41181d0e1aca1b3c01d
httputils;https://github.com/status-im/nim-http-utils@#79cbab1460f4c0cdde2084589d017c43a3d7b4f1
@@ -8,12 +8,15 @@ json_serialization;https://github.com/status-im/nim-json-serialization@#2b1c5eb1
metrics;https://github.com/status-im/nim-metrics@#6142e433fc8ea9b73379770a788017ac528d46ff
ngtcp2;https://github.com/status-im/nim-ngtcp2@#9456daa178c655bccd4a3c78ad3b8cce1f0add73
nimcrypto;https://github.com/cheatfate/nimcrypto@#19c41d6be4c00b4a2c8000583bd30cf8ceb5f4b1
quic;https://github.com/status-im/nim-quic.git@#a6c30263c95fc5ddb2ef4d197c09b282555c06b0
quic;https://github.com/status-im/nim-quic.git@#ca3eda53bee9cef7379be195738ca1490877432f
results;https://github.com/arnetheduck/nim-results@#df8113dda4c2d74d460a8fa98252b0b771bf1f27
secp256k1;https://github.com/status-im/nim-secp256k1@#f808ed5e7a7bfc42204ec7830f14b7a42b63c284
serialization;https://github.com/status-im/nim-serialization@#548d0adc9797a10b2db7f788b804330306293088
stew;https://github.com/status-im/nim-stew@#0db179256cf98eb9ce9ee7b9bc939f219e621f77
testutils;https://github.com/status-im/nim-testutils@#9e842bd58420d23044bc55e16088e8abbe93ce51
unittest2;https://github.com/status-im/nim-unittest2@#8b51e99b4a57fcfb31689230e75595f024543024
websock;https://github.com/status-im/nim-websock@#f8ed9b40a5ff27ad02a3c237c4905b0924e3f982
websock;https://github.com/status-im/nim-websock@#d5cd89062cd2d168ef35193c7d29d2102921d97e
zlib;https://github.com/status-im/nim-zlib@#daa8723fd32299d4ca621c837430c29a5a11e19a
jwt;https://github.com/vacp2p/nim-jwt@#18f8378de52b241f321c1f9ea905456e89b95c6f
bearssl_pkey_decoder;https://github.com/vacp2p/bearssl_pkey_decoder@#21dd3710df9345ed2ad8bf8f882761e07863b8e0
bio;https://github.com/xzeshen/bio@#0f5ed58b31c678920b6b4f7c1783984e6660be97

README.md
View File

@@ -20,14 +20,13 @@
- [Background](#background)
- [Install](#install)
- [Getting Started](#getting-started)
- [Testing](#testing)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [Development](#development)
- [Contribute](#contribute)
- [Contributors](#contributors)
- [Core Maintainers](#core-maintainers)
- [Modules](#modules)
- [Users](#users)
- [Stability](#stability)
- [License](#license)
## Background
@@ -39,20 +38,102 @@ This is a native Nim implementation, using [chronos](https://github.com/status-i
Learn more about libp2p at [**libp2p.io**](https://libp2p.io) and follow libp2p's documentation [**docs.libp2p.io**](https://docs.libp2p.io).
## Install
**Prerequisite**
- [Nim](https://nim-lang.org/install.html)
> The currently supported Nim versions are 1.6, 2.0 and 2.2.
```
nimble install libp2p
```
## Getting Started
You'll find the nim-libp2p documentation [here](https://vacp2p.github.io/nim-libp2p/docs/). See [examples](./examples) for simple usage patterns.
## Getting Started
Try out the chat example. For this you'll need to have [`go-libp2p-daemon`](examples/go-daemon/daemonapi.md) running. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu # replace this hash with the one you were given
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)
## Development
Clone the repository and install the dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
nimble install -dy
```
### Testing
Remember you'll need to build the `go-libp2p-daemon` binary to run the `nim-libp2p` tests.
To do so, please follow the installation instructions in [daemonapi.md](examples/go-daemon/daemonapi.md).
Run unit tests:
```sh
# run all the unit tests
nimble test
```
**Note:** Running all tests requires the [`go-libp2p-daemon` to be installed and running](examples/go-daemon/daemonapi.md).
If you only want to run tests that don't require `go-libp2p-daemon`, use:
```
nimble testnative
```
For a list of all available test suites, use:
```
nimble tasks
```
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Let us know if you find anything that could a) speed up project development, b) improve quality, or c) prevent future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Code should be formatted with [nph](https://github.com/arnetheduck/nph) and follow the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable QUIC transport support:
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (i.e., metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## Modules
List of the modules implemented in nim-libp2p:
@@ -111,71 +192,6 @@ The versioning follows [semver](https://semver.org/), with some additions:
We aim to be compatible at all times with at least two Nim `MINOR` versions, currently `1.6 & 2.0`
## Development
Clone and Install dependencies:
```sh
git clone https://github.com/vacp2p/nim-libp2p
cd nim-libp2p
# to use dependencies computed by nimble
nimble install -dy
# OR to install the dependencies versions used in CI
nimble install_pinned
```
Run unit tests:
```sh
# run all the unit tests
nimble test
```
This requires the go daemon to be available. To only run native tests, use `nimble testnative`.
Or use `nimble tasks` to show all available tasks.
### Contribute
The libp2p implementation in Nim is a work in progress. We welcome contributors to help out! Specifically, you can:
- Go through the modules and **check out existing issues**. This would be especially useful for modules in active development. Some knowledge of IPFS/libp2p may be required, as well as the infrastructure behind it.
- **Perform code reviews**. Feel free to let us know if you found anything that can a) speed up the project development b) ensure better quality and c) reduce possible future bugs.
- **Add tests**. Help nim-libp2p to be more robust by adding more tests to the [tests folder](tests/).
- **Small PRs**. Try to keep PRs atomic and digestible. This makes the review process and pinpointing bugs easier.
- **Code format**. Please format code using [nph](https://github.com/arnetheduck/nph) v0.5.1. This will ensure a consistent codebase and make PRs easier to review. A CI rule has been added to ensure that future commits are all formatted using the same nph version.
The code follows the [Status Nim Style Guide](https://status-im.github.io/nim-style-guide/).
### Contributors
<a href="https://github.com/vacp2p/nim-libp2p/graphs/contributors"><img src="https://contrib.rocks/image?repo=vacp2p/nim-libp2p" alt="nim-libp2p contributors"></a>
### Core Maintainers
<table>
<tbody>
<tr>
<td align="center"><a href="https://github.com/richard-ramos"><img src="https://avatars.githubusercontent.com/u/1106587?v=4?s=100" width="100px;" alt="Richard"/><br /><sub><b>Richard</b></sub></a></td>
<td align="center"><a href="https://github.com/vladopajic"><img src="https://avatars.githubusercontent.com/u/4353513?v=4?s=100" width="100px;" alt="Vlado"/><br /><sub><b>Vlado</b></sub></a></td>
<td align="center"><a href="https://github.com/gmelodie"><img src="https://avatars.githubusercontent.com/u/8129788?v=4?s=100" width="100px;" alt="Gabe"/><br /><sub><b>Gabe</b></sub></a></td>
</tr>
</tbody>
</table>
### Compile time flags
Enable quic transport support
```bash
nim c -d:libp2p_quic_support some_file.nim
```
Enable expensive metrics (ie, metrics with per-peer cardinality):
```bash
nim c -d:libp2p_expensive_metrics some_file.nim
```
Set list of known libp2p agents for metrics:
```bash
nim c -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku some_file.nim
```
Specify gossipsub specific topics to measure in the metrics:
```bash
nim c -d:KnownLibP2PTopics=topic1,topic2,topic3 some_file.nim
```
## License
Licensed and distributed under either of

View File

@@ -4,6 +4,7 @@ if dirExists("nimbledeps/pkgs"):
if dirExists("nimbledeps/pkgs2"):
switch("NimblePath", "nimbledeps/pkgs2")
switch("warningAsError", "UnusedImport:on")
switch("warning", "CaseTransition:off")
switch("warning", "ObservableStores:off")
switch("warning", "LockLevel:off")

View File

@@ -3,9 +3,7 @@
- [Prerequisites](#prerequisites)
- [Installation](#installation)
- [Script](#script)
- [Usage](#usage)
- [Example](#example)
- [Getting Started](#getting-started)
- [Examples](#examples)
# Introduction
This is a libp2p-backed daemon wrapping the functionality of go-libp2p for use in Nim. <br>
@@ -13,20 +11,25 @@ For more information about the go daemon, check out [this repository](https://gi
> **Required only** for running the tests.
# Prerequisites
Go with version `1.16.0`.
Go with version `1.16.0`
> You will *likely* be able to build `go-libp2p-daemon` with different Go versions, but **they haven't been tested**.
# Installation
Follow one of the methods below:
## Script
Run the build script with the `go` command pointing at the correct Go version.
We recommend `1.16.0`, as stated above.
```sh
./scripts/build_p2pd.sh
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the correct directory.
If you find any issues, please head into our discord and ask for our assistance.
`build_p2pd.sh` will not rebuild unless needed. If you already have the newest binary and you want to force the rebuild, use:
```sh
./scripts/build_p2pd.sh -f
```
Or:
```sh
./scripts/build_p2pd.sh --force
```
If everything goes correctly, the binary (`p2pd`) should be built and placed in the `$GOPATH/bin` directory.
If you're having issues, head into [our discord](https://discord.com/channels/864066763682218004/1115526869769535629) and ask for assistance.
After successfully building the binary, remember to add it to your path so it can be found. You can do that by running:
```sh
@@ -34,28 +37,7 @@ export PATH="$PATH:$HOME/go/bin"
```
> **Tip:** To make this change permanent, add the command above to your `.bashrc` file.
# Usage
## Example
# Examples
Examples can be found in the [examples folder](https://github.com/status-im/nim-libp2p/tree/readme/examples/go-daemon)
## Getting Started
Try out the chat example. Full code can be found [here](https://github.com/status-im/nim-libp2p/blob/master/examples/chat.nim):
```bash
nim c -r --threads:on examples/directchat.nim
```
This will output a peer ID such as `QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu` which you can use in another instance to connect to it.
```bash
./examples/directchat
/connect QmbmHfVvouKammmQDJck4hz33WvVktNEe7pasxz2HgseRu
```
You can now chat between the instances!
![Chat example](https://imgur.com/caYRu8K.gif)

View File

@@ -11,7 +11,7 @@ RUN cd nim-libp2p && nimble install_pinned && nimble install "redis@#b341fe240db
COPY . nim-libp2p/
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./tests/hole-punching-interop/hole_punching.nim
RUN cd nim-libp2p && nim c --skipParentCfg --NimblePath:./nimbledeps/pkgs --mm:refc -d:chronicles_log_level=DEBUG -d:chronicles_default_output_device=stderr -d:release --threads:off --skipProjCfg -o:hole-punching-tests ./interop/hole-punching/hole_punching.nim
FROM --platform=linux/amd64 debian:bullseye-slim
RUN --mount=type=cache,target=/var/cache/apt apt-get update && apt-get install -y dnsutils jq curl tcpdump iproute2 libssl-dev

View File

@@ -15,8 +15,7 @@ import
protocols/connectivity/autonat/service,
protocols/ping,
]
import ../stubs/autonatclientstub
import ../errorhelpers
import ../../tests/[stubs/autonatclientstub, errorhelpers]
logScope:
topics = "hp interop node"
@@ -85,8 +84,8 @@ proc main() {.async.} =
debug "Dialing relay...", relayMA
let relayId = await switch.connect(relayMA).wait(30.seconds)
debug "Connected to relay", relayId
except AsyncTimeoutError:
raise newException(CatchableError, "Connection to relay timed out")
except AsyncTimeoutError as e:
raise newException(CatchableError, "Connection to relay timed out: " & e.msg, e)
# Wait for our relay address to be published
while not switch.peerInfo.addrs.anyIt(it.contains(multiCodec("p2p-circuit")).tryGet()):
@@ -104,7 +103,7 @@ proc main() {.async.} =
try:
PeerId.init(redisClient.bLPop(@["LISTEN_CLIENT_PEER_ID"], 0)[1]).tryGet()
except Exception as e:
raise newException(CatchableError, e.msg)
raise newException(CatchableError, "Exception init peer: " & e.msg, e)
debug "Got listener peer id", listenerId
let listenerRelayAddr = MultiAddress.init($relayMA & "/p2p-circuit").tryGet()
@@ -131,8 +130,8 @@ try:
return "done"
discard waitFor(mainAsync().wait(4.minutes))
except AsyncTimeoutError:
error "Program execution timed out."
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg

View File

@@ -13,6 +13,6 @@ COPY . nim-libp2p/
RUN \
cd nim-libp2p && \
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./tests/transport-interop/main.nim
nim c --skipProjCfg --skipParentCfg --NimblePath:./nimbledeps/pkgs -p:nim-libp2p --mm:refc -d:libp2p_quic_support -d:chronicles_log_level=WARN -d:chronicles_default_output_device=stderr --threads:off ./interop/transport/main.nim
ENTRYPOINT ["/app/nim-libp2p/tests/transport-interop/main"]
ENTRYPOINT ["/app/nim-libp2p/interop/transport/main"]

View File

@@ -47,12 +47,9 @@ proc main() {.async.} =
MultiAddress.init("/ip4/" & ip & "/udp/0/quic-v1").tryGet()
)
of "ws":
discard switchBuilder
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
discard switchBuilder.withWsTransport().withAddress(
MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet()
)
.withAddress(MultiAddress.init("/ip4/" & ip & "/tcp/0/ws").tryGet())
else:
doAssert false
@@ -83,7 +80,7 @@ proc main() {.async.} =
try:
redisClient.bLPop(@["listenerAddr"], testTimeout.seconds.int)[1]
except Exception as e:
raise newException(CatchableError, e.msg)
raise newException(CatchableError, "Exception calling bLPop: " & e.msg, e)
let
remoteAddr = MultiAddress.init(listenerAddr).tryGet()
dialingStart = Moment.now()
@@ -108,8 +105,8 @@ try:
return "done"
discard waitFor(mainAsync().wait(testTimeout))
except AsyncTimeoutError:
error "Program execution timed out."
except AsyncTimeoutError as e:
error "Program execution timed out", description = e.msg
quit(-1)
except CatchableError as e:
error "Unexpected error", description = e.msg

View File

@@ -1,7 +1,7 @@
mode = ScriptMode.Verbose
packageName = "libp2p"
version = "1.10.1"
version = "1.11.0"
author = "Status Research & Development GmbH"
description = "LibP2P implementation"
license = "MIT"
@@ -9,9 +9,9 @@ skipDirs = @["tests", "examples", "Nim", "tools", "scripts", "docs"]
requires "nim >= 1.6.0",
"nimcrypto >= 0.6.0 & < 0.7.0", "dnsclient >= 0.3.0 & < 0.4.0", "bearssl >= 0.2.5",
"chronicles >= 0.10.2", "chronos >= 4.0.3", "metrics", "secp256k1", "stew >= 0.4.0",
"websock", "unittest2", "results",
"https://github.com/status-im/nim-quic.git#a6c30263c95fc5ddb2ef4d197c09b282555c06b0"
"chronicles >= 0.10.3 & < 0.11.0", "chronos >= 4.0.4", "metrics", "secp256k1",
"stew >= 0.4.0", "websock >= 0.2.0", "unittest2", "results", "quic >= 0.2.7", "bio",
"https://github.com/vacp2p/nim-jwt.git#18f8378de52b241f321c1f9ea905456e89b95c6f"
let nimc = getEnv("NIMC", "nim") # Which nim compiler to use
let lang = getEnv("NIMLANG", "c") # Which backend (c/cpp/js)
@@ -30,7 +30,7 @@ proc runTest(filename: string, moreoptions: string = "") =
excstr.add(" " & moreoptions & " ")
if getEnv("CICOV").len > 0:
excstr &= " --nimcache:nimcache/" & filename & "-" & $excstr.hash
exec excstr & " -r " & " tests/" & filename
exec excstr & " -r -d:libp2p_quic_support tests/" & filename
rmFile "tests/" & filename.toExe
proc buildSample(filename: string, run = false, extraFlags = "") =
@@ -62,6 +62,9 @@ task testfilter, "Run PKI filter test":
runTest("testpkifilter")
runTest("testpkifilter", moreoptions = "-d:libp2p_pki_schemes=")
task testintegration, "Runs integration tests":
runTest("testintegration")
task test, "Runs the test suite":
runTest("testall")
exec "nimble testfilter"

libp2p/autotls/acme/api.nim (new file)
View File

@@ -0,0 +1,509 @@
import options, sequtils, strutils, json, uri
from times import DateTime, parse
import chronos/apps/http/httpclient, jwt, results, bearssl/pem, chronicles
import ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export ACMEError
logScope:
topics = "libp2p acme api"
const
LetsEncryptURL* = "https://acme-v02.api.letsencrypt.org"
LetsEncryptURLStaging* = "https://acme-staging-v02.api.letsencrypt.org"
Alg = "RS256"
DefaultChalCompletedRetries = 10
DefaultChalCompletedRetryTime = 1.seconds
DefaultFinalizeRetries = 10
DefaultFinalizeRetryTime = 1.seconds
DefaultRandStringSize = 256
ACMEHttpHeaders = [("Content-Type", "application/jose+json")]
type Authorization* = string
type Domain* = string
type Kid* = string
type Nonce* = string
type ACMEDirectory* = object
newNonce*: string
newOrder*: string
newAccount*: string
type ACMEApi* = ref object of RootObj
directory: Opt[ACMEDirectory]
session: HttpSessionRef
acmeServerURL*: Uri
type HTTPResponse* = object
body*: JsonNode
headers*: HttpTable
type JWK = object
kty: string
n: string
e: string
# whether the request uses Kid or not
type ACMERequestType = enum
ACMEJwkRequest
ACMEKidRequest
type ACMERequestHeader = object
alg: string
typ: string
nonce: Nonce
url: string
case kind: ACMERequestType
of ACMEJwkRequest:
jwk: JWK
of ACMEKidRequest:
kid: Kid
type Email = string
type ACMERegisterRequest* = object
termsOfServiceAgreed: bool
contact: seq[Email]
type ACMEAccountStatus = enum
valid = "valid"
deactivated = "deactivated"
revoked = "revoked"
type ACMERegisterResponseBody = object
status*: ACMEAccountStatus
type ACMERegisterResponse* = object
kid*: Kid
status*: ACMEAccountStatus
type ACMEChallengeStatus* {.pure.} = enum
PENDING = "pending"
PROCESSING = "processing"
VALID = "valid"
INVALID = "invalid"
type ACMEOrderStatus* {.pure.} = enum
PENDING = "pending"
READY = "ready"
PROCESSING = "processing"
VALID = "valid"
INVALID = "invalid"
type ACMEChallengeType* {.pure.} = enum
DNS01 = "dns-01"
HTTP01 = "http-01"
TLSALPN01 = "tls-alpn-01"
type ACMEChallengeToken* = string
type ACMEChallenge* = object
url*: string
`type`*: ACMEChallengeType
status*: ACMEChallengeStatus
token*: ACMEChallengeToken
type ACMEChallengeIdentifier = object
`type`: string
value: string
type ACMEChallengeRequest = object
identifiers: seq[ACMEChallengeIdentifier]
type ACMEChallengeResponseBody = object
status: ACMEOrderStatus
authorizations: seq[Authorization]
finalize: string
type ACMEChallengeResponse* = object
status*: ACMEOrderStatus
authorizations*: seq[Authorization]
finalize*: string
order*: string
type ACMEChallengeResponseWrapper* = object
finalize*: string
order*: string
dns01*: ACMEChallenge
type ACMEAuthorizationsResponse* = object
challenges*: seq[ACMEChallenge]
type ACMECompletedResponse* = object
url: string
type ACMECheckKind* = enum
ACMEOrderCheck
ACMEChallengeCheck
type ACMECheckResponse* = object
case kind: ACMECheckKind
of ACMEOrderCheck:
orderStatus: ACMEOrderStatus
of ACMEChallengeCheck:
chalStatus: ACMEChallengeStatus
retryAfter: Duration
type ACMEFinalizeResponse* = object
status: ACMEOrderStatus
type ACMEOrderResponse* = object
certificate: string
expires: string
type ACMECertificateResponse* = object
rawCertificate*: string
certificateExpiry*: DateTime
template handleError*(msg: string, body: untyped): untyped =
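# run `body`, converting JSON, HTTP and unexpected failures into ACMEError
# (prefixed with `msg`); ACMEError and CancelledError propagate unchanged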
try:
body
except ACMEError as exc:
raise exc
except CancelledError as exc:
raise exc
except JsonKindError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except ValueError as exc:
raise newException(ACMEError, msg & ": Failed to decode JSON", exc)
except HttpError as exc:
raise newException(ACMEError, msg & ": Failed to connect to ACME server", exc)
except CatchableError as exc:
raise newException(ACMEError, msg & ": Unexpected error", exc)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.}
proc new*(
T: typedesc[ACMEApi], acmeServerURL: Uri = parseUri(LetsEncryptURL)
): ACMEApi =
let session = HttpSessionRef.new()
ACMEApi(
session: session, directory: Opt.none(ACMEDirectory), acmeServerURL: acmeServerURL
)
proc getDirectory(
self: ACMEApi
): Future[ACMEDirectory] {.async: (raises: [ACMEError, CancelledError]).} =
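# return the cached directory when present; otherwise fetch <acmeServerURL>/directory once and cache it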
handleError("getDirectory"):
self.directory.valueOr:
let acmeResponse = await self.get(self.acmeServerURL / "directory")
let directory = acmeResponse.body.to(ACMEDirectory)
self.directory = Opt.some(directory)
directory
method requestNonce*(
self: ACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]), base.} =
handleError("requestNonce"):
let acmeResponse = await self.get(parseUri((await self.getDirectory()).newNonce))
Nonce(acmeResponse.headers.keyOrError("Replay-Nonce"))
# TODO: save n and e in account so we don't have to recalculate every time
proc acmeHeader(
self: ACMEApi, uri: Uri, key: KeyPair, needsJwk: bool, kid: Opt[Kid]
): Future[ACMERequestHeader] {.async: (raises: [ACMEError, CancelledError]).} =
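# RFC 8555 §6.2: account creation (new-account) signs with the full JWK;
# every other request references the account via its kid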
if not needsJwk and kid.isNone():
raise newException(ACMEError, "kid not set")
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let newNonce = await self.requestNonce()
if needsJwk:
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
ACMERequestHeader(
kind: ACMEJwkRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
jwk: JWK(kty: "RSA", n: base64UrlEncode(nArray), e: base64UrlEncode(eArray)),
)
else:
ACMERequestHeader(
kind: ACMEKidRequest,
alg: Alg,
typ: "JWT",
nonce: newNonce,
url: $uri,
kid: kid.get(),
)
method post*(
self: ACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef
.post(self.session, $uri, body = payload, headers = ACMEHttpHeaders)
.get()
.send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
method get*(
self: ACMEApi, uri: Uri
): Future[HTTPResponse] {.
async: (raises: [ACMEError, HttpError, CancelledError]), base
.} =
let rawResponse = await HttpClientRequestRef.get(self.session, $uri).get().send()
let body = await rawResponse.getResponseBody()
HTTPResponse(body: body, headers: rawResponse.headers)
proc createSignedAcmeRequest(
self: ACMEApi,
uri: Uri,
payload: auto,
key: KeyPair,
needsJwk: bool = false,
kid: Opt[Kid] = Opt.none(Kid),
): Future[string] {.async: (raises: [ACMEError, CancelledError]).} =
if key.pubkey.scheme != PKScheme.RSA or key.seckey.scheme != PKScheme.RSA:
raise newException(ACMEError, "Unsupported signing key type")
let acmeHeader = await self.acmeHeader(uri, key, needsJwk, kid)
handleError("createSignedAcmeRequest"):
var token = toJWT(%*{"header": acmeHeader, "claims": payload})
let derPrivKey = key.seckey.rsakey.getBytes.get
let pemPrivKey: string = pemEncode(derPrivKey, "PRIVATE KEY")
token.sign(pemPrivKey)
$token.toFlattenedJson()
proc requestRegister*(
self: ACMEApi, key: KeyPair
): Future[ACMERegisterResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let registerRequest = ACMERegisterRequest(termsOfServiceAgreed: true)
handleError("acmeRegister"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newAccount),
registerRequest,
key,
needsJwk = true,
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newAccount), payload)
let acmeResponseBody = acmeResponse.body.to(ACMERegisterResponseBody)
ACMERegisterResponse(
status: acmeResponseBody.status, kid: acmeResponse.headers.keyOrError("location")
)
proc requestNewOrder*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
# request challenge from ACME server
let orderRequest = ACMEChallengeRequest(
identifiers: domains.mapIt(ACMEChallengeIdentifier(`type`: "dns", value: it))
)
handleError("requestNewOrder"):
let payload = await self.createSignedAcmeRequest(
parseUri((await self.getDirectory()).newOrder),
orderRequest,
key,
kid = Opt.some(kid),
)
let acmeResponse =
await self.post(parseUri((await self.getDirectory()).newOrder), payload)
let challengeResponseBody = acmeResponse.body.to(ACMEChallengeResponseBody)
if challengeResponseBody.authorizations.len == 0:
raise newException(ACMEError, "Authorizations field is empty")
ACMEChallengeResponse(
status: challengeResponseBody.status,
authorizations: challengeResponseBody.authorizations,
finalize: challengeResponseBody.finalize,
order: acmeResponse.headers.keyOrError("location"),
)
proc requestAuthorizations*(
self: ACMEApi, authorizations: seq[Authorization], key: KeyPair, kid: Kid
): Future[ACMEAuthorizationsResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestAuthorizations"):
doAssert authorizations.len > 0
let acmeResponse = await self.get(parseUri(authorizations[0]))
acmeResponse.body.to(ACMEAuthorizationsResponse)
proc requestChallenge*(
self: ACMEApi, domains: seq[Domain], key: KeyPair, kid: Kid
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestNewOrder(domains, key, kid)
if orderResponse.status != ACMEOrderStatus.PENDING and
orderResponse.status != ACMEOrderStatus.READY:
# ready is a valid status when renewing certs before expiry
raise newException(ACMEError, "Invalid new order status: " & $orderResponse.status)
let authorizationsResponse =
await self.requestAuthorizations(orderResponse.authorizations, key, kid)
if authorizationsResponse.challenges.len == 0:
raise newException(ACMEError, "No challenges received")
return ACMEChallengeResponseWrapper(
finalize: orderResponse.finalize,
order: orderResponse.order,
dns01: authorizationsResponse.challenges.filterIt(
it.`type` == ACMEChallengeType.DNS01
)[0],
# getting the first element is safe since we checked that authorizationsResponse.challenges.len != 0
)
proc requestCheck*(
self: ACMEApi, checkURL: Uri, checkKind: ACMECheckKind, key: KeyPair, kid: Kid
): Future[ACMECheckResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestCheck"):
let acmeResponse = await self.get(checkURL)
let retryAfter =
try:
parseInt(acmeResponse.headers.keyOrError("Retry-After")).seconds
except ValueError:
DefaultChalCompletedRetryTime
case checkKind
of ACMEOrderCheck:
try:
ACMECheckResponse(
kind: checkKind,
orderStatus: parseEnum[ACMEOrderStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
of ACMEChallengeCheck:
try:
ACMECheckResponse(
kind: checkKind,
chalStatus: parseEnum[ACMEChallengeStatus](acmeResponse.body["status"].getStr),
retryAfter: retryAfter,
)
except ValueError:
raise newException(
ACMEError, "Invalid order status: " & acmeResponse.body["status"].getStr
)
proc sendChallengeCompleted*(
self: ACMEApi, chalURL: Uri, key: KeyPair, kid: Kid
): Future[ACMECompletedResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("sendChallengeCompleted"):
let payload =
await self.createSignedAcmeRequest(chalURL, %*{}, key, kid = Opt.some(kid))
let acmeResponse = await self.post(chalURL, payload)
acmeResponse.body.to(ACMECompletedResponse)
proc checkChallengeCompleted*(
self: ACMEApi,
checkURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
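# poll the ACME server until the challenge leaves the pending state or retries run out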
for i in 0 .. retries:
let checkResponse = await self.requestCheck(checkURL, ACMEChallengeCheck, key, kid)
case checkResponse.chalStatus
of ACMEChallengeStatus.PENDING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
of ACMEChallengeStatus.VALID:
return true
else:
raise newException(
ACMEError,
"Failed challenge completion: expected 'valid', got '" &
$checkResponse.chalStatus & "'",
)
return false
proc completeChallenge*(
self: ACMEApi,
chalURL: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let completedResponse = await self.sendChallengeCompleted(chalURL, key, kid)
# check until acme server is done (poll validation)
return await self.checkChallengeCompleted(chalURL, key, kid, retries = retries)
proc requestFinalize*(
self: ACMEApi, domain: Domain, finalize: Uri, key: KeyPair, kid: Kid
): Future[ACMEFinalizeResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestFinalize"):
let payload = await self.createSignedAcmeRequest(
finalize, %*{"csr": createCSR(domain)}, key, kid = Opt.some(kid)
)
let acmeResponse = await self.post(finalize, payload)
# server responds with updated order response
acmeResponse.body.to(ACMEFinalizeResponse)
proc checkCertFinalized*(
self: ACMEApi,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultChalCompletedRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
for i in 0 .. retries:
let checkResponse = await self.requestCheck(order, ACMEOrderCheck, key, kid)
case checkResponse.orderStatus
of ACMEOrderStatus.VALID:
return true
of ACMEOrderStatus.PROCESSING:
await sleepAsync(checkResponse.retryAfter) # try again after some delay
else:
error "Failed certificate finalization",
description = "expected 'valid', got '" & $checkResponse.orderStatus & "'"
return false # do not try again
return false
proc certificateFinalized*(
self: ACMEApi,
domain: Domain,
finalize: Uri,
order: Uri,
key: KeyPair,
kid: Kid,
retries: int = DefaultFinalizeRetries,
): Future[bool] {.async: (raises: [ACMEError, CancelledError]).} =
let finalizeResponse = await self.requestFinalize(domain, finalize, key, kid)
# keep checking order until cert is valid (done)
return await self.checkCertFinalized(order, key, kid, retries = retries)
proc requestGetOrder*(
self: ACMEApi, order: Uri
): Future[ACMEOrderResponse] {.async: (raises: [ACMEError, CancelledError]).} =
handleError("requestGetOrder"):
let acmeResponse = await self.get(order)
acmeResponse.body.to(ACMEOrderResponse)
proc downloadCertificate*(
self: ACMEApi, order: Uri
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let orderResponse = await self.requestGetOrder(order)
handleError("downloadCertificate"):
let rawResponse = await HttpClientRequestRef
.get(self.session, orderResponse.certificate)
.get()
.send()
ACMECertificateResponse(
rawCertificate: bytesToString(await rawResponse.getBodyBytes()),
certificateExpiry: parse(orderResponse.expires, "yyyy-MM-dd'T'HH:mm:ss'Z'"),
)
proc close*(self: ACMEApi) {.async: (raises: [CancelledError]).} =
await self.session.closeWait()
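For orientation, a minimal sketch of how these calls chain together; the domain, the DNS publication step, and the import paths are illustrative assumptions, not part of this file:

```nim
import chronos, uri
import ./api # this module
import ../../crypto/crypto # KeyPair, PKScheme, newRng

proc demo() {.async.} =
  let acme = ACMEApi.new() # defaults to the production LetsEncryptURL
  let key = KeyPair.random(PKScheme.RSA, newRng()[]).get()
  # new-account signs with the full JWK and yields the account kid
  let account = await acme.requestRegister(key)
  # create an order and pick out its dns-01 challenge
  let chal =
    await acme.requestChallenge(@[Domain("example.libp2p.direct")], key, account.kid)
  # ...publish the challenge token in DNS, then ask the server to validate:
  discard await acme.completeChallenge(parseUri(chal.dns01.url), key, account.kid)
  await acme.close()

waitFor demo()
```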

View File

@@ -0,0 +1,86 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import uri
import chronos, results, bio, chronicles
import ./api, ./utils
import ../../crypto/crypto
import ../../crypto/rsa
export api
type KeyAuthorization* = string
type ACMEClient* = ref object
api: ACMEApi
key*: KeyPair
kid*: Kid
logScope:
topics = "libp2p acme client"
proc new*(
T: typedesc[ACMEClient],
rng: ref HmacDrbgContext = newRng(),
api: ACMEApi = ACMEApi.new(acmeServerURL = parseUri(LetsEncryptURL)),
key: Opt[KeyPair] = Opt.none(KeyPair),
kid: Kid = Kid(""),
): T {.raises: [].} =
let key = key.valueOr:
KeyPair.random(PKScheme.RSA, rng[]).get()
T(api: api, key: key, kid: kid)
proc getOrInitKid*(
self: ACMEClient
): Future[Kid] {.async: (raises: [ACMEError, CancelledError]).} =
if self.kid.len == 0:
let registerResponse = await self.api.requestRegister(self.key)
self.kid = registerResponse.kid
return self.kid
proc genKeyAuthorization*(self: ACMEClient, token: string): KeyAuthorization =
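## DNS-01 record value: base64url(SHA-256(token & "." & JWK thumbprint)), per RFC 8555 §8.4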
base64UrlEncode(@(sha256.digest((token & "." & thumbprint(self.key)).toByteSeq).data))
proc getChallenge*(
self: ACMEClient, domains: seq[api.Domain]
): Future[ACMEChallengeResponseWrapper] {.async: (raises: [ACMEError, CancelledError]).} =
await self.api.requestChallenge(domains, self.key, await self.getOrInitKid())
proc getCertificate*(
self: ACMEClient, domain: api.Domain, challenge: ACMEChallengeResponseWrapper
): Future[ACMECertificateResponse] {.async: (raises: [ACMEError, CancelledError]).} =
let chalURL = parseUri(challenge.dns01.url)
let orderURL = parseUri(challenge.order)
let finalizeURL = parseUri(challenge.finalize)
trace "sending challenge completed notification"
discard
await self.api.sendChallengeCompleted(chalURL, self.key, await self.getOrInitKid())
trace "checking for completed challenge"
let completed =
await self.api.checkChallengeCompleted(chalURL, self.key, await self.getOrInitKid())
if not completed:
raise
newException(ACMEError, "Failed to signal ACME server about challenge completion")
trace "waiting for certificate to be finalized"
let finalized = await self.api.certificateFinalized(
domain, finalizeURL, orderURL, self.key, await self.getOrInitKid()
)
if not finalized:
raise newException(ACMEError, "Failed to finalize certificate for domain " & domain)
trace "downloading certificate"
await self.api.downloadCertificate(orderURL)
proc close*(self: ACMEClient) {.async: (raises: [CancelledError]).} =
await self.api.close()
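A minimal usage sketch, assuming the DNS record for the token is published out of band; the wildcard domain and module path are illustrative:

```nim
import chronos
import ./client # this module; re-exports ./api

proc demo() {.async.} =
  let acme = ACMEClient.new() # fresh RSA key; account is registered lazily
  let challenge = await acme.getChallenge(@[Domain("*.example.libp2p.direct")])
  # value to publish in the _acme-challenge TXT record
  let keyAuth = acme.genKeyAuthorization(challenge.dns01.token)
  echo "publish TXT value: ", keyAuth
  # ...once the record is live, drive validation and download the certificate:
  let cert = await acme.getCertificate(Domain("*.example.libp2p.direct"), challenge)
  echo "got certificate, ", cert.rawCertificate.len, " bytes"
  await acme.close()

waitFor demo()
```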

View File

@@ -0,0 +1,39 @@
import uri
import chronos, chronos/apps/http/httpclient, json
import ./api, ./utils
export api
type MockACMEApi* = ref object of ACMEApi
mockedResponses*: seq[HTTPResponse]
proc new*(
T: typedesc[MockACMEApi]
): Future[T] {.async: (raises: [ACMEError, CancelledError]).} =
let directory = ACMEDirectory(
newNonce: LetsEncryptURL & "/new-nonce",
newOrder: LetsEncryptURL & "/new-order",
newAccount: LetsEncryptURL & "/new-account",
)
MockACMEApi(
session: HttpSessionRef.new(),
directory: Opt.some(directory),
acmeServerURL: parseUri(LetsEncryptURL),
)
method requestNonce*(
self: MockACMEApi
): Future[Nonce] {.async: (raises: [ACMEError, CancelledError]).} =
return $self.acmeServerURL & "/acme/1234"
method post*(
self: MockACMEApi, uri: Uri, payload: string
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
method get*(
self: MockACMEApi, uri: Uri
): Future[HTTPResponse] {.async: (raises: [ACMEError, HttpError, CancelledError]).} =
result = self.mockedResponses[0]
self.mockedResponses.delete(0)
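A sketch of how a test might seed the mock; the module path and queued body are illustrative:

```nim
import uri, json
import chronos, chronos/apps/http/httpclient
import ./mockapi # assumed module name for this file

proc demo() {.async.} =
  let mockApi = await MockACMEApi.new()
  # responses are consumed in FIFO order by the overridden get/post methods
  mockApi.mockedResponses.add(
    HTTPResponse(body: %*{"status": "valid"}, headers: HttpTable.init())
  )
  let resp = await mockApi.get(parseUri(LetsEncryptURL & "/acme/order/1"))
  assert resp.body["status"].getStr == "valid"

waitFor demo()
```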

View File

@@ -0,0 +1,67 @@
import base64, strutils, chronos/apps/http/httpclient, json
import ../../errors
import ../../transports/tls/certificate_ffi
import ../../transports/tls/certificate
import ../../crypto/crypto
import ../../crypto/rsa
type ACMEError* = object of LPError
proc keyOrError*(table: HttpTable, key: string): string {.raises: [ValueError].} =
if not table.contains(key):
raise newException(ValueError, "key " & key & " not present in headers")
table.getString(key)
proc base64UrlEncode*(data: seq[byte]): string =
## Encodes data using base64url (RFC 4648 §5) — no padding, URL-safe
var encoded = base64.encode(data, safe = true)
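# base64 padding is at most two '=' characters, so stripping the suffix twice removes it entirely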
encoded.removeSuffix("=")
encoded.removeSuffix("=")
return encoded
proc thumbprint*(key: KeyPair): string =
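## RFC 7638 JWK thumbprint: SHA-256 over the canonical JWK members ("e", "kty", "n"), base64url-encoded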
doAssert key.seckey.scheme == PKScheme.RSA, "unsupported keytype"
let pubkey = key.pubkey.rsakey
let nArray = @(getArray(pubkey.buffer, pubkey.key.n, pubkey.key.nlen))
let eArray = @(getArray(pubkey.buffer, pubkey.key.e, pubkey.key.elen))
let n = base64UrlEncode(nArray)
let e = base64UrlEncode(eArray)
let keyJson = %*{"e": e, "kty": "RSA", "n": n}
let digest = sha256.digest($keyJson)
return base64UrlEncode(@(digest.data))
proc getResponseBody*(
response: HttpClientResponseRef
): Future[JsonNode] {.async: (raises: [ACMEError, CancelledError]).} =
try:
let bodyBytes = await response.getBodyBytes()
if bodyBytes.len > 0:
return bytesToString(bodyBytes).parseJson()
return %*{} # empty body
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
except Exception as exc: # this is required for nim 1.6
raise
newException(ACMEError, "Unexpected error occurred while getting body bytes", exc)
proc createCSR*(domain: string): string {.raises: [ACMEError].} =
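## generate an RSA key and a DER-encoded CSR for `domain` via the certificate FFI, returned as URL-safe base64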
var certKey: cert_key_t
var certCtx: cert_context_t
var derCSR: ptr cert_buffer = nil
let personalizationStr = "libp2p_autotls"
if cert_init_drbg(
personalizationStr.cstring, personalizationStr.len.csize_t, certCtx.addr
) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to initialize certCtx")
if cert_generate_key(certCtx, certKey.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to generate cert key")
if cert_signing_req(domain.cstring, certKey, derCSR.addr) != CERT_SUCCESS:
raise newException(ACMEError, "Failed to create CSR")
base64.encode(derCSR.toSeq, safe = true)

libp2p/autotls/service.nim (new file)
View File

@@ -0,0 +1,226 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import net, results, json, sequtils
import chronos/apps/http/httpclient, chronos, chronicles, bearssl/rand
import
./acme/client,
./utils,
../crypto/crypto,
../transports/tcptransport,
../nameresolving/dnsresolver,
../peeridauth/client,
../peerinfo,
../switch,
../utils/heartbeat,
../wire
logScope:
topics = "libp2p autotls"
export LetsEncryptURL, AutoTLSError
const
DefaultDnsServers* =
@[
initTAddress("1.1.1.1:53"),
initTAddress("1.0.0.1:53"),
initTAddress("[2606:4700:4700::1111]:53"),
]
DefaultRenewCheckTime* = 1.hours
DefaultRenewBufferTime = 1.hours
AutoTLSBroker* = "registration.libp2p.direct"
AutoTLSDNSServer* = "libp2p.direct"
HttpOk* = 200
HttpCreated* = 201
# NoneIp is needed because nim 1.6.16 can't do proper generic inference
NoneIp = Opt.none(IpAddress)
type SigParam = object
k: string
v: seq[byte]
type AutotlsCert* = ref object
cert*: TLSCertificate
expiry*: Moment
type AutotlsConfig* = ref object
acmeServerURL*: Uri
dnsResolver*: DnsResolver
ipAddress: Opt[IpAddress]
renewCheckTime*: Duration
renewBufferTime*: Duration
type AutotlsService* = ref object of Service
acmeClient: ACMEClient
bearer*: Opt[BearerToken]
brokerClient: PeerIDAuthClient
cert*: Opt[AutotlsCert]
certReady*: AsyncEvent
config: AutotlsConfig
managerFut: Future[void]
peerInfo: PeerInfo
rng: ref HmacDrbgContext
proc new*(T: typedesc[AutotlsCert], cert: TLSCertificate, expiry: Moment): T =
T(cert: cert, expiry: expiry)
proc getCertWhenReady*(
self: AutotlsService
): Future[TLSCertificate] {.async: (raises: [AutoTLSError, CancelledError]).} =
await self.certReady.wait()
return self.cert.get.cert
proc new*(
T: typedesc[AutotlsConfig],
ipAddress: Opt[IpAddress] = NoneIp,
nameServers: seq[TransportAddress] = DefaultDnsServers,
acmeServerURL: Uri = parseUri(LetsEncryptURL),
renewCheckTime: Duration = DefaultRenewCheckTime,
renewBufferTime: Duration = DefaultRenewBufferTime,
): T =
T(
dnsResolver: DnsResolver.new(nameServers),
acmeServerURL: acmeServerURL,
ipAddress: ipAddress,
renewCheckTime: renewCheckTime,
renewBufferTime: renewBufferTime,
)
proc new*(
T: typedesc[AutotlsService],
rng: ref HmacDrbgContext = newRng(),
config: AutotlsConfig = AutotlsConfig.new(),
): T =
T(
acmeClient: ACMEClient.new(api = ACMEApi.new(acmeServerURL = config.acmeServerURL)),
brokerClient: PeerIDAuthClient.new(),
bearer: Opt.none(BearerToken),
cert: Opt.none(AutotlsCert),
certReady: newAsyncEvent(),
config: config,
managerFut: nil,
peerInfo: nil,
rng: rng,
)
method setup*(
self: AutotlsService, switch: Switch
): Future[bool] {.base, async: (raises: [LPError, CancelledError]).} =
trace "Setting up AutotlsService"
let hasBeenSetup = await procCall Service(self).setup(switch)
if hasBeenSetup:
if self.config.ipAddress.isNone():
try:
self.config.ipAddress = Opt.some(getPublicIPAddress())
except AutoTLSError as exc:
error "Failed to get public IP address", err = exc.msg
return false
switch.addTransport(TcpTransport.new(upgrade = Upgrade()))
await switch.startTransports()
self.peerInfo = switch.peerInfo
self.managerFut = self.run(switch)
return hasBeenSetup
method issueCertificate(
self: AutotlsService
) {.base, async: (raises: [AutoTLSError, ACMEError, PeerIDAuthError, CancelledError]).} =
trace "Issuing certificate"
assert not self.peerInfo.isNil(), "Cannot issue new certificate: peerInfo not set"
# generate autotls domain string: "*.{peerID}.libp2p.direct"
let baseDomain =
api.Domain(encodePeerId(self.peerInfo.peerId) & "." & AutoTLSDNSServer)
let domain = api.Domain("*." & baseDomain)
let acmeClient = self.acmeClient
trace "Requesting ACME challenge"
let dns01Challenge = await acmeClient.getChallenge(@[domain])
let keyAuth = acmeClient.genKeyAuthorization(dns01Challenge.dns01.token)
let strMultiaddresses: seq[string] = self.peerInfo.addrs.mapIt($it)
let payload = %*{"value": keyAuth, "addresses": strMultiaddresses}
let registrationURL = parseUri("https://" & AutoTLSBroker & "/v1/_acme-challenge")
trace "Sending challenge to AutoTLS broker"
let (bearer, response) =
await self.brokerClient.send(registrationURL, self.peerInfo, payload, self.bearer)
if self.bearer.isNone():
# save bearer token for future requests
self.bearer = Opt.some(bearer)
if response.status != HttpOk:
raise newException(
AutoTLSError, "Failed to authenticate with AutoTLS Broker at " & AutoTLSBroker
)
debug "Waiting for DNS record to be set"
let dnsSet = await checkDNSRecords(
self.config.dnsResolver, self.config.ipAddress.get(), baseDomain, keyAuth
)
if not dnsSet:
raise newException(AutoTLSError, "DNS records not set")
debug "Notifying challenge completion to ACME and downloading cert"
let certResponse = await acmeClient.getCertificate(domain, dns01Challenge)
debug "Installing certificate"
let newCert =
try:
AutotlsCert.new(
TLSCertificate.init(certResponse.rawCertificate),
asMoment(certResponse.certificateExpiry),
)
except TLSStreamProtocolError:
raise newException(AutoTLSError, "Could not parse downloaded certificates")
self.cert = Opt.some(newCert)
self.certReady.fire()
debug "Certificate installed"
method run*(
self: AutotlsService, switch: Switch
) {.async: (raises: [CancelledError]).} =
heartbeat "Certificate Management", self.config.renewCheckTime:
if self.cert.isNone():
try:
await self.issueCertificate()
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to issue certificate", err = exc.msg
break
# AutotlsService will renew the cert 1h before it expires
let cert = self.cert.get
let waitTime = cert.expiry - Moment.now - self.config.renewBufferTime
if waitTime <= self.config.renewBufferTime:
try:
await self.issueCertificate()
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to renew certificate", err = exc.msg
break
method stop*(
self: AutotlsService, switch: Switch
): Future[bool] {.async: (raises: [CancelledError]).} =
let hasBeenStopped = await procCall Service(self).stop(switch)
if hasBeenStopped:
await self.acmeClient.close()
await self.brokerClient.close()
await self.managerFut.cancelAndWait()
self.managerFut = nil
return hasBeenStopped

libp2p/autotls/utils.nim

@@ -0,0 +1,109 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
{.push public.}
import net, strutils
from times import DateTime, toTime, toUnix
import chronos, stew/base36, chronicles
import
./acme/client,
../errors,
../peerid,
../multihash,
../cid,
../multicodec,
../nameresolving/dnsresolver
const
DefaultDnsRetries = 10
DefaultDnsRetryTime = 1.seconds
type AutoTLSError* = object of LPError
proc checkedGetPrimaryIPAddr*(): IpAddress {.raises: [AutoTLSError].} =
# Wrapper so callers don't have to catch bare Exception:
# we still support Nim 1.6.16, and before Nim 2 getPrimaryIPAddr had no explicit .raises. pragma
try:
return getPrimaryIPAddr()
except Exception as exc:
raise newException(AutoTLSError, "Error while getting primary IP address", exc)
proc isIPv4*(ip: IpAddress): bool =
ip.family == IpAddressFamily.IPv4
proc isPublic*(ip: IpAddress): bool {.raises: [AutoTLSError].} =
let ip = $ip
try:
not (
ip.startsWith("10.") or
(ip.startsWith("172.") and parseInt(ip.split(".")[1]) in 16 .. 31) or
ip.startsWith("192.168.") or ip.startsWith("127.") or ip.startsWith("169.254.")
)
except ValueError as exc:
raise newException(AutoTLSError, "Failed to parse IP address", exc)
proc getPublicIPAddress*(): IpAddress {.raises: [AutoTLSError].} =
let ip = checkedGetPrimaryIPAddr()
if not ip.isIPv4():
raise newException(AutoTLSError, "Host does not have an IPv4 address")
if not ip.isPublic():
raise newException(AutoTLSError, "Host does not have a public IPv4 address")
return ip
proc asMoment*(dt: DateTime): Moment =
let unixTime: int64 = dt.toTime.toUnix
return Moment.init(unixTime, Second)
proc encodePeerId*(peerId: PeerId): string {.raises: [AutoTLSError].} =
var mh: MultiHash
let decodeResult = MultiHash.decode(peerId.data, mh)
if decodeResult.isErr() or decodeResult.get() == -1:
raise
newException(AutoTLSError, "Failed to decode PeerId: invalid multihash format")
let cidResult = Cid.init(CIDv1, multiCodec("libp2p-key"), mh)
if cidResult.isErr():
raise newException(AutoTLSError, "Failed to initialize CID from multihash")
return Base36.encode(cidResult.get().data.buffer)
proc checkDNSRecords*(
dnsResolver: DnsResolver,
ipAddress: IpAddress,
baseDomain: api.Domain,
keyAuth: KeyAuthorization,
retries: int = DefaultDnsRetries,
): Future[bool] {.async: (raises: [AutoTLSError, CancelledError]).} =
# if the host IP address is 100.10.10.3, then the ip4Domain will be:
# 100-10-10-3.{peerIdBase36}.libp2p.direct
# and the ACME challenge TXT domain will be:
# _acme-challenge.{peerIdBase36}.libp2p.direct
let dashedIpAddr = ($ipAddress).replace(".", "-")
let acmeChalDomain = api.Domain("_acme-challenge." & baseDomain)
let ip4Domain = api.Domain(dashedIpAddr & "." & baseDomain)
var txt: seq[string]
var ip4: seq[TransportAddress]
for _ in 0 .. retries:
txt = await dnsResolver.resolveTxt(acmeChalDomain)
try:
ip4 = await dnsResolver.resolveIp(ip4Domain, 0.Port)
except CancelledError as exc:
raise exc
except CatchableError as exc:
error "Failed to resolve IP", description = exc.msg # retry
if txt.len > 0 and txt[0] == keyAuth and ip4.len > 0:
return true
await sleepAsync(DefaultDnsRetryTime)
return false
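To make the naming scheme concrete, a small sketch of the record names this proc queries (the peer ID label is a placeholder):
# for a host at 100.10.10.3:
doAssert "100.10.10.3".replace(".", "-") == "100-10-10-3"
# records checked on each retry:
#   TXT _acme-challenge.{peerIdBase36}.libp2p.direct   -> must equal keyAuth
#   A   100-10-10-3.{peerIdBase36}.libp2p.direct       -> must resolve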


@@ -15,7 +15,7 @@ runnableExamples:
{.push raises: [].}
import options, tables, chronos, chronicles, sequtils
import options, tables, chronos, chronicles, sequtils, uri
import
switch,
peerid,
@@ -23,25 +23,29 @@ import
stream/connection,
multiaddress,
crypto/crypto,
transports/[transport, tcptransport, memorytransport],
transports/[transport, tcptransport, wstransport, memorytransport],
muxers/[muxer, mplex/mplex, yamux/yamux],
protocols/[identify, secure/secure, secure/noise, rendezvous],
protocols/connectivity/[autonat/server, relay/relay, relay/client, relay/rtransport],
connmanager,
upgrademngrs/muxedupgrade,
observedaddrmanager,
autotls/service,
nameresolving/nameresolver,
errors,
utility
import services/wildcardresolverservice
export switch, peerid, peerinfo, connection, multiaddress, crypto, errors
export
switch, peerid, peerinfo, connection, multiaddress, crypto, errors, TLSPrivateKey,
TLSCertificate, TLSFlags, ServerFlags
const MemoryAutoAddress* = memorytransport.MemoryAutoAddress
type
TransportProvider* {.public.} =
proc(upgr: Upgrade, privateKey: PrivateKey): Transport {.gcsafe, raises: [].}
TransportProvider* {.public.} = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport {.gcsafe, raises: [].}
SecureProtocol* {.pure.} = enum
Noise
@@ -63,6 +67,7 @@ type
nameResolver: NameResolver
peerStoreCapacity: Opt[int]
autonat: bool
autotls: AutotlsService
circuitRelay: Relay
rdv: RendezVous
services: seq[Service]
@@ -154,7 +159,9 @@ proc withTransport*(
let switch = SwitchBuilder
.new()
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
TcpTransport.new(flags, upgr)
)
.build()
@@ -165,22 +172,34 @@ proc withTcpTransport*(
b: SwitchBuilder, flags: set[ServerFlags] = {}
): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
TcpTransport.new(flags, upgr)
)
proc withWsTransport*(
b: SwitchBuilder,
tlsPrivateKey: TLSPrivateKey = nil,
tlsCertificate: TLSCertificate = nil,
tlsFlags: set[TLSFlags] = {},
flags: set[ServerFlags] = {},
): SwitchBuilder =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
WsTransport.new(upgr, tlsPrivateKey, tlsCertificate, tlsFlags, flags)
)
when defined(libp2p_quic_support):
import transports/quictransport
proc withQuicTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
QuicTransport.new(upgr, privateKey)
)
proc withMemoryTransport*(b: SwitchBuilder): SwitchBuilder {.public.} =
b.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
MemoryTransport.new(upgr)
)
@@ -238,6 +257,12 @@ proc withAutonat*(b: SwitchBuilder): SwitchBuilder =
b.autonat = true
b
proc withAutotls*(
b: SwitchBuilder, config: AutotlsConfig = AutotlsConfig.new()
): SwitchBuilder {.public.} =
b.autotls = AutotlsService.new(config = config)
b
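A hedged usage sketch of the new builder option; the surrounding builder calls (withRng, withAddresses, withMplex, withNoise) are the usual ones from this module and are assumed here, not part of this diff:
let switch = SwitchBuilder
  .new()
  .withRng(newRng())
  .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
  .withTcpTransport()
  .withAutotls(AutotlsConfig.new(renewCheckTime = 30.minutes))
  .withMplex()
  .withNoise()
  .build()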
proc withCircuitRelay*(b: SwitchBuilder, r: Relay = Relay.new()): SwitchBuilder =
b.circuitRelay = r
b
@@ -289,10 +314,13 @@ proc build*(b: SwitchBuilder): Switch {.raises: [LPError], public.} =
ms = MultistreamSelect.new()
muxedUpgrade = MuxedUpgrade.new(b.muxers, secureManagerInstances, ms)
if not b.autotls.isNil():
b.services.insert(b.autotls, 0)
let transports = block:
var transports: seq[Transport]
for tProvider in b.transports:
transports.add(tProvider(muxedUpgrade, seckey))
transports.add(tProvider(muxedUpgrade, seckey, b.autotls))
transports
if b.secureManagers.len == 0:


@@ -10,6 +10,7 @@
## This module implements CID (Content IDentifier).
{.push raises: [].}
{.used.}
import tables, hashes
import multibase, multicodec, multihash, vbuffer, varint, results


@@ -140,7 +140,7 @@ proc triggerConnEvent*(
except CancelledError as exc:
raise exc
except CatchableError as exc:
warn "Exception in triggerConnEvents",
warn "Exception in triggerConnEvent",
description = exc.msg, peer = peerId, event = $event
proc addPeerEventHandler*(
@@ -186,7 +186,7 @@ proc expectConnection*(
if key in c.expectedConnectionsOverLimit:
raise newException(
AlreadyExpectingConnectionError,
"Already expecting an incoming connection from that peer",
"Already expecting an incoming connection from that peer: " & shortLog(p),
)
let future = Future[Muxer].Raising([CancelledError]).init()


@@ -85,8 +85,9 @@ proc init*(sig: var SkSignature, data: string): SkResult[void] =
var buffer: seq[byte]
try:
buffer = hexToSeqByte(data)
except ValueError:
return err("secp: Hex to bytes failed")
except ValueError as e:
let errMsg = "secp: Hex to bytes failed: " & e.msg
return err(errMsg.cstring)
init(sig, buffer)
proc init*(t: typedesc[SkPrivateKey], data: openArray[byte]): SkResult[SkPrivateKey] =


@@ -595,13 +595,13 @@ template exceptionToAssert(body: untyped): untyped =
try:
res = body
except OSError as exc:
raise exc
raise newException(OSError, "failure in exceptionToAssert: " & exc.msg, exc)
except IOError as exc:
raise exc
raise newException(IOError, "failure in exceptionToAssert: " & exc.msg, exc)
except Defect as exc:
raise exc
raise newException(Defect, "failure in exceptionToAssert: " & exc.msg, exc)
except Exception as exc:
raiseAssert exc.msg
raiseAssert "Exception captured in exceptionToAssert: " & exc.msg
when defined(nimHasWarnBareExcept):
{.pop.}
res
@@ -967,9 +967,9 @@ proc openStream*(
stream.flags.incl(Outbound)
stream.transp = transp
result = stream
except ResultError[ProtoError]:
except ResultError[ProtoError] as e:
await api.closeConnection(transp)
raise newException(DaemonLocalError, "Wrong message type!")
raise newException(DaemonLocalError, "Wrong message type: " & e.msg, e)
proc streamHandler(server: StreamServer, transp: StreamTransport) {.async.} =
# must not specify raised exceptions as this is StreamCallback from chronos
@@ -1023,10 +1023,10 @@ proc addHandler*(
api.servers.add(P2PServer(server: server, address: maddress))
except DaemonLocalError as e:
await removeHandler()
raise e
raise newException(DaemonLocalError, "Could not add stream handler: " & e.msg, e)
except TransportError as e:
await removeHandler()
raise e
raise newException(TransportError, "Could not add stream handler: " & e.msg, e)
except CancelledError as e:
await removeHandler()
raise e
@@ -1503,10 +1503,14 @@ proc pubsubSubscribe*(
result = ticket
except DaemonLocalError as exc:
await api.closeConnection(transp)
raise exc
raise newException(
DaemonLocalError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except TransportError as exc:
await api.closeConnection(transp)
raise exc
raise newException(
TransportError, "Could not subscribe to topic '" & topic & "': " & exc.msg, exc
)
except CancelledError as exc:
await api.closeConnection(transp)
raise exc


@@ -127,8 +127,8 @@ proc expandDnsAddr(
var peerIdBytes: seq[byte]
try:
peerIdBytes = lastPart.protoArgument().tryGet()
except ResultError[string]:
raiseAssert "expandDnsAddr failed in protoArgument: " & getCurrentExceptionMsg()
except ResultError[string] as e:
raiseAssert "expandDnsAddr failed in expandDnsAddr protoArgument: " & e.msg
let addrPeerId = PeerId.init(peerIdBytes).tryGet()
result.add((resolvedAddress[0 ..^ 2].tryGet(), Opt.some(addrPeerId)))
@@ -178,7 +178,7 @@ proc internalConnect(
dir = Direction.Out,
): Future[Muxer] {.async: (raises: [DialFailedError, CancelledError]).} =
if Opt.some(self.localPeerId) == peerId:
raise newException(DialFailedError, "can't dial self!")
raise newException(DialFailedError, "internalConnect can't dial self!")
# Ensure there's only one in-flight attempt per peer
let lock = self.dialLock.mgetOrPut(peerId.get(default(PeerId)), newAsyncLock())
@@ -186,8 +186,8 @@ proc internalConnect(
defer:
try:
lock.release()
except AsyncLockError:
raiseAssert "lock must have been acquired in line above"
except AsyncLockError as e:
raiseAssert "lock must have been acquired in line above: " & e.msg
if reuseConnection:
peerId.withValue(peerId):
@@ -198,7 +198,9 @@ proc internalConnect(
try:
self.connManager.getOutgoingSlot(forceDial)
except TooManyConnectionsError as exc:
raise newException(DialFailedError, exc.msg)
raise newException(
DialFailedError, "failed getOutgoingSlot in internalConnect: " & exc.msg, exc
)
let muxed =
try:
@@ -208,11 +210,15 @@ proc internalConnect(
raise exc
except CatchableError as exc:
slot.release()
raise newException(DialFailedError, exc.msg)
raise newException(
DialFailedError, "failed dialAndUpgrade in internalConnect: " & exc.msg, exc
)
slot.trackMuxer(muxed)
if isNil(muxed): # None of the addresses connected
raise newException(DialFailedError, "Unable to establish outgoing link")
raise newException(
DialFailedError, "Unable to establish outgoing link in internalConnect"
)
try:
self.connManager.storeMuxer(muxed)
@@ -228,7 +234,11 @@ proc internalConnect(
except CatchableError as exc:
trace "Failed to finish outgoing upgrade", description = exc.msg
await muxed.close()
raise newException(DialFailedError, "Failed to finish outgoing upgrade")
raise newException(
DialFailedError,
"Failed to finish outgoing upgrade in internalConnect: " & exc.msg,
exc,
)
method connect*(
self: Dialer,
@@ -260,7 +270,7 @@ method connect*(
if allowUnknownPeerId == false:
raise newException(
DialFailedError, "Address without PeerID and unknown peer id disabled!"
DialFailedError, "Address without PeerID and unknown peer id disabled in connect"
)
return
@@ -273,7 +283,7 @@ proc negotiateStream(
let selected = await MultistreamSelect.select(conn, protos)
if not protos.contains(selected):
await conn.closeWithEOF()
raise newException(DialFailedError, "Unable to select sub-protocol " & $protos)
raise newException(DialFailedError, "Unable to select sub-protocol: " & $protos)
return conn
@@ -289,13 +299,13 @@ method tryDial*(
try:
let mux = await self.dialAndUpgrade(Opt.some(peerId), addrs)
if mux.isNil():
raise newException(DialFailedError, "No valid multiaddress")
raise newException(DialFailedError, "No valid multiaddress in tryDial")
await mux.close()
return mux.connection.observedAddr
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "tryDial failed: " & exc.msg, exc)
method dial*(
self: Dialer, peerId: PeerId, protos: seq[string]
@@ -309,14 +319,17 @@ method dial*(
try:
let stream = await self.connManager.getStream(peerId)
if stream.isNil:
raise newException(DialFailedError, "Couldn't get muxed stream")
raise newException(
DialFailedError,
"Couldn't get muxed stream in dial for peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled"
trace "Dial canceled", description = exc.msg
raise exc
except CatchableError as exc:
trace "Error dialing", description = exc.msg
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "failed dial existing: " & exc.msg)
method dial*(
self: Dialer,
@@ -347,17 +360,20 @@ method dial*(
stream = await self.connManager.getStream(conn)
if isNil(stream):
raise newException(DialFailedError, "Couldn't get muxed stream")
raise newException(
DialFailedError,
"Couldn't get muxed stream in new dial for remote_peer_id: " & shortLog(peerId),
)
return await self.negotiateStream(stream, protos)
except CancelledError as exc:
trace "Dial canceled", conn
trace "Dial canceled", conn, description = exc.msg
await cleanup()
raise exc
except CatchableError as exc:
debug "Error dialing", conn, description = exc.msg
await cleanup()
raise newException(DialFailedError, exc.msg)
raise newException(DialFailedError, "failed new dial: " & exc.msg, exc)
method addTransport*(self: Dialer, t: Transport) =
self.transports &= t


@@ -113,7 +113,7 @@ proc add*(dm: DiscoveryManager, di: DiscoveryInterface) =
try:
query.peers.putNoWait(pa)
except AsyncQueueFullError as exc:
debug "Cannot push discovered peer to queue"
debug "Cannot push discovered peer to queue", description = exc.msg
proc request*(dm: DiscoveryManager, pa: PeerAttributes): DiscoveryQuery =
var query = DiscoveryQuery(attr: pa, peers: newAsyncQueue[PeerAttributes]())


@@ -10,6 +10,7 @@
## This module implements MultiCodec.
{.push raises: [].}
{.used.}
import tables, hashes
import vbuffer


@@ -22,6 +22,7 @@
## 2. MURMUR
{.push raises: [].}
{.used.}
import tables
import nimcrypto/[sha, sha2, keccak, blake2, hash, utils]
@@ -566,7 +567,7 @@ proc init*(mhtype: typedesc[MultiHash], data: string): MhResult[MultiHash] {.inl
proc init58*(mhtype: typedesc[MultiHash], data: string): MultiHash {.inline.} =
## Create MultiHash from BASE58 encoded string representation ``data``.
if MultiHash.decode(Base58.decode(data), result) == -1:
raise newException(MultihashError, "Incorrect MultiHash binary format")
raise newException(MultihashError, "Incorrect MultiHash binary format in init58")
proc cmp(a: openArray[byte], b: openArray[byte]): bool {.inline.} =
if len(a) != len(b):


@@ -11,8 +11,7 @@
import std/[oids, strformat]
import pkg/[chronos, chronicles, metrics]
import
./coder, ../muxer, ../../stream/[bufferstream, connection, streamseq], ../../peerinfo
import ./coder, ../muxer, ../../stream/[bufferstream, connection], ../../peerinfo
export connection
@@ -87,7 +86,7 @@ proc open*(s: LPChannel) {.async: (raises: [CancelledError, LPStreamError]).} =
raise exc
except LPStreamError as exc:
await s.conn.close()
raise exc
raise newException(LPStreamError, "Opening LPChannel failed: " & exc.msg, exc)
method closed*(s: LPChannel): bool =
s.closedLocal


@@ -12,6 +12,7 @@
import sequtils, std/[tables]
import chronos, chronicles, metrics, stew/[endians2, byteutils, objects]
import ../muxer, ../../stream/connection
import ../../utils/zeroqueue
export muxer
@@ -151,7 +152,7 @@ type
opened: bool
isSending: bool
sendQueue: seq[ToSend]
recvQueue: seq[byte]
recvQueue: ZeroQueue
isReset: bool
remoteReset: bool
closedRemotely: AsyncEvent
@@ -229,7 +230,6 @@ proc reset(channel: YamuxChannel, isLocal: bool = false) {.async: (raises: []).}
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.recvQueue = @[]
channel.sendWindow = 0
if not channel.closedLocally:
if isLocal and not channel.isSending:
@@ -257,7 +257,7 @@ proc updateRecvWindow(
return
let delta = channel.maxRecvWindow - inWindow
channel.recvWindow.inc(delta)
channel.recvWindow.inc(delta.int)
await channel.conn.write(YamuxHeader.windowUpdate(channel.id, delta.uint32))
trace "increasing the recvWindow", delta
@@ -279,7 +279,7 @@ method readOnce*(
newLPStreamConnDownError()
if channel.isEof:
raise newLPStreamRemoteClosedError()
if channel.recvQueue.len == 0:
if channel.recvQueue.isEmpty():
channel.receivedData.clear()
let
closedRemotelyFut = channel.closedRemotely.wait()
@@ -290,28 +290,23 @@ method readOnce*(
if not receivedDataFut.finished():
await receivedDataFut.cancelAndWait()
await closedRemotelyFut or receivedDataFut
if channel.closedRemotely.isSet() and channel.recvQueue.len == 0:
if channel.closedRemotely.isSet() and channel.recvQueue.isEmpty():
channel.isEof = true
return
0 # we return 0 to indicate that the channel is closed for reading from now on
let toRead = min(channel.recvQueue.len, nbytes)
var p = cast[ptr UncheckedArray[byte]](pbytes)
toOpenArray(p, 0, nbytes - 1)[0 ..< toRead] =
channel.recvQueue.toOpenArray(0, toRead - 1)
channel.recvQueue = channel.recvQueue[toRead ..^ 1]
let consumed = channel.recvQueue.consumeTo(pbytes, nbytes)
# We made some room in the recv buffer, let the peer know
await channel.updateRecvWindow()
channel.activity = true
return toRead
return consumed
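The ZeroQueue calls used above (push, isEmpty, consumeTo) suggest the following usage; this is a sketch inferred from this diff, not the queue's documented API:
var q: ZeroQueue
q.push(@[1'u8, 2'u8, 3'u8])          # enqueue received bytes
var buf: array[2, byte]
let n = q.consumeTo(addr buf[0], 2)  # copy up to 2 bytes out
doAssert n == 2 and not q.isEmpty()  # one byte remains queued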
proc gotDataFromRemote(
channel: YamuxChannel, b: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
channel.recvWindow -= b.len
channel.recvQueue = channel.recvQueue.concat(b)
channel.recvQueue.push(b)
channel.receivedData.fire()
when defined(libp2p_yamux_metrics):
libp2p_yamux_recv_queue.observe(channel.recvQueue.len.int64)
@@ -512,7 +507,15 @@ method close*(m: Yamux) {.async: (raises: []).} =
trace "Closing yamux"
let channels = toSeq(m.channels.values())
for channel in channels:
await channel.reset(isLocal = true)
for (d, s, fut) in channel.sendQueue:
fut.fail(newLPStreamEOFError())
channel.sendQueue = @[]
channel.sendWindow = 0
channel.closedLocally = true
channel.isReset = true
channel.opened = false
await channel.remoteClosed()
channel.receivedData.fire()
try:
await m.connection.write(YamuxHeader.goAway(NormalTermination))
except CancelledError as exc:
@@ -587,10 +590,12 @@ method handle*(m: Yamux) {.async: (raises: []).} =
let channel =
try:
m.channels[header.streamId]
except KeyError:
except KeyError as e:
raise newException(
YamuxError,
"Stream was cleaned up before handling data: " & $header.streamId,
"Stream was cleaned up before handling data: " & $header.streamId & " : " &
e.msg,
e,
)
if header.msgType == WindowUpdate:


@@ -78,23 +78,23 @@ proc getDnsResponse(
try:
await receivedDataFuture.wait(5.seconds) #unix default
except AsyncTimeoutError:
raise newException(IOError, "DNS server timeout")
except AsyncTimeoutError as e:
raise newException(IOError, "DNS server timeout: " & e.msg, e)
let rawResponse = sock.getMessage()
try:
parseResponse(string.fromBytes(rawResponse))
except IOError as exc:
raise exc
raise newException(IOError, "Failed to parse DNS response: " & exc.msg, exc)
except OSError as exc:
raise exc
raise newException(OSError, "Failed to parse DNS response: " & exc.msg, exc)
except ValueError as exc:
raise exc
raise newException(ValueError, "Failed to parse DNS response: " & exc.msg, exc)
except Exception as exc:
# Nim 1.6: parseResponse has a raises: [Exception, ..] because of
# https://github.com/nim-lang/Nim/commit/035134de429b5d99c5607c5fae912762bebb6008
# it can't actually raise though
raiseAssert exc.msg
raiseAssert "Exception parsing DNS response: " & exc.msg
finally:
await sock.closeWait()


@@ -11,6 +11,7 @@
{.push raises: [].}
{.push public.}
{.used.}
import
std/[hashes, strutils],


@@ -0,0 +1,345 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import base64, json, strutils, uri, times
import chronos, chronos/apps/http/httpclient, results, chronicles, bio
import ../peerinfo, ../crypto/crypto, ../varint
logScope:
topics = "libp2p peeridauth"
const
NimLibp2pUserAgent = "nim-libp2p"
PeerIDAuthPrefix* = "libp2p-PeerID"
ChallengeCharset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
ChallengeDefaultLen = 48
export Domain
type PeerIDAuthClient* = ref object of RootObj
session: HttpSessionRef
rng: ref HmacDrbgContext
type PeerIDAuthError* = object of LPError
type PeerIDAuthResponse* = object
status*: int
headers*: HttpTable
body*: seq[byte]
type BearerToken* = object
token*: string
expires*: Opt[DateTime]
type PeerIDAuthOpaque* = string
type PeerIDAuthSignature* = string
type PeerIDAuthChallenge* = string
type PeerIDAuthAuthenticationResponse* = object
challengeClient*: PeerIDAuthChallenge
opaque*: PeerIDAuthOpaque
serverPubkey*: PublicKey
type PeerIDAuthAuthorizationResponse* = object
sig*: PeerIDAuthSignature
bearer*: BearerToken
response*: PeerIDAuthResponse
type SigParam = object
k: string
v: seq[byte]
proc new*(
T: typedesc[PeerIDAuthClient], rng: ref HmacDrbgContext = newRng()
): PeerIDAuthClient =
PeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
proc sampleChar(
ctx: var HmacDrbgContext, choices: string
): char {.raises: [ValueError].} =
## Samples a random character from the input string using the DRBG context
if choices.len == 0:
raise newException(ValueError, "Cannot sample from an empty string")
var idx: uint32
ctx.generate(idx)
return choices[uint32(idx mod uint32(choices.len))]
proc randomChallenge(
rng: ref HmacDrbgContext, challengeLen: int = ChallengeDefaultLen
): PeerIDAuthChallenge {.raises: [PeerIDAuthError].} =
var rng = rng[]
var challenge = ""
try:
for _ in 0 ..< challengeLen:
challenge.add(rng.sampleChar(ChallengeCharset))
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to generate challenge", exc)
PeerIDAuthChallenge(challenge)
proc extractField(data, key: string): string {.raises: [PeerIDAuthError].} =
# Helper to extract a quoted value for a given key
for segment in data.split(","):
if key in segment:
return segment.split("=", 1)[1].strip(chars = {' ', '"'})
raise newException(PeerIDAuthError, "Failed to find " & key & " in " & data)
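For example (illustrative header value):
doAssert extractField("opaque=\"abc\", challenge-client=\"xyz\"", "challenge-client") == "xyz"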
proc genDataToSign(
parts: seq[SigParam], prefix: string = PeerIDAuthPrefix
): seq[byte] {.raises: [PeerIDAuthError].} =
var buf: seq[byte] = prefix.toByteSeq()
for p in parts:
let varintLen = PB.encodeVarint(hint(p.k.len + p.v.len + 1)).valueOr:
raise newException(PeerIDAuthError, "could not encode fields length to varint")
buf.add varintLen
buf.add (p.k & "=").toByteSeq()
buf.add p.v
return buf
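Sketch of the resulting layout for a single param (illustrative; SigParam and genDataToSign are private to this module):
# prefix, then per param: varint(len(k & "=" & v)) & k & "=" & v
let buf = genDataToSign(@[SigParam(k: "hostname", v: "example.com".toByteSeq())])
# "hostname=" (9 bytes) plus "example.com" (11 bytes) gives length 20, varint 0x14
doAssert buf.len == "libp2p-PeerID".len + 1 + 20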
proc getSigParams(
clientSender: bool, hostname: string, challenge: string, publicKey: PublicKey
): seq[SigParam] =
if clientSender:
@[
SigParam(k: "challenge-client", v: challenge.toByteSeq()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
SigParam(k: "server-public-key", v: publicKey.getBytes().get()),
]
else:
@[
SigParam(k: "challenge-server", v: challenge.toByteSeq()),
SigParam(k: "client-public-key", v: publicKey.getBytes().get()),
SigParam(k: "hostname", v: hostname.toByteSeq()),
]
proc sign(
privateKey: PrivateKey,
challenge: PeerIDAuthChallenge,
publicKey: PublicKey,
hostname: string,
clientSender: bool = true,
): PeerIDAuthSignature {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(clientSender, hostname, challenge, publicKey).genDataToSign()
PeerIDAuthSignature(
base64.encode(privateKey.sign(bytesToSign).get().getBytes(), safe = true)
)
proc checkSignature*(
serverSig: PeerIDAuthSignature,
serverPublicKey: PublicKey,
challengeServer: PeerIDAuthChallenge,
clientPublicKey: PublicKey,
hostname: string,
): bool {.raises: [PeerIDAuthError].} =
let bytesToSign =
getSigParams(false, hostname, challengeServer, clientPublicKey).genDataToSign()
var serverSignature: Signature
try:
if not serverSignature.init(base64.decode(serverSig).toByteSeq()):
raise newException(
PeerIDAuthError, "Failed to initialize Signature from base64 encoded sig"
)
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server's signature", exc)
serverSignature.verify(
bytesToSign.toOpenArray(0, bytesToSign.len - 1), serverPublicKey
)
method post*(
self: PeerIDAuthClient, uri: Uri, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]), base.} =
let rawResponse = await HttpClientRequestRef
.post(
self.session,
$uri,
body = payload,
headers = [
("Content-Type", "application/json"),
("User-Agent", NimLibp2pUserAgent),
("Authorization", authHeader),
],
)
.get()
.send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
method get*(
self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthResponse] {.
async: (raises: [PeerIDAuthError, HttpError, CancelledError]), base
.} =
if self.session.isNil():
raise newException(PeerIDAuthError, "Session is nil")
let req = HttpClientRequestRef.get(self.session, $uri).valueOr:
raise newException(PeerIDAuthError, "Could not get request obj")
let rawResponse = await req.send()
PeerIDAuthResponse(
status: rawResponse.status,
headers: rawResponse.headers,
body: await rawResponse.getBodyBytes(),
)
proc requestAuthentication*(
self: PeerIDAuthClient, uri: Uri
): Future[PeerIDAuthAuthenticationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let response =
try:
await self.get(uri)
except HttpError as exc:
raise newException(PeerIDAuthError, "Failed to start PeerID Auth", exc)
let wwwAuthenticate = response.headers.getString("WWW-Authenticate")
if wwwAuthenticate == "":
raise newException(PeerIDAuthError, "WWW-authenticate not present in response")
let serverPubkey: PublicKey =
try:
PublicKey.init(decode(extractField(wwwAuthenticate, "public-key")).toByteSeq()).valueOr:
raise newException(PeerIDAuthError, "Failed to initialize server public-key")
except ValueError as exc:
raise newException(PeerIDAuthError, "Failed to decode server public-key", exc)
PeerIDAuthAuthenticationResponse(
challengeClient: extractField(wwwAuthenticate, "challenge-client"),
opaque: extractField(wwwAuthenticate, "opaque"),
serverPubkey: serverPubkey,
)
proc pubkeyBytes*(pubkey: PublicKey): seq[byte] {.raises: [PeerIDAuthError].} =
try:
pubkey.getBytes().valueOr:
raise
newException(PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey")
except ValueError as exc:
raise newException(
PeerIDAuthError, "Failed to get bytes from PeerInfo's publicKey", exc
)
proc parse3339DateTime(
timeStr: string
): DateTime {.raises: [ValueError, TimeParseError].} =
let parts = timeStr.split('.')
let base = parse(parts[0], "yyyy-MM-dd'T'HH:mm:ss")
let millis = parseInt(parts[1].strip(chars = {'Z'}))
result = base + initDuration(milliseconds = millis)
proc requestAuthorization*(
self: PeerIDAuthClient,
peerInfo: PeerInfo,
uri: Uri,
challengeClient: PeerIDAuthChallenge,
challengeServer: PeerIDAuthChallenge,
serverPubkey: PublicKey,
opaque: PeerIDAuthOpaque,
payload: auto,
): Future[PeerIDAuthAuthorizationResponse] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
let clientPubkeyB64 = peerInfo.publicKey.pubkeyBytes().encode(safe = true)
let sig = peerInfo.privateKey.sign(challengeClient, serverPubkey, uri.hostname)
let authHeader =
PeerIDAuthPrefix & " public-key=\"" & clientPubkeyB64 & "\"" & ", opaque=\"" & opaque &
"\"" & ", challenge-server=\"" & challengeServer & "\"" & ", sig=\"" & sig & "\""
let response =
try:
await self.post(uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send Authorization for PeerID Auth", exc
)
let authenticationInfo = response.headers.getString("authentication-info")
let bearerExpires =
try:
Opt.some(parse3339DateTime(extractField(authenticationInfo, "expires")))
except ValueError, PeerIDAuthError, TimeParseError:
Opt.none(DateTime)
PeerIDAuthAuthorizationResponse(
sig: PeerIDAuthSignature(extractField(authenticationInfo, "sig")),
bearer: BearerToken(
token: extractField(authenticationInfo, "bearer"), expires: bearerExpires
),
response: response,
)
proc sendWithoutBearer(
self: PeerIDAuthClient, uri: Uri, peerInfo: PeerInfo, payload: auto
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
# Authenticate via the three-message handshake from the PeerID Auth spec
# https://github.com/libp2p/specs/blob/master/http/peer-id-auth.md
let authenticationResponse = await self.requestAuthentication(uri)
let challengeServer = self.rng.randomChallenge()
let authorizationResponse = await self.requestAuthorization(
peerInfo, uri, authenticationResponse.challengeClient, challengeServer,
authenticationResponse.serverPubkey, authenticationResponse.opaque, payload,
)
if not checkSignature(
authorizationResponse.sig, authenticationResponse.serverPubkey, challengeServer,
peerInfo.publicKey, uri.hostname,
):
raise newException(PeerIDAuthError, "Failed to validate server's signature")
return (authorizationResponse.bearer, authorizationResponse.response)
proc sendWithBearer(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: BearerToken,
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.expires.isSome() and DateTime(bearer.expires.get) <= now():
raise newException(PeerIDAuthError, "Bearer expired")
let authHeader = PeerIDAuthPrefix & " bearer=\"" & bearer.token & "\""
let response =
try:
await self.post(uri, $payload, authHeader)
except HttpError as exc:
raise newException(
PeerIDAuthError, "Failed to send request with bearer token for PeerID Auth", exc
)
return (bearer, response)
proc send*(
self: PeerIDAuthClient,
uri: Uri,
peerInfo: PeerInfo,
payload: auto,
bearer: Opt[BearerToken] = Opt.none(BearerToken),
): Future[(BearerToken, PeerIDAuthResponse)] {.
async: (raises: [PeerIDAuthError, CancelledError])
.} =
if bearer.isSome():
await self.sendWithBearer(uri, peerInfo, payload, bearer.get)
else:
await self.sendWithoutBearer(uri, peerInfo, payload)
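A hypothetical end-to-end use of the client; uri, payload, and peerInfo are example values, and the broker URL is only illustrative here:
let client = PeerIDAuthClient.new()
let uri = parseUri("https://registration.libp2p.direct/v1/_acme-challenge")
let payload = %*{"value": "example-key-authorization"}
# the first call runs the full handshake and yields a bearer token
let (bearer, resp) = await client.send(uri, peerInfo, payload)
# later calls can reuse the bearer and skip the handshake
discard await client.send(uri, peerInfo, payload, Opt.some(bearer))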
proc close*(
self: PeerIDAuthClient
): Future[void] {.async: (raises: [CancelledError]).} =
await self.session.closeWait()


@@ -0,0 +1,40 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import uri
import chronos, chronos/apps/http/httpclient
import ../crypto/crypto, ./client
export client
type MockPeerIDAuthClient* = ref object of PeerIDAuthClient
mockedStatus*: int
mockedHeaders*: HttpTable
mockedBody*: seq[byte]
proc new*(
T: typedesc[MockPeerIDAuthClient], rng: ref HmacDrbgContext
): MockPeerIDAuthClient {.raises: [PeerIDAuthError].} =
MockPeerIDAuthClient(session: HttpSessionRef.new(), rng: rng)
method post*(
self: MockPeerIDAuthClient, uri: Uri, payload: string, authHeader: string
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)
method get*(
self: MockPeerIDAuthClient, uri: Uri
): Future[PeerIDAuthResponse] {.async: (raises: [HttpError, CancelledError]).} =
PeerIDAuthResponse(
status: self.mockedStatus, headers: self.mockedHeaders, body: self.mockedBody
)


@@ -101,8 +101,10 @@ proc new*(
let pubkey =
try:
key.getPublicKey().tryGet()
except CatchableError:
raise newException(PeerInfoError, "invalid private key")
except CatchableError as e:
raise newException(
PeerInfoError, "invalid private key creating PeerInfo: " & e.msg, e
)
let peerId = PeerId.init(key).tryGet()


@@ -87,7 +87,7 @@ method dialMe*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(AutonatError, "read Dial response failed", e)
raise newException(AutonatError, "read Dial response failed: " & e.msg, e)
let response = getResponseOrRaise(AutonatMsg.decode(respBytes))


@@ -107,7 +107,9 @@ proc startSync*(
description = err.msg
raise newException(
DcutrError,
"Unexpected error when Dcutr initiator tried to connect to the remote peer", err,
"Unexpected error when Dcutr initiator tried to connect to the remote peer: " &
err.msg,
err,
)
finally:
if stream != nil:


@@ -148,7 +148,7 @@ proc dialPeerV1*(
raise exc
except LPStreamError as exc:
trace "error writing hop request", description = exc.msg
raise newException(RelayV1DialError, "error writing hop request", exc)
raise newException(RelayV1DialError, "error writing hop request: " & exc.msg, exc)
let msgRcvFromRelayOpt =
try:
@@ -158,7 +158,8 @@ proc dialPeerV1*(
except LPStreamError as exc:
trace "error reading stop response", description = exc.msg
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, "error reading stop response", exc)
raise
newException(RelayV1DialError, "error reading stop response: " & exc.msg, exc)
try:
let msgRcvFromRelay = msgRcvFromRelayOpt.valueOr:
@@ -173,10 +174,16 @@ proc dialPeerV1*(
)
except RelayV1DialError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise exc
raise newException(
RelayV1DialError,
"Hop can't open destination stream after sendStatus: " & exc.msg,
exc,
)
except ValueError as exc:
await sendStatus(conn, StatusV1.HopCantOpenDstStream)
raise newException(RelayV1DialError, exc.msg)
raise newException(
RelayV1DialError, "Exception reading msg in dialPeerV1: " & exc.msg, exc
)
result = conn
proc dialPeerV2*(
@@ -199,7 +206,8 @@ proc dialPeerV2*(
raise exc
except CatchableError as exc:
trace "error reading stop response", description = exc.msg
raise newException(RelayV2DialError, exc.msg)
raise
newException(RelayV2DialError, "Exception decoding HopMessage: " & exc.msg, exc)
if msgRcvFromRelay.msgType != HopMessageType.Status:
raise newException(RelayV2DialError, "Unexpected stop response")


@@ -76,7 +76,7 @@ proc dial*(
if not dstPeerId.init(($(sma[^1].tryGet())).split('/')[2]):
raise newException(RelayDialError, "Destination doesn't exist")
except RelayDialError as e:
raise e
raise newException(RelayDialError, "dial address not valid: " & e.msg, e)
except CatchableError:
raise newException(RelayDialError, "dial address not valid")
@@ -100,13 +100,13 @@ proc dial*(
raise e
except DialFailedError as e:
safeClose(rc)
raise newException(RelayDialError, "dial relay peer failed", e)
raise newException(RelayDialError, "dial relay peer failed: " & e.msg, e)
except RelayV1DialError as e:
safeClose(rc)
raise e
raise newException(RelayV1DialError, "dial relay v1 failed: " & e.msg, e)
except RelayV2DialError as e:
safeClose(rc)
raise e
raise newException(RelayV2DialError, "dial relay v2 failed: " & e.msg, e)
method dial*(
self: RelayTransport,
@@ -121,7 +121,8 @@ method dial*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(transport.TransportDialError, e.msg, e)
raise
newException(transport.TransportDialError, "Caught error in dial: " & e.msg, e)
method handles*(self: RelayTransport, ma: MultiAddress): bool {.gcsafe.} =
try:


@@ -69,8 +69,8 @@ proc bridge*(
while not connSrc.closed() and not connDst.closed():
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(futSrc, futDst)
except ValueError:
raiseAssert("Futures list is not empty")
except ValueError as e:
raiseAssert("Futures list is not empty: " & e.msg)
if futSrc.finished():
bufRead = await futSrc
if bufRead > 0:


@@ -0,0 +1,3 @@
import ./kademlia/kademlia
export kademlia


@@ -0,0 +1,6 @@
const
IdLength* = 32 # 256-bit IDs
k* = 20 # replication parameter
maxBuckets* = 256
const KadCodec* = "/ipfs/kad/1.0.0"


@@ -0,0 +1,74 @@
import chronos
import chronicles
import ../../peerid
import ./consts
import ./routingtable
import ../protocol
import ../../switch
import ./protobuf
import ../../utils/heartbeat
logScope:
topics = "kad-dht"
type KadDHT* = ref object of LPProtocol
switch: Switch
rng: ref HmacDrbgContext
rtable*: RoutingTable
maintenanceLoop: Future[void]
proc maintainBuckets(kad: KadDHT) {.async: (raises: [CancelledError]).} =
heartbeat "refresh buckets", 10.minutes:
debug "TODO: implement bucket maintenance"
proc new*(
T: typedesc[KadDHT], switch: Switch, rng: ref HmacDrbgContext = newRng()
): T {.raises: [].} =
var rtable = RoutingTable.init(switch.peerInfo.peerId)
let kad = T(rng: rng, switch: switch, rtable: rtable)
kad.codec = KadCodec
kad.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
while not conn.atEof:
let
buf = await conn.readLp(4096)
msg = Message.decode(buf).tryGet()
# TODO: handle msg.msgType
except CancelledError as exc:
raise exc
except CatchableError:
error "could not handle request",
peerId = conn.peerId, err = getCurrentExceptionMsg()
finally:
await conn.close()
return kad
method start*(
kad: KadDHT
): Future[void] {.async: (raises: [CancelledError], raw: true).} =
let fut = newFuture[void]()
fut.complete()
if kad.started:
warn "Starting kad-dht twice"
return fut
kad.maintenanceLoop = kad.maintainBuckets()
kad.started = true
info "kad-dht started"
fut
method stop*(kad: KadDHT): Future[void] {.async: (raises: [], raw: true).} =
if not kad.started:
return
kad.started = false
kad.maintenanceLoop.cancelSoon()
kad.maintenanceLoop = nil
return
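A hypothetical way to wire this into a switch, following the usual LPProtocol mount pattern (names outside this diff are assumed):
let switch = newStandardSwitch()
let kad = KadDHT.new(switch)
switch.mount(kad)
await switch.start()
await kad.start()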


@@ -0,0 +1,48 @@
import ../../peerid
import ./consts
import chronicles
import stew/byteutils
type
KeyType* {.pure.} = enum
Unhashed
Raw
PeerId
Key* = object
case kind*: KeyType
of KeyType.PeerId:
peerId*: PeerId
of KeyType.Raw, KeyType.Unhashed:
data*: array[IdLength, byte]
proc toKey*(s: seq[byte]): Key =
doAssert s.len == IdLength
var data: array[IdLength, byte]
for i in 0 ..< IdLength:
data[i] = s[i]
return Key(kind: KeyType.Raw, data: data)
proc toKey*(p: PeerId): Key =
return Key(kind: KeyType.PeerId, peerId: p)
proc getBytes*(k: Key): seq[byte] =
return
case k.kind
of KeyType.PeerId:
k.peerId.getBytes()
of KeyType.Raw, KeyType.Unhashed:
@(k.data)
template `==`*(a, b: Key): bool =
a.getBytes() == b.getBytes() and a.kind == b.kind
proc shortLog*(k: Key): string =
case k.kind
of KeyType.PeerId:
"PeerId:" & $k.peerId
of KeyType.Raw, KeyType.Unhashed:
$k.kind & ":" & toHex(k.data)
chronicles.formatIt(Key):
shortLog(it)


@@ -0,0 +1,159 @@
import ../../protobuf/minprotobuf
import ../../varint
import ../../utility
import results
import ../../multiaddress
import stew/objects
import stew/assign2
import options
type
Record* {.public.} = object
key*: Option[seq[byte]]
value*: Option[seq[byte]]
timeReceived*: Option[string]
MessageType* = enum
putValue = 0
getValue = 1
addProvider = 2
getProviders = 3
findNode = 4
ping = 5 # Deprecated
ConnectionType* = enum
notConnected = 0
connected = 1
canConnect = 2 # Unused
cannotConnect = 3 # Unused
Peer* {.public.} = object
id*: seq[byte]
addrs*: seq[MultiAddress]
connection*: ConnectionType
Message* {.public.} = object
msgType*: MessageType
key*: Option[seq[byte]]
record*: Option[Record]
closerPeers*: seq[Peer]
providerPeers*: seq[Peer]
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].}
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].}
proc encode*(record: Record): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.writeOpt(1, record.key)
pb.writeOpt(2, record.value)
pb.writeOpt(5, record.timeReceived)
pb.finish()
return pb
proc encode*(peer: Peer): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, peer.id)
for address in peer.addrs:
pb.write(2, address.data.buffer)
pb.write(3, uint32(ord(peer.connection)))
pb.finish()
return pb
proc encode*(msg: Message): ProtoBuffer {.raises: [].} =
var pb = initProtoBuffer()
pb.write(1, uint32(ord(msg.msgType)))
pb.writeOpt(2, msg.key)
msg.record.withValue(record):
pb.writeOpt(3, msg.record)
for peer in msg.closerPeers:
pb.write(8, peer.encode())
for peer in msg.providerPeers:
pb.write(9, peer.encode())
pb.finish()
return pb
proc writeOpt*[T](pb: var ProtoBuffer, field: int, opt: Option[T]) {.raises: [].} =
opt.withValue(v):
pb.write(field, v)
proc write*(pb: var ProtoBuffer, field: int, value: Record) {.raises: [].} =
pb.write(field, value.encode())
proc getOptionField[T: ProtoScalar | string | seq[byte]](
pb: ProtoBuffer, field: int, output: var Option[T]
): ProtoResult[void] =
var f: T
if ?pb.getField(field, f):
assign(output, some(f))
ok()
proc decode*(T: type Record, pb: ProtoBuffer): ProtoResult[Option[T]] =
var r: Record
?pb.getOptionField(1, r.key)
?pb.getOptionField(2, r.value)
?pb.getOptionField(5, r.timeReceived)
return ok(some(r))
proc decode*(T: type Peer, pb: ProtoBuffer): ProtoResult[Option[T]] =
var
p: Peer
id: seq[byte]
?pb.getRequiredField(1, p.id)
discard ?pb.getRepeatedField(2, p.addrs)
var connVal: uint32
if ?pb.getField(3, connVal):
var connType: ConnectionType
if not checkedEnumAssign(connType, connVal):
return err(ProtoError.BadWireType)
p.connection = connType
return ok(some(p))
proc decode*(T: type Message, buf: seq[byte]): ProtoResult[Option[T]] =
var
m: Message
key: seq[byte]
recPb: seq[byte]
closerPbs: seq[seq[byte]]
providerPbs: seq[seq[byte]]
var pb = initProtoBuffer(buf)
var msgTypeVal: uint32
?pb.getRequiredField(1, msgTypeVal)
var msgType: MessageType
if not checkedEnumAssign(msgType, msgTypeVal):
return err(ProtoError.BadWireType)
m.msgType = msgType
?pb.getOptionField(2, m.key)
if ?pb.getField(3, recPb):
assign(m.record, ?Record.decode(initProtoBuffer(recPb)))
discard ?pb.getRepeatedField(8, closerPbs)
for ppb in closerPbs:
let peerOpt = ?Peer.decode(initProtoBuffer(ppb))
peerOpt.withValue(peer):
m.closerPeers.add(peer)
discard ?pb.getRepeatedField(9, providerPbs)
for ppb in providerPbs:
let peer = ?Peer.decode(initProtoBuffer(ppb))
peer.withValue(peer):
m.providerPeers.add(peer)
return ok(some(m))


@@ -0,0 +1,129 @@
import algorithm
import bearssl/rand
import chronos
import chronicles
import ./consts
import ./keys
import ./xordistance
import ../../peerid
import sequtils
logScope:
topics = "kad-dht rtable"
type
NodeEntry* = object
nodeId*: Key
lastSeen*: Moment
Bucket* = object
peers*: seq[NodeEntry]
RoutingTable* = ref object
selfId*: Key
buckets*: seq[Bucket]
proc init*(T: typedesc[RoutingTable], selfId: Key): T =
return RoutingTable(selfId: selfId, buckets: @[])
proc bucketIndex*(selfId, key: Key): int =
return xorDistance(selfId, key).leadingZeros
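For intuition, a sketch using Unhashed keys so the XOR acts on the raw bytes rather than their SHA-256 digests:
var a, b: array[IdLength, byte]
b[0] = 0x01  # first differing bit is bit 7 of byte 0
let ka = Key(kind: KeyType.Unhashed, data: a)
let kb = Key(kind: KeyType.Unhashed, data: b)
doAssert bucketIndex(ka, kb) == 7  # seven leading zero bits in the distance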
proc peerIndexInBucket(bucket: var Bucket, nodeId: Key): Opt[int] =
for i, p in bucket.peers:
if p.nodeId == nodeId:
return Opt.some(i)
return Opt.none(int)
proc insert*(rtable: var RoutingTable, nodeId: Key): bool =
if nodeId == rtable.selfId:
return false # No self insertion
let idx = bucketIndex(rtable.selfId, nodeId)
if idx >= maxBuckets:
trace "cannot insert node: max buckets reached",
nodeId, bucketIdx = idx, maxBuckets
return false
if idx >= rtable.buckets.len:
# expand buckets lazily if needed
rtable.buckets.setLen(idx + 1)
var bucket = rtable.buckets[idx]
let keyx = peerIndexInBucket(bucket, nodeId)
if keyx.isSome:
bucket.peers[keyx.unsafeValue].lastSeen = Moment.now()
elif bucket.peers.len < k:
bucket.peers.add(NodeEntry(nodeId: nodeId, lastSeen: Moment.now()))
else:
# TODO: eviction policy goes here; for now we drop the node
trace "cannot insert node in bucket, dropping node",
nodeId, bucket = k, bucketIdx = idx
return false
rtable.buckets[idx] = bucket
return true
proc insert*(rtable: var RoutingTable, peerId: PeerId): bool =
insert(rtable, peerId.toKey())
proc findClosest*(rtable: RoutingTable, targetId: Key, count: int): seq[Key] =
var allNodes: seq[Key] = @[]
for bucket in rtable.buckets:
for p in bucket.peers:
allNodes.add(p.nodeId)
allNodes.sort(
proc(a, b: Key): int =
cmp(xorDistance(a, targetId), xorDistance(b, targetId))
)
return allNodes[0 ..< min(count, allNodes.len)]
proc findClosestPeers*(rtable: RoutingTable, targetId: Key, count: int): seq[PeerId] =
findClosest(rtable, targetId, count).mapIt(it.peerId)
proc isStale*(bucket: Bucket): bool =
if bucket.peers.len == 0:
return true
for p in bucket.peers:
if Moment.now() - p.lastSeen > 30.minutes:
return true
return false
proc randomKeyInBucketRange*(
selfId: Key, bucketIndex: int, rng: ref HmacDrbgContext
): Key =
var raw = selfId.getBytes()
# zero out higher bits
for i in 0 ..< bucketIndex:
let byteIdx = i div 8
let bitInByte = 7 - (i mod 8)
raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
# flip the target bit
let tgtByte = bucketIndex div 8
let tgtBitInByte = 7 - (bucketIndex mod 8)
raw[tgtByte] = raw[tgtByte] xor (1'u8 shl tgtBitInByte)
# randomize all less significant bits
let totalBits = raw.len * 8
let lsbStart = bucketIndex + 1
let lsbBytes = (totalBits - lsbStart + 7) div 8
var randomBuf = newSeq[byte](lsbBytes)
hmacDrbgGenerate(rng[], randomBuf)
for i in lsbStart ..< totalBits:
let byteIdx = i div 8
let bitInByte = 7 - (i mod 8)
let lsbByte = (i - lsbStart) div 8
let lsbBit = 7 - ((i - lsbStart) mod 8)
let randBit = (randomBuf[lsbByte] shr lsbBit) and 1
if randBit == 1:
raw[byteIdx] = raw[byteIdx] or (1'u8 shl bitInByte)
else:
raw[byteIdx] = raw[byteIdx] and not (1'u8 shl bitInByte)
return raw.toKey()


@@ -0,0 +1,55 @@
import ./consts
import ./keys
import nimcrypto/sha2
import ../../peerid
type XorDistance* = array[IdLength, byte]
proc countLeadingZeroBits*(b: byte): int =
for i in 0 .. 7:
if (b and (0x80'u8 shr i)) != 0:
return i
return 8
proc leadingZeros*(dist: XorDistance): int =
for i in 0 ..< dist.len:
if dist[i] != 0:
return i * 8 + countLeadingZeroBits(dist[i])
return dist.len * 8
proc cmp*(a, b: XorDistance): int =
for i in 0 ..< IdLength:
if a[i] < b[i]:
return -1
elif a[i] > b[i]:
return 1
return 0
proc `<`*(a, b: XorDistance): bool =
cmp(a, b) < 0
proc `<=`*(a, b: XorDistance): bool =
cmp(a, b) <= 0
proc hashFor(k: Key): seq[byte] =
return
@(
case k.kind
of KeyType.PeerId:
sha256.digest(k.peerId.getBytes()).data
of KeyType.Raw:
sha256.digest(k.data).data
of KeyType.Unhashed:
k.data
)
proc xorDistance*(a, b: Key): XorDistance =
let hashA = a.hashFor()
let hashB = b.hashFor()
var response: XorDistance
for i in 0 ..< hashA.len:
response[i] = hashA[i] xor hashB[i]
return response
proc xorDistance*(a: PeerId, b: Key): XorDistance =
xorDistance(a.toKey(), b)
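Two quick properties of the metric (illustrative): self-distance is zero, and XOR makes it symmetric.
let a = toKey(newSeq[byte](IdLength))
var raw = newSeq[byte](IdLength)
raw[0] = 0xFF
let b = raw.toKey()
doAssert xorDistance(a, a).leadingZeros == IdLength * 8  # d(a, a) == 0
doAssert xorDistance(a, b) == xorDistance(b, a)          # symmetry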


@@ -16,35 +16,68 @@ import ./core, ../../stream/connection
logScope:
topics = "libp2p perf"
type PerfClient* = ref object of RootObj
type Stats* = object
isFinal*: bool
uploadBytes*: uint
downloadBytes*: uint
duration*: Duration
type PerfClient* = ref object
stats: Stats
proc new*(T: typedesc[PerfClient]): T =
return T()
proc currentStats*(p: PerfClient): Stats =
return p.stats
proc perf*(
_: typedesc[PerfClient],
conn: Connection,
sizeToWrite: uint64 = 0,
sizeToRead: uint64 = 0,
p: PerfClient, conn: Connection, sizeToWrite: uint64 = 0, sizeToRead: uint64 = 0
): Future[Duration] {.public, async: (raises: [CancelledError, LPStreamError]).} =
var
size = sizeToWrite
buf: array[PerfSize, byte]
let start = Moment.now()
trace "starting performance benchmark", conn, sizeToWrite, sizeToRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite
p.stats = Stats()
await conn.close()
try:
var
size = sizeToWrite
buf: array[PerfSize, byte]
size = sizeToRead
let start = Moment.now()
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead
await conn.write(toSeq(toBytesBE(sizeToRead)))
while size > 0:
let toWrite = min(size, PerfSize)
await conn.write(buf[0 ..< toWrite])
size -= toWrite.uint
let duration = Moment.now() - start
trace "finishing performance benchmark", duration
return duration
# set stats using a copy to avoid a race condition
var statsCopy = p.stats
statsCopy.duration = Moment.now() - start
statsCopy.uploadBytes += toWrite.uint
p.stats = statsCopy
await conn.close()
size = sizeToRead
while size > 0:
let toRead = min(size, PerfSize)
await conn.readExactly(addr buf[0], toRead.int)
size = size - toRead.uint
# set stats using a copy to avoid a race condition
var statsCopy = p.stats
statsCopy.duration = Moment.now() - start
statsCopy.downloadBytes += toRead.uint
p.stats = statsCopy
except CancelledError as e:
raise e
except LPStreamError as e:
raise e
finally:
p.stats.isFinal = true
trace "finishing performance benchmark", duration = p.stats.duration
return p.stats.duration
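A hypothetical polling loop over the new stats API (connection setup omitted; the interval is arbitrary):
let client = PerfClient.new()
let fut = client.perf(conn, sizeToWrite = 1024 * 1024)
while not fut.finished():
  let s = client.currentStats()
  trace "perf progress", upload = s.uploadBytes, download = s.downloadBytes
  await sleepAsync(250.milliseconds)
echo "took: ", await fut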


@@ -185,14 +185,17 @@ method init*(f: FloodSub) =
try:
await f.handleConn(conn, proto)
except CancelledError as exc:
trace "Unexpected cancellation in floodsub handler", conn
trace "Unexpected cancellation in floodsub handler", conn, description = exc.msg
raise exc
f.handler = handler
f.codec = FloodSubCodec
method publish*(
f: FloodSub, topic: string, data: seq[byte]
f: FloodSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.async: (raises: []).} =
# base always returns 0
discard await procCall PubSub(f).publish(topic, data)


@@ -218,7 +218,7 @@ method init*(g: GossipSub) =
try:
await g.handleConn(conn, proto)
except CancelledError as exc:
trace "Unexpected cancellation in gossipsub handler", conn
trace "Unexpected cancellation in gossipsub handler", conn, description = exc.msg
raise exc
g.handler = handler
@@ -702,24 +702,27 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =
# Send unsubscribe (in reverse order to sub/graft)
procCall PubSub(g).onTopicSubscription(topic, subscribed)
method publish*(
proc makePeersForPublishUsingCustomConn(
g: GossipSub, topic: string
): HashSet[PubSubPeer] =
assert g.customConnCallbacks.isSome,
"GossipSub misconfiguration: useCustomConn was true, but no customConnCallbacks provided"
trace "Selecting peers via custom connection callback"
return g.customConnCallbacks.get().customPeerSelectionCB(
g.gossipsub.getOrDefault(topic),
g.subscribedDirectPeers.getOrDefault(topic),
g.mesh.getOrDefault(topic),
g.fanout.getOrDefault(topic),
)
proc makePeersForPublishDefault(
g: GossipSub, topic: string, data: seq[byte]
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base always returns 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
): HashSet[PubSubPeer] =
var peers: HashSet[PubSubPeer]
# add always direct peers
# Always include direct peers
peers.incl(g.subscribedDirectPeers.getOrDefault(topic))
if topic in g.topics: # if we're subscribed use the mesh
@@ -769,6 +772,34 @@ method publish*(
# ultimately is not sent)
g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)
return peers
method publish*(
g: GossipSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.async: (raises: []).} =
logScope:
topic
if topic.len <= 0: # data could be 0/empty
debug "Empty topic, skipping publish"
return 0
# base always returns 0
discard await procCall PubSub(g).publish(topic, data)
trace "Publishing message on topic", data = data.shortLog
let pubParams = publishParams.get(PublishParams())
let peers =
if pubParams.useCustomConn:
g.makePeersForPublishUsingCustomConn(topic)
else:
g.makePeersForPublishDefault(topic, data)
if peers.len == 0:
let topicPeers = g.gossipsub.getOrDefault(topic).toSeq()
debug "No peers for topic, skipping publish",
@@ -802,12 +833,18 @@ method publish*(
trace "Dropping already-seen message"
return 0
g.mcache.put(msgId, msg)
if not pubParams.skipMCache:
g.mcache.put(msgId, msg)
if g.parameters.sendIDontWantOnPublish and isLargeMessage(msg, msgId):
g.sendIDontWant(msg, msgId, peers)
g.broadcast(peers, RPCMsg(messages: @[msg]), isHighPriority = true)
g.broadcast(
peers,
RPCMsg(messages: @[msg]),
isHighPriority = true,
useCustomConn = pubParams.useCustomConn,
)
if g.knownTopics.contains(topic):
libp2p_pubsub_messages_published.inc(peers.len.int64, labelValues = [topic])
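The optional `publishParams` argument keeps the default publish path unchanged while letting callers route through custom connections or keep a message out of the mcache (the Mix-protocol use case). A minimal sketch, assuming `gossipSub` was built with `customConnCallbacks` set and `message` is a `seq[byte]`:
```nim
# default behaviour, identical to the old two-argument publish
discard await gossipSub.publish("my-topic", message)

# route via the custom connection callback and skip mcache insertion
let params = PublishParams(useCustomConn: true, skipMCache: true)
discard await gossipSub.publish("my-topic", message, some(params))
```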

View File

@@ -305,9 +305,9 @@ proc handleIHave*(
proc handleIDontWant*(g: GossipSub, peer: PubSubPeer, iDontWants: seq[ControlIWant]) =
for dontWant in iDontWants:
for messageId in dontWant.messageIDs:
if peer.iDontWants[^1].len > 1000:
if peer.iDontWants[0].len >= IDontWantMaxCount:
break
peer.iDontWants[^1].incl(g.salt(messageId))
peer.iDontWants[0].incl(g.salt(messageId))
proc handleIWant*(
g: GossipSub, peer: PubSubPeer, iwants: seq[ControlIWant]
@@ -457,8 +457,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
prunes = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
except KeyError as e:
raiseAssert "have peers: " & e.msg
)
# avoid pruning peers we are currently grafting in this heartbeat
prunes.keepIf do(x: PubSubPeer) -> bool:
@@ -513,8 +513,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
var peers = toSeq(
try:
g.mesh[topic]
except KeyError:
raiseAssert "have peers"
except KeyError as e:
raiseAssert "have peers: " & e.msg
)
# grafting so high score has priority
peers.sort(byScore, SortOrder.Descending)
@@ -538,8 +538,8 @@ proc rebalanceMesh*(g: GossipSub, topic: string, metrics: ptr MeshMetrics = nil)
it.peerId notin backingOff:
avail.add(it)
# by spec, grab only 2
if avail.len > 1:
# by spec, grab only up to MaxOpportunisticGraftPeers
if avail.len >= MaxOpportunisticGraftPeers:
break
for peer in avail:
@@ -690,7 +690,7 @@ proc getGossipPeers*(g: GossipSub): Table[PubSubPeer, ControlMessage] =
for peer in allPeers:
control.mgetOrPut(peer, ControlMessage()).ihave.add(ihave)
for msgId in ihave.messageIDs:
peer.sentIHaves[^1].incl(msgId)
peer.sentIHaves[0].incl(msgId)
libp2p_gossipsub_cache_window_size.set(cacheWindowSize.int64)

View File

@@ -50,6 +50,9 @@ const
# rust sigp: https://github.com/sigp/rust-libp2p/blob/f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c/protocols/gossipsub/src/config.rs#L572
# go: https://github.com/libp2p/go-libp2p-pubsub/blob/08c17398fb11b2ab06ca141dddc8ec97272eb772/gossipsub.go#L155
IHaveMaxLength* = 5000
IDontWantMaxCount* = 1000
# maximum number of IDontWant messages in one slot of the history
MaxOpportunisticGraftPeers* = 2
type
TopicInfo* = object # gossip 1.1 related

View File

@@ -145,6 +145,10 @@ type
## we have to store it, which may be an attack vector.
## This callback can be used to reject topics we're not interested in
PublishParams* = object
useCustomConn*: bool
skipMCache*: bool
PubSub* {.public.} = ref object of LPProtocol
switch*: Switch # the switch used to dial/connect to peers
peerInfo*: PeerInfo # this peer's info
@@ -176,6 +180,7 @@ type
rng*: ref HmacDrbgContext
knownTopics*: HashSet[string]
customConnCallbacks*: Option[CustomConnectionCallbacks]
method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
## handle peer disconnects
@@ -187,7 +192,11 @@ method unsubscribePeer*(p: PubSub, peerId: PeerId) {.base, gcsafe.} =
libp2p_pubsub_peers.set(p.peers.len.int64)
proc send*(
p: PubSub, peer: PubSubPeer, msg: RPCMsg, isHighPriority: bool
p: PubSub,
peer: PubSubPeer,
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to the specified remote peer in the PubSub network.
##
@@ -200,13 +209,14 @@ proc send*(
## priority messages have been sent.
trace "sending pubsub message to peer", peer, payload = shortLog(msg)
peer.send(msg, p.anonymize, isHighPriority)
peer.send(msg, p.anonymize, isHighPriority, useCustomConn)
proc broadcast*(
p: PubSub,
sendPeers: auto, # Iterable[PubSubPeer]
msg: RPCMsg,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## This procedure attempts to send a `msg` (of type `RPCMsg`) to a specified group of peers in the PubSub network.
##
@@ -261,12 +271,12 @@ proc broadcast*(
if anyIt(sendPeers, it.hasObservers):
for peer in sendPeers:
p.send(peer, msg, isHighPriority)
p.send(peer, msg, isHighPriority, useCustomConn)
else:
# Fast path that only encodes message once
let encoded = encodeRpcMsg(msg, p.anonymize)
for peer in sendPeers:
asyncSpawn peer.sendEncoded(encoded, isHighPriority)
asyncSpawn peer.sendEncoded(encoded, isHighPriority, useCustomConn)
proc sendSubs*(
p: PubSub, peer: PubSubPeer, topics: openArray[string], subscribe: bool
@@ -373,8 +383,14 @@ method getOrCreatePeer*(
p.onPubSubPeerEvent(peer, event)
# create new pubsub peer
let pubSubPeer =
PubSubPeer.new(peerId, getConn, onEvent, protoNegotiated, p.maxMessageSize)
let pubSubPeer = PubSubPeer.new(
peerId,
getConn,
onEvent,
protoNegotiated,
p.maxMessageSize,
customConnCallbacks = p.customConnCallbacks,
)
debug "created new pubsub peer", peerId
p.peers[peerId] = pubSubPeer
@@ -558,7 +574,10 @@ proc subscribe*(p: PubSub, topic: string, handler: TopicHandler) {.public.} =
p.updateTopicMetrics(topic)
method publish*(
p: PubSub, topic: string, data: seq[byte]
p: PubSub,
topic: string,
data: seq[byte],
publishParams: Option[PublishParams] = none(PublishParams),
): Future[int] {.base, async: (raises: []), public.} =
## publish to a ``topic``
##
@@ -648,6 +667,8 @@ proc init*[PubParams: object | bool](
maxMessageSize: int = 1024 * 1024,
rng: ref HmacDrbgContext = newRng(),
parameters: PubParams = false,
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): P {.raises: [InitializationError], public.} =
let pubsub =
when PubParams is bool:
@@ -663,6 +684,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
else:
P(
@@ -678,6 +700,7 @@ proc init*[PubParams: object | bool](
maxMessageSize: maxMessageSize,
rng: rng,
topicsHigh: int.high,
customConnCallbacks: customConnCallbacks,
)
proc peerEventHandler(

View File

@@ -95,6 +95,21 @@ type
# Task for processing non-priority message queue.
sendNonPriorityTask: Future[void]
CustomConnCreationProc* = proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].}
CustomPeerSelectionProc* = proc(
allPeers: HashSet[PubSubPeer],
directPeers: HashSet[PubSubPeer],
meshPeers: HashSet[PubSubPeer],
fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] {.gcsafe, raises: [].}
CustomConnectionCallbacks* = object
customConnCreationCB*: CustomConnCreationProc
customPeerSelectionCB*: CustomPeerSelectionProc
PubSubPeer* = ref object of RootObj
getConn*: GetConn # callback to establish a new send connection
onEvent*: OnEvent # Connectivity updates for peer
@@ -123,6 +138,7 @@ type
maxNumElementsInNonPriorityQueue*: int
# The max number of elements allowed in the non-priority queue.
disconnected: bool
customConnCallbacks*: Option[CustomConnectionCallbacks]
RPCHandler* =
proc(peer: PubSubPeer, data: seq[byte]): Future[void] {.async: (raises: []).}
@@ -214,10 +230,10 @@ proc handle*(p: PubSubPeer, conn: Connection) {.async: (raises: []).} =
conn, peer = p, closed = conn.closed, description = exc.msg
finally:
await conn.close()
except CancelledError:
except CancelledError as e:
# This is a top-level procedure which runs as a separate task, so it
# does not need to propagate CancelledError.
trace "Unexpected cancellation in PubSubPeer.handle"
trace "Unexpected cancellation in PubSubPeer.handle", description = e.msg
finally:
debug "exiting pubsub read loop", conn, peer = p, closed = conn.closed
@@ -250,7 +266,7 @@ proc connectOnce(
await p.getConn().wait(5.seconds)
except AsyncTimeoutError as error:
trace "getConn timed out", description = error.msg
raise (ref LPError)(msg: "Cannot establish send connection")
raise (ref LPError)(msg: "Cannot establish send connection: " & error.msg)
# When the send channel goes up, subscriptions need to be sent to the
# remote peer - if we had multiple channels up and one goes down, all
@@ -356,21 +372,43 @@ proc sendMsgSlow(p: PubSubPeer, msg: seq[byte]) {.async: (raises: [CancelledErro
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
await sendMsgContinue(conn, conn.writeLp(msg))
proc sendMsg(p: PubSubPeer, msg: seq[byte]): Future[void] {.async: (raises: []).} =
if p.sendConn != nil and not p.sendConn.closed():
# Fast path that avoids copying msg (which happens for {.async.})
let conn = p.sendConn
proc sendMsg(
p: PubSubPeer, msg: seq[byte], useCustomConn: bool = false
): Future[void] {.async: (raises: []).} =
type ConnectionType = enum
ctCustom
ctSend
ctSlow
trace "sending encoded msg to peer", conn, encoded = shortLog(msg)
var slowPath = false
let (conn, connType) =
if useCustomConn and p.customConnCallbacks.isSome:
let address = p.address
(
p.customConnCallbacks.get().customConnCreationCB(address, p.peerId, p.codec),
ctCustom,
)
elif p.sendConn != nil and not p.sendConn.closed():
(p.sendConn, ctSend)
else:
slowPath = true
(nil, ctSlow)
if not slowPath:
trace "sending encoded msg to peer",
conntype = $connType, conn = conn, encoded = shortLog(msg)
let f = conn.writeLp(msg)
if not f.completed():
sendMsgContinue(conn, f)
else:
f
else:
trace "sending encoded msg to peer via slow path"
sendMsgSlow(p, msg)
proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[void] =
proc sendEncoded*(
p: PubSubPeer, msg: seq[byte], isHighPriority: bool, useCustomConn: bool = false
): Future[void] =
## Asynchronously sends an encoded message to a specified `PubSubPeer`.
##
## Parameters:
@@ -399,7 +437,7 @@ proc sendEncoded*(p: PubSubPeer, msg: seq[byte], isHighPriority: bool): Future[v
maxSize = p.maxMessageSize, msgSize = msg.len
Future[void].completed()
elif isHighPriority or emptyQueues:
let f = p.sendMsg(msg)
let f = p.sendMsg(msg, useCustomConn)
if not f.finished:
p.rpcmessagequeue.sendPriorityQueue.addLast(f)
when defined(pubsubpeer_queue_metrics):
@@ -458,7 +496,11 @@ iterator splitRPCMsg(
trace "message too big to sent", peer, rpcMsg = shortLog(currentRPCMsg)
proc send*(
p: PubSubPeer, msg: RPCMsg, anonymize: bool, isHighPriority: bool
p: PubSubPeer,
msg: RPCMsg,
anonymize: bool,
isHighPriority: bool,
useCustomConn: bool = false,
) {.raises: [].} =
## Asynchronously sends an `RPCMsg` to a specified `PubSubPeer` with an option for anonymization.
##
@@ -489,11 +531,11 @@ proc send*(
if encoded.len > p.maxMessageSize and msg.messages.len > 1:
for encodedSplitMsg in splitRPCMsg(p, msg, p.maxMessageSize, anonymize):
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority)
asyncSpawn p.sendEncoded(encodedSplitMsg, isHighPriority, useCustomConn)
else:
# If the message size is within limits, send it as is
trace "sending msg to peer", peer = p, rpcMsg = shortLog(msg)
asyncSpawn p.sendEncoded(encoded, isHighPriority)
asyncSpawn p.sendEncoded(encoded, isHighPriority, useCustomConn)
proc canAskIWant*(p: PubSubPeer, msgId: MessageId): bool =
for sentIHave in p.sentIHaves.mitems():
@@ -552,6 +594,8 @@ proc new*(
maxMessageSize: int,
maxNumElementsInNonPriorityQueue: int = DefaultMaxNumElementsInNonPriorityQueue,
overheadRateLimitOpt: Opt[TokenBucket] = Opt.none(TokenBucket),
customConnCallbacks: Option[CustomConnectionCallbacks] =
none(CustomConnectionCallbacks),
): T =
result = T(
getConn: getConn,
@@ -563,6 +607,7 @@ proc new*(
overheadRateLimitOpt: overheadRateLimitOpt,
rpcmessagequeue: RpcMessageQueue.new(),
maxNumElementsInNonPriorityQueue: maxNumElementsInNonPriorityQueue,
customConnCallbacks: customConnCallbacks,
)
result.sentIHaves.addFirst(default(HashSet[MessageId]))
result.iDontWants.addFirst(default(HashSet[SaltedId]))
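End to end, an application supplies both callbacks and passes them to `PubSub.init`, which forwards them to every `PubSubPeer.new`. A hedged sketch; `dialViaMix` and the mesh-plus-direct selection policy are hypothetical stand-ins:
```nim
proc myConnCreation(
    destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection {.gcsafe, raises: [].} =
  dialViaMix(destAddr, destPeerId, codec) # hypothetical application dialer

proc myPeerSelection(
    allPeers, directPeers, meshPeers, fanoutPeers: HashSet[PubSubPeer]
): HashSet[PubSubPeer] {.gcsafe, raises: [].} =
  meshPeers + directPeers # e.g. prefer mesh and direct peers

let callbacks = CustomConnectionCallbacks(
  customConnCreationCB: myConnCreation, customPeerSelectionCB: myPeerSelection
)
# then: GossipSub.init(..., customConnCallbacks = some(callbacks))
```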

View File

@@ -419,8 +419,8 @@ proc save(
)
rdv.namespaces[nsSalted].add(rdv.registered.high)
# rdv.registerEvent.fire()
except KeyError:
doAssert false, "Should have key"
except KeyError as e:
doAssert false, "Should have key: " & e.msg
proc register(rdv: RendezVous, conn: Connection, r: Register): Future[void] =
trace "Received Register", peerId = conn.peerId, ns = r.ns

View File

@@ -15,7 +15,7 @@ import results
import chronos, chronicles
import
../protocol,
../../stream/streamseq,
../../utils/zeroqueue,
../../stream/connection,
../../multiaddress,
../../peerinfo
@@ -32,7 +32,7 @@ type
SecureConn* = ref object of Connection
stream*: Connection
buf: StreamSeq
buf: ZeroQueue
func shortLog*(conn: SecureConn): auto =
try:
@@ -110,8 +110,8 @@ proc handleConn(
fut2 = sconn.join()
try: # https://github.com/status-im/nim-chronos/issues/516
discard await race(fut1, fut2)
except ValueError:
raiseAssert("Futures list is not empty")
except ValueError as e:
raiseAssert("Futures list is not empty: " & e.msg)
# at least one join() completed, cancel pending one, if any
if not fut1.finished:
await fut1.cancelAndWait()
@@ -174,22 +174,21 @@ method readOnce*(
if s.isEof:
raise newLPStreamEOFError()
if s.buf.data().len() == 0:
if s.buf.isEmpty:
try:
let buf = await s.readMessage() # Always returns >0 bytes or raises
s.activity = true
s.buf.add(buf)
s.buf.push(buf)
except LPStreamEOFError as err:
s.isEof = true
await s.close()
raise err
raise newException(LPStreamEOFError, "Secure connection EOF: " & err.msg, err)
except CancelledError as exc:
raise exc
except LPStreamError as err:
debug "Error while reading message from secure connection, closing.",
error = err.name, message = err.msg, connection = s
await s.close()
raise err
raise newException(LPStreamError, "Secure connection read error: " & err.msg, err)
var p = cast[ptr UncheckedArray[byte]](pbytes)
return s.buf.consumeTo(toOpenArray(p, 0, nbytes - 1))
return s.buf.consumeTo(pbytes, nbytes)

View File

@@ -55,7 +55,7 @@ proc tryStartingDirectConn(
if not isRelayed.get(false) and address.isPublicMA():
return await tryConnect(address)
except CatchableError as err:
debug "Failed to create direct connection.", err = err.msg
debug "Failed to create direct connection.", description = err.msg
continue
return false
@@ -91,7 +91,7 @@ proc newConnectedPeerHandler(
except CancelledError as err:
raise err
except CatchableError as err:
debug "Hole punching failed during dcutr", err = err.msg
debug "Hole punching failed during dcutr", description = err.msg
method setup*(
self: HPService, switch: Switch
@@ -104,7 +104,7 @@ method setup*(
let dcutrProto = Dcutr.new(switch)
switch.mount(dcutrProto)
except LPError as err:
error "Failed to mount Dcutr", err = err.msg
error "Failed to mount Dcutr", description = err.msg
self.newConnectedPeerHandler = proc(
peerId: PeerId, event: PeerEvent

View File

@@ -10,10 +10,9 @@
{.push raises: [].}
import std/strformat
import stew/byteutils
import chronos, chronicles, metrics
import ../stream/connection
import ./streamseq
import ../utils/zeroqueue
export connection
@@ -24,7 +23,7 @@ const BufferStreamTrackerName* = "BufferStream"
type BufferStream* = ref object of Connection
readQueue*: AsyncQueue[seq[byte]] # read queue for managing backpressure
readBuf*: StreamSeq # overflow buffer for readOnce
readBuf: ZeroQueue # zero queue buffer for readOnce
pushing*: bool # is there an ongoing push? (only allow one)
reading*: bool # is there an ongoing read? (only allow one)
pushedEof*: bool # eof marker has been put on readQueue
@@ -43,7 +42,7 @@ chronicles.formatIt(BufferStream):
shortLog(it)
proc len*(s: BufferStream): int =
s.readBuf.len + (if s.readQueue.len > 0: s.readQueue[0].len()
s.readBuf.len.int + (if s.readQueue.len > 0: s.readQueue[0].len()
else: 0)
method initStream*(s: BufferStream) =
@@ -62,7 +61,7 @@ proc new*(T: typedesc[BufferStream], timeout: Duration = DefaultConnectionTimeou
bufferStream
method pushData*(
s: BufferStream, data: seq[byte]
s: BufferStream, data: sink seq[byte]
) {.base, async: (raises: [CancelledError, LPStreamError]).} =
## Write bytes to internal read buffer, use this to fill up the
## buffer with data.
@@ -107,7 +106,7 @@ method pushEof*(
s.pushing = false
method atEof*(s: BufferStream): bool =
s.isEof and s.readBuf.len == 0
s.isEof and s.readBuf.isEmpty
method readOnce*(
s: BufferStream, pbytes: pointer, nbytes: int
@@ -118,20 +117,12 @@ method readOnce*(
if s.returnedEof:
raise newLPStreamEOFError()
var p = cast[ptr UncheckedArray[byte]](pbytes)
# First consume leftovers from previous read
var rbytes = s.readBuf.consumeTo(toOpenArray(p, 0, nbytes - 1))
if rbytes < nbytes and not s.isEof:
# There's space in the buffer - consume some data from the read queue
s.reading = true
if not s.isEof and s.readBuf.len < nbytes:
let buf =
try:
s.reading = true
await s.readQueue.popFirst()
except CancelledError as exc:
# Not very efficient, but shouldn't happen often
s.readBuf.assign(@(p.toOpenArray(0, rbytes - 1)) & @(s.readBuf.data))
raise exc
finally:
s.reading = false
@@ -141,28 +132,18 @@ method readOnce*(
trace "EOF", s
s.isEof = true
else:
let remaining = min(buf.len, nbytes - rbytes)
toOpenArray(p, rbytes, nbytes - 1)[0 ..< remaining] =
buf.toOpenArray(0, remaining - 1)
rbytes += remaining
if remaining < buf.len:
trace "add leftovers", s, len = buf.len - remaining
s.readBuf.add(buf.toOpenArray(remaining, buf.high))
if s.isEof and s.readBuf.len() == 0:
# We can clear the readBuf memory since it won't be used any more
s.readBuf = StreamSeq()
s.readBuf.push(buf)
let consumed = s.readBuf.consumeTo(pbytes, nbytes)
s.activity = true
# We want to return 0 exactly once - after that, we'll start raising instead -
# this is a bit nuts in a mixed exception / return value world, but allows the
# consumer of the stream to rely on the 0-byte read as a "regular" EOF marker
# (instead of _sometimes_ getting an exception).
s.returnedEof = rbytes == 0
s.returnedEof = consumed == 0
return rbytes
return consumed
method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true).} =
## close the stream and clear the buffer
@@ -171,7 +152,6 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
# First, make sure any new calls to `readOnce` and `pushData` etc will fail -
# there may already be such calls in the event queue however
s.isEof = true
s.readBuf = StreamSeq()
s.pushedEof = true
# Essentially we need to handle the following cases
@@ -199,8 +179,10 @@ method closeImpl*(s: BufferStream): Future[void] {.async: (raises: [], raw: true
elif s.pushing:
if not s.readQueue.empty():
discard s.readQueue.popFirstNoWait()
except AsyncQueueFullError, AsyncQueueEmptyError:
raiseAssert(getCurrentExceptionMsg())
except AsyncQueueFullError as e:
raiseAssert("closeImpl failed queue full: " & e.msg)
except AsyncQueueEmptyError as e:
raiseAssert("closeImpl failed queue empty: " & e.msg)
trace "Closed BufferStream", s

View File

@@ -34,8 +34,6 @@ when defined(libp2p_agents_metrics):
declareCounter libp2p_peers_traffic_read, "incoming traffic", labels = ["agent"]
declareCounter libp2p_peers_traffic_write, "outgoing traffic", labels = ["agent"]
declareCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
func shortLog*(conn: ChronosStream): auto =
try:
if conn == nil:

View File

@@ -52,6 +52,8 @@ func shortLog*(conn: Connection): string =
chronicles.formatIt(Connection):
shortLog(it)
declarePublicCounter libp2p_network_bytes, "total traffic", labels = ["direction"]
method initStream*(s: Connection) =
if s.objName.len == 0:
s.objName = ConnectionTrackerName

View File

@@ -113,9 +113,9 @@ method initStream*(s: LPStream) {.base.} =
trackCounter(s.objName)
trace "Stream created", s, objName = s.objName, dir = $s.dir
proc join*(
method join*(
s: LPStream
): Future[void] {.async: (raises: [CancelledError], raw: true), public.} =
): Future[void] {.base, async: (raises: [CancelledError], raw: true), public.} =
## Wait for the stream to be closed
s.closeEvent.wait()
@@ -135,9 +135,9 @@ method readOnce*(
## available
raiseAssert("[LPStream.readOnce] abstract method not implemented!")
proc readExactly*(
method readExactly*(
s: LPStream, pbytes: pointer, nbytes: int
): Future[void] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[void] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Waits for `nbytes` to be available, then read
## them and return them
if s.atEof:
@@ -171,9 +171,9 @@ proc readExactly*(
trace "couldn't read all bytes, incomplete data", s, nbytes, read
raise newLPStreamIncompleteError()
proc readLine*(
method readLine*(
s: LPStream, limit = 0, sep = "\r\n"
): Future[string] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[string] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## Reads up to `limit` bytes, or until a `sep` is found
# TODO replace with something that exploits buffering better
var lim = if limit <= 0: -1 else: limit
@@ -199,9 +199,9 @@ proc readLine*(
if len(result) == lim:
break
proc readVarint*(
method readVarint*(
conn: LPStream
): Future[uint64] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[uint64] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
var buffer: array[10, byte]
for i in 0 ..< len(buffer):
@@ -218,9 +218,9 @@ proc readVarint*(
if true: # can't end with a raise apparently
raise (ref InvalidVarintError)(msg: "Cannot parse varint")
proc readLp*(
method readLp*(
s: LPStream, maxSize: int
): Future[seq[byte]] {.async: (raises: [CancelledError, LPStreamError]), public.} =
): Future[seq[byte]] {.base, async: (raises: [CancelledError, LPStreamError]), public.} =
## read length prefixed msg, with the length encoded as a varint
let
length = await s.readVarint()
@@ -244,9 +244,11 @@ method write*(
# Write `msg` to stream, waiting for the write to be finished
raiseAssert("[LPStream.write] abstract method not implemented!")
proc writeLp*(
method writeLp*(
s: LPStream, msg: openArray[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
## Write `msg` with a varint-encoded length prefix
let vbytes = PB.toBytes(msg.len().uint64)
var buf = newSeqUninitialized[byte](msg.len() + vbytes.len)
@@ -254,9 +256,11 @@ proc writeLp*(
buf[vbytes.len ..< buf.len] = msg
s.write(buf)
proc writeLp*(
method writeLp*(
s: LPStream, msg: string
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
): Future[void] {.
base, async: (raises: [CancelledError, LPStreamError], raw: true), public
.} =
writeLp(s, msg.toOpenArrayByte(0, msg.high))
proc write*(
@@ -324,7 +328,7 @@ proc closeWithEOF*(s: LPStream): Future[void] {.async: (raises: []), public.} =
debug "Unexpected bytes while waiting for EOF", s
except CancelledError:
discard
except LPStreamEOFError:
trace "Expected EOF came", s
except LPStreamEOFError as e:
trace "Expected EOF came", s, description = e.msg
except LPStreamError as exc:
debug "Unexpected error while waiting for EOF", s, description = exc.msg

View File

@@ -55,7 +55,7 @@ type
connManager*: ConnManager
transports*: seq[Transport]
ms*: MultistreamSelect
acceptFuts: seq[Future[void]]
acceptFuts*: seq[Future[void]]
dialer*: Dial
peerStore*: PeerStore
nameResolver*: NameResolver
@@ -233,7 +233,7 @@ proc upgrader(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(UpgradeError, e.msg, e)
raise newException(UpgradeError, "catchable error in upgrader: " & e.msg, e)
proc upgradeMonitor(
switch: Switch, trans: Transport, conn: Connection, upgrades: AsyncSemaphore
@@ -256,7 +256,7 @@ proc upgradeMonitor(
await conn.close()
upgrades.release()
proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
proc accept*(s: Switch, transport: Transport) {.async: (raises: []).} =
## switch accept loop, run for every transport
##
@@ -275,7 +275,8 @@ proc accept(s: Switch, transport: Transport) {.async: (raises: []).} =
await transport.accept()
except CatchableError as exc:
slot.release()
raise exc
raise
newException(CatchableError, "failed to accept connection: " & exc.msg, exc)
slot.trackConnection(conn)
if isNil(conn):
# A nil connection means that we might have hit a
@@ -335,14 +336,9 @@ proc stop*(s: Switch) {.public, async: (raises: [CancelledError]).} =
trace "Switch stopped"
proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
## Start listening on every transport
if s.started:
warn "Switch has already been started"
return
debug "starting switch for peer", peerInfo = s.peerInfo
proc startTransports*(
s: Switch
) {.public, async: (raises: [CancelledError, LPError]).} =
var startFuts: seq[Future[void]]
for t in s.transports:
let addrs = s.peerInfo.listenAddrs.filterIt(t.handles(it))
@@ -364,9 +360,20 @@ proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
s.acceptFuts.add(s.accept(t))
s.peerInfo.listenAddrs &= t.addrs
proc start*(s: Switch) {.public, async: (raises: [CancelledError, LPError]).} =
## Start listening on every transport
if s.started:
warn "Switch has already been started"
return
debug "starting switch for peer", peerInfo = s.peerInfo
for service in s.services:
discard await service.setup(s)
await s.startTransports()
await s.peerInfo.update()
await s.ms.start()
s.started = true

View File

@@ -1,7 +1,8 @@
import std/sequtils
import pkg/chronos
import pkg/chronicles
import pkg/quic
import chronos
import chronicles
import metrics
import quic
import results
import ../multiaddress
import ../multicodec
@@ -58,6 +59,7 @@ method readOnce*(
result = min(nbytes, stream.cached.len)
copyMem(pbytes, addr stream.cached[0], result)
stream.cached = stream.cached[result ..^ 1]
libp2p_network_bytes.inc(result.int64, labelValues = ["in"])
except CatchableError as exc:
raise newLPStreamEOFError()
@@ -66,6 +68,7 @@ method write*(
stream: QuicStream, bytes: seq[byte]
) {.async: (raises: [CancelledError, LPStreamError]).} =
mapExceptions(await stream.stream.write(bytes))
libp2p_network_bytes.inc(bytes.len.int64, labelValues = ["out"])
{.pop.}
@@ -98,7 +101,7 @@ proc getStream*(
return QuicStream.new(stream, session.observedAddr, session.peerId)
except CatchableError as exc:
# TODO: incomingStream is using {.async.} with no raises
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(msg: "error in getStream: " & exc.msg, parent: exc)
method getWrapped*(self: QuicSession): P2PConnection =
nil
@@ -116,7 +119,7 @@ method newStream*(
try:
return await m.quicSession.getStream(Direction.Out)
except CatchableError as exc:
raise newException(MuxerError, exc.msg, exc)
raise newException(MuxerError, "error in newStream: " & exc.msg, exc)
proc handleStream(m: QuicMuxer, chann: QuicStream) {.async: (raises: []).} =
## call the muxer stream handler for this channel
@@ -233,11 +236,16 @@ method start*(
except QuicConfigError as exc:
doAssert false, "invalid quic setup: " & $exc.msg
except TLSCertificateError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(
msg: "tlscert error in quic start: " & exc.msg, parent: exc
)
except QuicError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise
(ref QuicTransportError)(msg: "quicerror in quic start: " & exc.msg, parent: exc)
except TransportOsError as exc:
raise (ref QuicTransportError)(msg: exc.msg, parent: exc)
raise (ref QuicTransportError)(
msg: "transport error in quic start: " & exc.msg, parent: exc
)
self.running = true
method stop*(transport: QuicTransport) {.async: (raises: []).} =
@@ -315,7 +323,7 @@ method dial*(
except CancelledError as e:
raise e
except CatchableError as e:
raise newException(QuicTransportDialError, e.msg, e)
raise newException(QuicTransportDialError, "error in quic dial: " & e.msg, e)
method upgrade*(
self: QuicTransport, conn: P2PConnection, peerId: Opt[PeerId]

View File

@@ -133,7 +133,9 @@ method start*(
try:
createStreamServer(ta, flags = self.flags)
except common.TransportError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "transport error in TcpTransport start:" & exc.msg, parent: exc
)
self.servers &= server
@@ -250,9 +252,13 @@ method accept*(
except TransportUseClosedError as exc:
raise newTransportClosedError(exc)
except TransportOsError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "TransportOs error in accept:" & exc.msg, parent: exc
)
except common.TransportError as exc: # Needed for chronos 4.0.0 support
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise (ref TcpTransportError)(
msg: "TransportError in accept: " & exc.msg, parent: exc
)
except CancelledError as exc:
cancelAcceptFuts()
raise exc
@@ -302,7 +308,8 @@ method dial*(
except CancelledError as exc:
raise exc
except CatchableError as exc:
raise (ref TcpTransportError)(msg: exc.msg, parent: exc)
raise
(ref TcpTransportError)(msg: "TcpTransport dial error: " & exc.msg, parent: exc)
# If `stop` is called after `connect` but before `await` returns, we might
# end up with a race condition where `stop` returns but not all connections
@@ -318,7 +325,7 @@ method dial*(
MultiAddress.init(transp.remoteAddress).expect("remote address is valid")
except TransportOsError as exc:
safeCloseWait(transp)
raise (ref TcpTransportError)(msg: exc.msg)
raise (ref TcpTransportError)(msg: "MultiAddress.init error in dial: " & exc.msg)
self.connHandler(transp, Opt.some(observedAddr), Direction.Out)

View File

@@ -118,8 +118,8 @@ proc makeASN1Time(time: Time): string {.inline.} =
try:
let f = initTimeFormat("yyyyMMddhhmmss")
format(time.utc(), f)
except TimeFormatParseError:
raiseAssert "time format is const and checked with test"
except TimeFormatParseError as e:
raiseAssert "time format is const and checked with test: " & e.msg
return str & "Z"
@@ -278,7 +278,7 @@ proc parse*(
validTo = parseCertTime($certParsed.valid_to)
except TimeParseError as e:
raise newException(
CertificateParsingError, "Failed to parse certificate validity time, " & $e.msg
CertificateParsingError, "Failed to parse certificate validity time: " & $e.msg, e
)
P2pCertificate(

View File

@@ -18,6 +18,7 @@ import
transport,
tcptransport,
../switch,
../autotls/service,
../builders,
../stream/[lpstream, connection, chronosstream],
../multiaddress,
@@ -243,7 +244,9 @@ method dial*(
raise e
except CatchableError as e:
safeCloseWait(transp)
raise newException(transport.TransportDialError, e.msg, e)
raise newException(
transport.TransportDialError, "error in dial TorTransport: " & e.msg, e
)
method start*(
self: TorTransport, addrs: seq[MultiAddress]
@@ -301,7 +304,7 @@ proc new*(
flags: set[ServerFlags] = {},
): TorSwitch {.raises: [LPError], public.} =
var builder = SwitchBuilder.new().withRng(rng).withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
proc(upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService): Transport =
TorTransport.new(torServer, flags, upgr)
)
if addresses.len != 0:

View File

@@ -160,7 +160,9 @@ method start*(
else:
HttpServer.create(address, handshakeTimeout = self.handshakeTimeout)
except CatchableError as exc:
raise (ref WsTransportError)(msg: exc.msg, parent: exc)
raise (ref WsTransportError)(
msg: "error in WsTransport start: " & exc.msg, parent: exc
)
self.httpservers &= httpserver
@@ -309,7 +311,9 @@ method accept*(
debug "OS Error", description = exc.msg
except CatchableError as exc:
info "Unexpected error accepting connection", description = exc.msg
raise newException(transport.TransportError, exc.msg, exc)
raise newException(
transport.TransportError, "Error in WsTransport accept: " & exc.msg, exc
)
method dial*(
self: WsTransport,
@@ -338,7 +342,9 @@ method dial*(
raise e
except CatchableError as e:
safeClose(transp)
raise newException(transport.TransportDialError, e.msg, e)
raise newException(
transport.TransportDialError, "error in WsTransport dial: " & e.msg, e
)
method handles*(t: WsTransport, address: MultiAddress): bool {.gcsafe, raises: [].} =
if procCall Transport(t).handles(address):

View File

@@ -54,8 +54,9 @@ when defined(libp2p_agents_metrics):
proc safeToLowerAscii*(s: string): Result[string, cstring] =
try:
ok(s.toLowerAscii())
except CatchableError:
err("toLowerAscii failed")
except CatchableError as e:
let errMsg = "toLowerAscii failed: " & e.msg
err(errMsg.cstring)
const
KnownLibP2PAgents* {.strdefine.} = "nim-libp2p"

View File

@@ -27,9 +27,9 @@ proc anyCompleted*[T](
if raceFut.completed:
return raceFut
requests.del(requests.find(raceFut))
except ValueError:
except ValueError as e:
raise newException(
AllFuturesFailedError, "None of the futures completed successfully"
AllFuturesFailedError, "None of the futures completed successfully: " & e.msg, e
)
except CancelledError as exc:
raise exc

View File

@@ -0,0 +1,84 @@
# Nim-Libp2p
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import std/deques
type Chunk = ref object
data: seq[byte]
size: int
start: int
template clone(c: Chunk): Chunk =
Chunk(data: c.data, size: c.size, start: c.start)
template newChunk(b: sink seq[byte]): Chunk =
Chunk(data: b, size: b.len, start: 0)
template len(c: Chunk): int =
c.size - c.start
type ZeroQueue* = object
# ZeroQueue is a queue structure optimized for efficient pushing and popping of
# byte sequences `seq[byte]` (called chunks). It is useful for streaming or buffering
# scenarios where chunks of binary data are accumulated and consumed incrementally.
chunks: Deque[Chunk]
proc clear*(q: var ZeroQueue) =
q.chunks.clear()
proc isEmpty*(q: ZeroQueue): bool =
return q.chunks.len() == 0
proc len*(q: ZeroQueue): int64 =
var l: int64
for b in q.chunks.items():
l += b.len()
return l
proc push*(q: var ZeroQueue, b: sink seq[byte]) =
if b.len > 0:
q.chunks.addLast(newChunk(b))
proc popChunk(q: var ZeroQueue, count: int): Chunk {.inline.} =
var first = q.chunks.popFirst()
# the first chunk holds at most the requested count of elements,
# so the queue returns this chunk whole (it may contain fewer than requested)
if first.len() <= count:
return first
# the first chunk has more elements than the requested count,
# so the queue returns a view of its first count elements, leaving the rest in the queue
var ret = first.clone()
ret.size = ret.start + count
first.start += count
q.chunks.addFirst(first)
return ret
proc consumeTo*(q: var ZeroQueue, pbytes: pointer, nbytes: int): int =
var consumed = 0
while consumed < nbytes and not q.isEmpty():
let chunk = q.popChunk(nbytes - consumed)
let dest = cast[pointer](cast[ByteAddress](pbytes) + consumed)
let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
copyMem(dest, offsetPtr, chunk.len())
consumed += chunk.len()
return consumed
proc popChunkSeq*(q: var ZeroQueue, count: int): seq[byte] =
if q.isEmpty:
return @[]
let chunk = q.popChunk(count)
var dest = newSeq[byte](chunk.len())
let offsetPtr = cast[ptr byte](cast[int](unsafeAddr chunk.data[0]) + chunk.start)
copyMem(dest[0].addr, offsetPtr, chunk.len())
return dest
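A minimal usage sketch: chunks are stored as-is on `push` and copied only once, into the caller's buffer, on `consumeTo`; a partially consumed chunk stays at the front of the queue:
```nim
var q: ZeroQueue
q.push(@[1'u8, 2, 3])
q.push(@[4'u8, 5])
var buf: array[4, byte]
let n = q.consumeTo(addr buf[0], buf.len) # n == 4; buf = [1, 2, 3, 4]
assert n == 4 and not q.isEmpty and q.len == 1 # byte 5 is still queued
```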

View File

@@ -108,7 +108,9 @@ proc createStreamServer*[T](
): StreamServer {.raises: [LPError, MaInvalidAddress].} =
## Create new TCP stream server which bounds to ``ma`` address.
if not (RTRANSPMA.match(ma)):
raise newException(MaInvalidAddress, "Incorrect or unsupported address!")
raise newException(
MaInvalidAddress, "Incorrect or unsupported address in createStreamServer"
)
try:
return createStreamServer(
@@ -123,7 +125,7 @@ proc createStreamServer*[T](
init,
)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(LPError, "failed createStreamServer: " & exc.msg, exc)
proc createStreamServer*[T](
ma: MultiAddress,
@@ -146,7 +148,7 @@ proc createStreamServer*[T](
initTAddress(ma).tryGet(), flags, udata, sock, backlog, bufferSize, child, init
)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(LPError, "failed simpler createStreamServer: " & exc.msg, exc)
proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPError].} =
## Create new asynchronous socket using MultiAddress' ``ma`` socket type and
@@ -178,7 +180,9 @@ proc createAsyncSocket*(ma: MultiAddress): AsyncFD {.raises: [ValueError, LPErro
try:
createAsyncSocket(address.getDomain(), socktype, protocol)
except CatchableError as exc:
raise newException(LPError, exc.msg)
raise newException(
LPError, "Convert exception to LPError in createAsyncSocket: " & exc.msg, exc
)
proc bindAsyncSocket*(sock: AsyncFD, ma: MultiAddress): bool {.raises: [LPError].} =
## Bind socket ``sock`` to MultiAddress ``ma``.

View File

@@ -9,91 +9,115 @@
set -e
CACHE_DIR="$1" # optional parameter pointing to a CI cache dir.
LIBP2P_COMMIT="124530a3" # Tags maye be used as well
[[ -n "$2" ]] && LIBP2P_COMMIT="$2" # allow overriding it on the command line
force=false
verbose=false
CACHE_DIR=""
LIBP2P_COMMIT="124530a3"
while [[ "$#" -gt 0 ]]; do
case "$1" in
-f|--force) force=true ;;
-v|--verbose) verbose=true ;;
-h|--help)
echo "Usage: $0 [-f|--force] [-v|--verbose] [CACHE_DIR] [COMMIT]"
exit 0
;;
*)
# First non-option is CACHE_DIR, second is LIBP2P_COMMIT
if [[ -z "$CACHE_DIR" ]]; then
CACHE_DIR="$1"
elif [[ "$LIBP2P_COMMIT" == "124530a3" ]]; then
LIBP2P_COMMIT="$1"
else
echo "Unknown argument: $1"
exit 1
fi
;;
esac
shift
done
SUBREPO_DIR="vendor/go/src/github.com/libp2p/go-libp2p-daemon"
if [[ ! -e "$SUBREPO_DIR" ]]; then
# we're probably in nim-libp2p's CI
SUBREPO_DIR="go-libp2p-daemon"
rm -rf "$SUBREPO_DIR"
git clone -q https://github.com/libp2p/go-libp2p-daemon
cd "$SUBREPO_DIR"
git checkout -q $LIBP2P_COMMIT
cd ..
SUBREPO_DIR="go-libp2p-daemon"
rm -rf "$SUBREPO_DIR"
git clone -q https://github.com/libp2p/go-libp2p-daemon
cd "$SUBREPO_DIR"
git checkout -q "$LIBP2P_COMMIT"
cd ..
fi
## env vars
# verbosity level
[[ -z "$V" ]] && V=0
[[ -z "$BUILD_MSG" ]] && BUILD_MSG="Building p2pd ${LIBP2P_COMMIT}"
# Windows detection
if uname | grep -qiE "mingw|msys"; then
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
EXE_SUFFIX=".exe"
# otherwise it fails in AppVeyor due to https://github.com/git-for-windows/git/issues/2495
GIT_TIMESTAMP_ARG="--date=unix" # available since Git 2.9.4
else
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
EXE_SUFFIX=""
GIT_TIMESTAMP_ARG="--date=format-local:%s" # available since Git 2.7.0
fi
TARGET_DIR="$(go env GOPATH)/bin"
TARGET_BINARY="${TARGET_DIR}/p2pd${EXE_SUFFIX}"
target_needs_rebuilding() {
REBUILD=0
NO_REBUILD=1
REBUILD=0
NO_REBUILD=1
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
mkdir -p "${TARGET_DIR}"
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
fi
if [[ -n "$CACHE_DIR" && -e "${CACHE_DIR}/p2pd${EXE_SUFFIX}" ]]; then
mkdir -p "${TARGET_DIR}"
cp -a "$CACHE_DIR"/* "${TARGET_DIR}/"
fi
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
return $NO_REBUILD
else
return $REBUILD
fi
# compare the built commit's timestamp to the date of the last commit (keep in mind that Git doesn't preserve file timestamps)
if [[ -e "${TARGET_DIR}/timestamp" && $(cat "${TARGET_DIR}/timestamp") -eq $(cd "$SUBREPO_DIR"; git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG}) ]]; then
return $NO_REBUILD
else
return $REBUILD
fi
}
build_target() {
echo -e "$BUILD_MSG"
[[ "$V" == "0" ]] && exec &>/dev/null
echo -e "$BUILD_MSG"
pushd "$SUBREPO_DIR"
# Go module downloads can fail randomly in CI VMs, so retry them a few times
MAX_RETRIES=5
CURR=0
while [[ $CURR -lt $MAX_RETRIES ]]; do
FAILED=0
go get ./... && break || FAILED=1
CURR=$(( CURR + 1 ))
echo "retry #${CURR}"
done
if [[ $FAILED == 1 ]]; then
echo "Error: still fails after retrying ${MAX_RETRIES} times."
exit 1
fi
go install ./...
pushd "$SUBREPO_DIR"
# Go module downloads can fail randomly in CI VMs, so retry them a few times
MAX_RETRIES=5
CURR=0
while [[ $CURR -lt $MAX_RETRIES ]]; do
FAILED=0
go get ./... && break || FAILED=1
CURR=$(( CURR + 1 ))
if $verbose; then
echo "retry #${CURR}"
fi
done
if [[ $FAILED == 1 ]]; then
echo "Error: still fails after retrying ${MAX_RETRIES} times."
exit 1
fi
go install ./...
# record the last commit's timestamp
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
# record the last commit's timestamp
git log --pretty=format:%cd -n 1 ${GIT_TIMESTAMP_ARG} > "${TARGET_DIR}/timestamp"
popd
popd
# update the CI cache
if [[ -n "$CACHE_DIR" ]]; then
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully."
# update the CI cache
if [[ -n "$CACHE_DIR" ]]; then
rm -rf "$CACHE_DIR"
mkdir "$CACHE_DIR"
cp -a "$TARGET_DIR"/* "$CACHE_DIR"/
fi
echo "Binary built successfully: $TARGET_BINARY"
}
if target_needs_rebuilding; then
build_target
if $force || target_needs_rebuilding; then
build_target
else
echo "No rebuild needed."
echo "No rebuild needed."
fi

View File

@@ -1,13 +1,17 @@
import chronos, chronicles, stew/byteutils
import helpers
import ../libp2p
import ../libp2p/[daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import
../libp2p/
[autotls/service, daemon/daemonapi, varint, transports/wstransport, crypto/crypto]
import ../libp2p/protocols/connectivity/relay/[relay, client, utils]
type
SwitchCreator = proc(
ma: MultiAddress = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(),
prov: TransportProvider = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
prov: TransportProvider = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
TcpTransport.new({}, upgr),
relay: Relay = Relay.new(circuitRelayV1 = true),
): Switch {.gcsafe, raises: [LPError].}
@@ -318,7 +322,9 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
let nativeNode = swCreator(
ma = wsAddress,
prov = proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
prov = proc(
upgr: Upgrade, privateKey: PrivateKey, autotls: AutotlsService
): Transport =
WsTransport.new(upgr),
)
@@ -357,10 +363,7 @@ proc commonInteropTests*(name: string, swCreator: SwitchCreator) =
.withAddress(wsAddress)
.withRng(crypto.newRng())
.withMplex()
.withTransport(
proc(upgr: Upgrade, privateKey: PrivateKey): Transport =
WsTransport.new(upgr)
)
.withWsTransport()
.withNoise()
.build()

View File

@@ -49,8 +49,10 @@ template checkTrackers*() =
{.push warning[BareExcept]: off.}
try:
GC_fullCollect()
except CatchableError:
discard
except Defect as exc:
raise exc # Reraise to maintain call stack
except Exception:
raiseAssert "Unexpected exception during GC collection"
when defined(nimHasWarnBareExcept):
{.pop.}
@@ -92,7 +94,9 @@ proc new*(T: typedesc[TestBufferStream], writeHandler: WriteHandler): T =
testBufferStream.initStream()
testBufferStream
macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
macro checkUntilTimeoutCustom*(
timeout: Duration, sleepInterval: Duration, code: untyped
): untyped =
## Periodically checks a given condition until it is true or a timeout occurs.
##
## `code`: untyped - A condition expression that should eventually evaluate to true.
@@ -101,17 +105,17 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
## Examples:
## ```nim
## # Example 1:
## asyncTest "checkUntilCustomTimeout should pass if the condition is true":
## asyncTest "checkUntilTimeoutCustom should pass if the condition is true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(2.seconds):
## checkUntilTimeoutCustom(2.seconds, 100.milliseconds):
## a == b
##
## # Example 2: Multiple conditions
## asyncTest "checkUntilCustomTimeout should pass if the conditions are true":
## asyncTest "checkUntilTimeoutCustom should pass if the conditions are true":
## let a = 2
## let b = 2
## checkUntilCustomTimeout(5.seconds)::
## checkUntilTimeoutCustom(5.seconds, 100.milliseconds):
## a == b
## a == 2
## b == 1
@@ -145,12 +149,12 @@ macro checkUntilCustomTimeout*(timeout: Duration, code: untyped): untyped =
if `combinedBoolExpr`:
return
else:
await sleepAsync(100.millis)
await sleepAsync(`sleepInterval`)
await checkExpiringInternal()
macro checkUntilTimeout*(code: untyped): untyped =
## Same as `checkUntilCustomTimeout` but with a default timeout of 10 seconds.
## Same as `checkUntilTimeoutCustom`, but with a default 2s timeout and 50ms interval.
##
## Examples:
## ```nim
@@ -171,7 +175,7 @@ macro checkUntilTimeout*(code: untyped): untyped =
## b == 1
## ```
result = quote:
checkUntilCustomTimeout(10.seconds, `code`)
checkUntilTimeoutCustom(2.seconds, 50.milliseconds, `code`)
proc unorderedCompare*[T](a, b: seq[T]): bool =
if a == b:

View File

@@ -0,0 +1,142 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import unittest2
import ../../libp2p/protobuf/minprotobuf
import ../../libp2p/protocols/kademlia/protobuf
import ../../libp2p/multiaddress
import options
import results
suite "kademlia protobuffers":
const invalidType = uint32(999)
proc valFromResultOption[T](res: ProtoResult[Option[T]]): T =
assert res.isOk()
assert res.value().isSome()
return res.value().unsafeGet()
test "record encode/decode":
let rec = Record(
key: some(@[1'u8, 2, 3]),
value: some(@[4'u8, 5, 6]),
timeReceived: some("2025-05-12T12:00:00Z"),
)
let encoded = rec.encode()
let decoded = Record.decode(encoded).valFromResultOption
check:
decoded.key.get() == rec.key.get()
decoded.value.get() == rec.value.get()
decoded.timeReceived.get() == rec.timeReceived.get()
test "peer encode/decode":
let maddr = MultiAddress.init("/ip4/127.0.0.1/tcp/9000").tryGet()
let peer =
Peer(id: @[1'u8, 2, 3], addrs: @[maddr], connection: ConnectionType.connected)
let encoded = peer.encode()
var decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
check:
decoded == peer
test "message encode/decode roundtrip":
let maddr = MultiAddress.init("/ip4/10.0.0.1/tcp/4001").tryGet()
let peer = Peer(id: @[9'u8], addrs: @[maddr], connection: canConnect)
let r = Record(key: some(@[1'u8]), value: some(@[2'u8]), timeReceived: some("t"))
let msg = Message(
msgType: MessageType.findNode,
key: some(@[7'u8]),
record: some(r),
closerPeers: @[peer],
providerPeers: @[peer],
)
let encoded = msg.encode()
let decoded = Message.decode(encoded.buffer).valFromResultOption
check:
decoded == msg
test "decode record with missing fields":
var pb = initProtoBuffer()
# no fields written
let rec = Record.decode(pb).valFromResultOption
check:
rec.key.isNone()
rec.value.isNone()
rec.timeReceived.isNone()
test "decode peer with missing id (invalid)":
var pb = initProtoBuffer()
check:
Peer.decode(pb).isErr()
test "decode peer with invalid connection type":
var pb = initProtoBuffer()
pb.write(1, @[1'u8, 2, 3]) # id field
pb.write(3, invalidType) # bogus connection type
check:
Peer.decode(pb).isErr()
test "decode message with invalid msgType":
var pb = initProtoBuffer()
pb.write(1, invalidType) # invalid MessageType
check:
Message.decode(pb.buffer).isErr()
test "decode message with invalid peer in closerPeers":
let badPeerBuf = @[0'u8, 1, 2] # junk
var pb = initProtoBuffer()
pb.write(8, badPeerBuf) # closerPeers field
check:
Message.decode(pb.buffer).isErr()
test "decode message with invalid embedded record":
# encode junk data into field 3 (record)
var pb = initProtoBuffer()
pb.write(1, uint32(MessageType.putValue)) # valid msgType
pb.write(3, @[0x00'u8, 0xFF, 0xAB]) # broken protobuf for record
check:
Message.decode(pb.buffer).isErr()
test "decode message with empty embedded record":
var recordPb = initProtoBuffer() # no fields
var pb = initProtoBuffer()
pb.write(1, uint32(MessageType.getValue))
pb.write(3, recordPb.buffer)
let decoded = Message.decode(pb.buffer).valFromResultOption
check:
decoded.record.isSome()
decoded.record.get().key.isNone()
test "peer with empty addr list and no connection":
let peer = Peer(id: @[0x42'u8], addrs: @[], connection: ConnectionType.notConnected)
let encoded = peer.encode()
let decoded = Peer.decode(initProtoBuffer(encoded.buffer)).valFromResultOption
check:
decoded == peer
test "message with empty closer/provider peers":
let msg = Message(
msgType: MessageType.ping,
key: none[seq[byte]](),
record: none[Record](),
closerPeers: @[],
providerPeers: @[],
)
let encoded = msg.encode()
let decoded = Message.decode(encoded.buffer).valFromResultOption
check:
decoded == msg
test "peer with addr but missing id":
var pb = initProtoBuffer()
let maddr = MultiAddress.init("/ip4/1.2.3.4/tcp/1234").tryGet()
pb.write(2, maddr.data.buffer)
check:
Peer.decode(pb).isErr()

View File

@@ -0,0 +1,83 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import unittest
import chronos
import ../../libp2p/crypto/crypto
import ../../libp2p/protocols/kademlia/[routingtable, consts, keys]
proc testKey*(x: byte): Key =
var buf: array[IdLength, byte]
buf[31] = x
return Key(kind: KeyType.Unhashed, data: buf)
let rng = crypto.newRng()
suite "routing table":
test "inserts single key in correct bucket":
let selfId = testKey(0)
var rt = RoutingTable.init(selfId)
let other = testKey(0b10000000)
discard rt.insert(other)
let idx = bucketIndex(selfId, other)
check:
rt.buckets.len > idx
rt.buckets[idx].peers.len == 1
rt.buckets[idx].peers[0].nodeId == other
test "does not insert beyond capacity":
let selfId = testKey(0)
var rt = RoutingTable.init(selfId)
let targetBucket = 6
for _ in 0 ..< k + 5:
var kid = randomKeyInBucketRange(selfId, targetBucket, rng)
kid.kind = KeyType.Unhashed
# Overriding so we don't use sha for comparing xor distances
discard rt.insert(kid)
check targetBucket < rt.buckets.len
let bucket = rt.buckets[targetBucket]
check bucket.peers.len <= k
test "findClosest returns sorted keys":
let selfId = testKey(0)
var rt = RoutingTable.init(selfId)
let ids = @[testKey(1), testKey(2), testKey(3), testKey(4), testKey(5)]
for id in ids:
discard rt.insert(id)
let res = rt.findClosest(testKey(1), 3)
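# XOR distances to testKey(1): key 1 -> 0, key 3 -> 2, key 2 -> 3, key 5 -> 4, key 4 -> 5,
# so the three closest are 1, 3, 2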
check:
res.len == 3
res == @[testKey(1), testKey(3), testKey(2)]
test "isStale returns true for empty or old keys":
var bucket: Bucket
check isStale(bucket) == true
bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now() - 40.minutes)]
check isStale(bucket) == true
bucket.peers = @[NodeEntry(nodeId: testKey(1), lastSeen: Moment.now())]
check isStale(bucket) == false
test "randomKeyInBucketRange returns id at correct distance":
let selfId = testKey(0)
let targetBucket = 3
var rid = randomKeyInBucketRange(selfId, targetBucket, rng)
rid.kind = KeyType.Unhashed
# Overriding so we don't use sha for comparing xor distances
let idx = bucketIndex(selfId, rid)
check:
idx == targetBucket
rid != selfId

View File

@@ -0,0 +1,54 @@
{.used.}
# Nim-Libp2p
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import unittest
import chronos
import ../../libp2p/protocols/kademlia/[consts, keys, xordistance]
suite "xor distance":
test "countLeadingZeroBits works":
check countLeadingZeroBits(0b00000000'u8) == 8
check countLeadingZeroBits(0b10000000'u8) == 0
check countLeadingZeroBits(0b01000000'u8) == 1
check countLeadingZeroBits(0b00000001'u8) == 7
test "leadingZeros of xor distance":
var d: XorDistance
for i in 0 ..< IdLength:
d[i] = 0
check leadingZeros(d) == IdLength * 8
d[0] = 0b00010000
check leadingZeros(d) == 3
d[0] = 0
d[1] = 0b00100000
check leadingZeros(d) == 10
test "xorDistance of identical keys is zero":
let k = @[
1'u8, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 0, 1, 2,
].toKey()
let dist = xorDistance(k, k)
check:
leadingZeros(dist) == IdLength * 8
dist == default(XorDistance)
test "cmp gives correct order":
var a: XorDistance
var b: XorDistance
a[0] = 0x01
b[0] = 0x02
check a < b
check cmp(a, b) == -1
check cmp(b, a) == 1
check cmp(a, a) == 0

View File

@@ -12,8 +12,8 @@
import sequtils, tables, sets
import chronos, stew/byteutils
import
utils,
../../libp2p/[
../utils,
../../../libp2p/[
switch,
stream/connection,
crypto/crypto,
@@ -23,9 +23,9 @@ import
protocols/pubsub/peertable,
protocols/pubsub/pubsubpeer,
]
import ../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../../../libp2p/protocols/pubsub/errors as pubsub_errors
import ../helpers
import ../../helpers
proc waitSub(sender, receiver: auto, key: string) {.async.} =
# turn things deterministic
@@ -38,7 +38,7 @@ proc waitSub(sender, receiver: auto, key: string) {.async.} =
dec ceil
doAssert(ceil > 0, "waitSub timeout!")
suite "FloodSub":
suite "FloodSub Integration":
teardown:
checkTrackers()
@@ -310,5 +310,5 @@ suite "FloodSub":
check (await bigNode1[0].publish("foo", bigMessage)) > 0
checkUntilTimeout:
checkUntilTimeoutCustom(10.seconds, 100.milliseconds):
messageReceived == 1

View File

@@ -0,0 +1,388 @@
{.used.}
import std/[sequtils]
import chronicles
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers
suite "GossipSub Integration - Control Messages":
teardown:
checkTrackers()
asyncTest "GRAFT messages correctly add peers to mesh":
# Given 2 nodes
let
topic = "foobar"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
# First part of the hack: Weird dValues so peers are not GRAFTed automatically
dValues = DValues(dLow: some(0), dHigh: some(0), d: some(0), dOut: some(-1))
nodes = generateNodes(
numberOfNodes, gossip = true, verifySignature = false, dValues = some(dValues)
)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Because of the hack-ish dValues, the peers are added to gossipsub but not GRAFTed to mesh
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# Stop both nodes to prevent GRAFT messages from being sent by the heartbeat
await n0.stop()
await n1.stop()
# Second part of the hack
# Set values so peers can be GRAFTed
let newDValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(1)))
n0.parameters.applyDValues(newDValues)
n1.parameters.applyDValues(newDValues)
# When a GRAFT message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
n1.broadcast(@[p0], RPCMsg(control: some(graftMessage)), isHighPriority = false)
checkUntilTimeout:
nodes.allIt(it.mesh.getOrDefault(topic).len == 1)
# Then the peers are GRAFTed
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "Received GRAFT for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
graftMessage = ControlMessage(graft: @[ControlGraft(topicID: topic)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
nodes[0].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a GRAFT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(graftMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not GRAFTed
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
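
A hedged sketch of the rule both GRAFT tests probe, with illustrative names rather than the library's handler: a GRAFT only places the sender in the mesh when the topic is locally subscribed.

import std/[sets, tables]

var mesh = initTable[string, HashSet[string]]()
var subscribedTopics = initHashSet[string]()

proc handleGraft(topic, peerId: string) =
  # only graft peers for topics we subscribe to;
  # the real handler answers an unknown topic with PRUNE instead
  if topic in subscribedTopics:
    mesh.mgetOrPut(topic, initHashSet[string]()).incl(peerId)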
asyncTest "PRUNE messages correctly removes peers from mesh":
# Given 2 nodes
let
topic = "foo"
backoff = 1
pruneMessage = ControlMessage(
prune: @[ControlPrune(topicID: topic, peers: @[], backoff: uint64(backoff))]
)
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When another PRUNE message is sent
let p0 = n1.getOrCreatePeer(n0.peerInfo.peerId, @[GossipSubCodec_12])
n1.broadcast(@[p0], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is PRUNEd
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
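
The backoff field carried in PRUNE drives when a pruned peer may re-GRAFT; a minimal sketch of that bookkeeping, assuming the chronos Moment API used elsewhere in these tests (names are illustrative):

import std/tables
import chronos

var graftBackoff = initTable[string, Moment]()

proc onPrune(topic: string, backoffSeconds: uint64) =
  # remember when this topic may be grafted again
  graftBackoff[topic] = Moment.fromNow(int64(backoffSeconds).seconds)

proc canGraft(topic: string): bool =
  topic notin graftBackoff or Moment.now() >= graftBackoff[topic]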
asyncTest "Received PRUNE for non-subscribed topic":
# Given 2 nodes
let
topic = "foo"
pruneMessage =
ControlMessage(prune: @[ControlPrune(topicID: topic, peers: @[], backoff: 1)])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# And the nodes are connected
await connectNodesStar(nodes)
# And only node0 subscribes to the topic
n0.subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
# When a PRUNE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(pruneMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then the peer is not PRUNEd
check:
n0.topics.hasKey(topic)
not n1.topics.hasKey(topic)
not n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
not n0.mesh.hasPeerId(topic, n1.peerInfo.peerId)
not n1.mesh.hasPeerId(topic, n0.peerInfo.peerId)
asyncTest "IHAVE messages correctly advertise message ID to peers":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IHAVE observer
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
n1.addOnRecvObserver(checkForIHaves)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IHAVE message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then node1 receives the IHAVE advertising the message ID
check:
receivedIHaves[0] == ControlIHave(topicID: topic, messageIDs: @[messageID])
asyncTest "IWANT messages correctly request messages by their IDs":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
iwantMessage = ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var (receivedIWants, checkForIWants) = createCheckForIWant()
n1.addOnRecvObserver(checkForIWants)
# And the nodes are connected
await connectNodesStar(nodes)
# And both subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
n0.gossipsub.hasPeerId(topic, n1.peerInfo.peerId)
n1.gossipsub.hasPeerId(topic, n0.peerInfo.peerId)
# When an IWANT message is sent
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(iwantMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then node1 receives the IWANT requesting the message ID
check:
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
asyncTest "IHAVE for message not held by peer triggers IWANT response to sender":
# Given 2 nodes
let
topic = "foo"
messageID = @[0'u8, 1, 2, 3]
ihaveMessage =
ControlMessage(ihave: @[ControlIHave(topicID: topic, messageIDs: @[messageID])])
numberOfNodes = 2
nodes = generateNodes(numberOfNodes, gossip = true, verifySignature = false)
.toGossipSub()
n0 = nodes[0]
n1 = nodes[1]
startNodesAndDeferStop(nodes)
# Given node1 has an IWANT observer
var (receivedIWants, checkForIWants) = createCheckForIWant()
n0.addOnRecvObserver(checkForIWants)
# And the nodes are connected
await connectNodesStar(nodes)
# And both nodes subscribe to the topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When an IHAVE message is sent from node0
let p1 = n0.getOrCreatePeer(n1.peerInfo.peerId, @[GossipSubCodec_12])
n0.broadcast(@[p1], RPCMsg(control: some(ihaveMessage)), isHighPriority = false)
await waitForHeartbeat()
# Then node0 should receive an IWANT message from node1 (as node1 doesn't have the message)
check:
receivedIWants[0] == ControlIWant(messageIDs: @[messageID])
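
The IHAVE/IWANT exchange in the tests above reduces to one rule: request only the advertised IDs you do not already hold. A hypothetical one-liner expressing it:

import std/[sets, sequtils]

proc iwantFor(advertised: seq[seq[byte]], seen: HashSet[seq[byte]]): seq[seq[byte]] =
  # request only the message IDs we have not seen yet
  advertised.filterIt(it notin seen)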
asyncTest "IDONTWANT":
# 3 nodes: A <=> B <=> C (A & C are NOT connected)
let
topic = "foobar"
nodes = generateNodes(3, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[1], nodes[2])
let (bFinished, handlerB) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handlerB)
nodes[2].subscribe(topic, voidTopicHandler)
await waitSubGraph(nodes, topic)
check:
nodes[2].mesh.peers(topic) == 1
# When we pre-emptively send an IDONTWANT from C to B,
nodes[2].broadcast(
nodes[2].mesh[topic],
RPCMsg(
control: some(
ControlMessage(idontwant: @[ControlIWant(messageIDs: @[newSeq[byte](10)])])
)
),
isHighPriority = true,
)
# Then B doesn't relay the message to C.
checkUntilTimeout:
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
# When A sends a message to the topic
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
discard await bFinished
# Then B sends IDONTWANT to C, but not to A
checkUntilTimeout:
toSeq(nodes[2].mesh.getOrDefault(topic)).anyIt(it.iDontWants.anyIt(it.len == 1))
check:
toSeq(nodes[0].mesh.getOrDefault(topic)).allIt(it.iDontWants.allIt(it.len == 0))
asyncTest "IDONTWANT is broadcasted on publish":
# 2 nodes: A <=> B
let
topic = "foobar"
nodes =
generateNodes(2, gossip = true, sendIDontWantOnPublish = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSubGraph(nodes, topic)
# When A sends a message to the topic
tryPublish await nodes[0].publish(topic, newSeq[byte](10000)), 1
# Then IDONTWANT is sent to B on publish
checkUntilTimeout:
nodes[1].mesh.getOrDefault(topic).anyIt(it.iDontWants.anyIt(it.len == 1))
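
Both IDONTWANT tests come down to the same relay-suppression check; a hedged sketch with illustrative names:

import std/sets

proc shouldRelay(msgId: seq[byte], peerDontWants: HashSet[seq[byte]]): bool =
  # skip peers that already announced this message ID
  msgId notin peerDontWants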

View File

@@ -0,0 +1,99 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronos
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable, pubsubpeer]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../../libp2p/stream/connection
import ../../helpers
type DummyConnection* = ref object of Connection
method write*(
self: DummyConnection, msg: seq[byte]
): Future[void] {.async: (raises: [CancelledError, LPStreamError], raw: true), public.} =
let fut = newFuture[void]()
fut.complete()
return fut
proc new*(T: typedesc[DummyConnection]): DummyConnection =
let instance = T()
instance
suite "GossipSub Integration - Custom Connection Support":
teardown:
checkTrackers()
asyncTest "publish with useCustomConn triggers custom connection and peer selection":
let
topic = "test"
nodes = generateNodes(2, gossip = true).toGossipSub()
var
customConnCreated = false
peerSelectionCalled = false
nodes[0].customConnCallbacks = some(
CustomConnectionCallbacks(
customConnCreationCB: proc(
destAddr: Option[MultiAddress], destPeerId: PeerId, codec: string
): Connection =
customConnCreated = true
return DummyConnection.new(),
customPeerSelectionCB: proc(
allPeers: HashSet[PubSubPeer],
directPeers: HashSet[PubSubPeer],
meshPeers: HashSet[PubSubPeer],
fanoutPeers: HashSet[PubSubPeer],
): HashSet[PubSubPeer] =
peerSelectionCalled = true
return allPeers,
)
)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)
tryPublish await nodes[0].publish(
topic, "hello".toBytes(), publishParams = some(PublishParams(useCustomConn: true))
), 1
check:
peerSelectionCalled
customConnCreated
asyncTest "publish with useCustomConn triggers assertion if custom callbacks not set":
let
topic = "test"
nodes = generateNodes(2, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)
var raised = false
try:
discard await nodes[0].publish(
topic,
"hello".toBytes(),
publishParams = some(PublishParams(useCustomConn: true)),
)
except Defect:
raised = true
check raised

View File

@@ -9,66 +9,18 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import utils
import ../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../libp2p/protocols/pubsub/rpc/[messages]
import ../helpers
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
suite "GossipSub Fanout Management":
suite "GossipSub Integration - Fanout Management":
teardown:
checkTrackers()
asyncTest "`replenishFanout` Degree Lo":
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.gossipsub[topic].len == 15
gossipSub.replenishFanout(topic)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
asyncTest "`dropFanoutPeers` drop expired fanout topics":
let topic = "foobar"
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
await sleepAsync(5.millis) # allow the topic to expire
check gossipSub.fanout[topic].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic notin gossipSub.fanout
asyncTest "`dropFanoutPeers` leave unexpired fanout topics":
let
topic1 = "foobar1"
topic2 = "foobar2"
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
await sleepAsync(5.millis) # allow first topic to expire
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic1 notin gossipSub.fanout
check topic2 in gossipSub.fanout
asyncTest "e2e - GossipSub send over fanout A -> B":
asyncTest "GossipSub send over fanout A -> B":
let (passed, handler) = createCompleteHandler()
let nodes = generateNodes(2, gossip = true)
@@ -107,7 +59,7 @@ suite "GossipSub Fanout Management":
check observed == 2
asyncTest "e2e - GossipSub send over fanout A -> B for subscribed topic":
asyncTest "GossipSub send over fanout A -> B for subscribed topic":
let (passed, handler) = createCompleteHandler()
let nodes = generateNodes(2, gossip = true, unsubscribeBackoff = 10.minutes)

View File

@@ -0,0 +1,253 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import stew/byteutils
import chronicles
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]
suite "GossipSub Integration - Gossip Protocol":
teardown:
checkTrackers()
asyncTest "messages sent to peers not in the mesh are propagated via gossip":
let
numberOfNodes = 5
topic = "foobar"
dValues = DValues(dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1))
nodes = generateNodes(numberOfNodes, gossip = true, dValues = some(dValues))
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var messages = addIHaveObservers(nodes)
# And are interconnected
await connectNodesStar(nodes)
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
nodes.allIt(it.gossipsub.getOrDefault(topic).len == numberOfNodes - 1)
# When node 0 sends a message
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# At least one of the nodes should have received an iHave message
# The check is made this way because the mesh structure changes from run to run
checkUntilTimeout:
messages[].mapIt(it[].len).anyIt(it > 0)
asyncTest "adaptive gossip dissemination, dLazy and gossipFactor to 0":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(0)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When node 0 sends a message
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat()
# None of the nodes should have received an iHave message
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len == 0
asyncTest "adaptive gossip dissemination, with gossipFactor priority":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(4)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.5),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
# When node 0 sends a message
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat(2)
# At least 8 of the nodes should have received an iHave message,
# because the gossip factor of 0.5 is applied to the ~16 peers outside node 0's mesh
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len >= 8
asyncTest "adaptive gossip dissemination, with dLazy priority":
let
numberOfNodes = 20
topic = "foobar"
dValues = DValues(
dLow: some(2), dHigh: some(3), d: some(2), dOut: some(1), dLazy: some(6)
)
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(dValues),
gossipFactor = some(0.float),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iHave messages
var messages = addIHaveObservers(nodes)
# And are connected to node 0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
# When node 0 sends a message
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 3
await waitForHeartbeat(2)
# At least 6 of the nodes should have received an iHave message,
# because dLazy is 6 (see the sketch after this test)
let receivedIHaves = messages[].mapIt(it[].len)
check:
filterIt(receivedIHaves, it > 0).len >= dValues.dLazy.get()
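
The three adaptive-gossip tests above share one formula from the gossipsub spec: the number of gossip targets is max(dLazy, gossipFactor * non-mesh peers). A small sketch using the tests' own numbers:

proc gossipTargets(dLazy: int, gossipFactor: float, nonMeshPeers: int): int =
  max(dLazy, int(gossipFactor * nonMeshPeers.float))

assert gossipTargets(0, 0.0, 16) == 0 # dLazy and gossipFactor both 0: no IHAVEs
assert gossipTargets(4, 0.5, 16) == 8 # gossipFactor wins: 0.5 * 16 = 8
assert gossipTargets(6, 0.0, 16) == 6 # dLazy wins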
asyncTest "iDontWant messages are broadcast immediately after receiving the first message instance":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# All nodes are checking for iDontWant messages
var messages = addIDontWantObservers(nodes)
# And are connected in a line
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[1], nodes[2])
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == 1
nodes[1].gossipsub.getOrDefault(topic).len == 2
nodes[2].gossipsub.getOrDefault(topic).len == 1
# When node 0 sends a large message
let largeMsg = newSeq[byte](1000)
tryPublish await nodes[0].publish(topic, largeMsg), 1
# Only node 2 should have received the iDontWant message
checkUntilTimeout:
messages[].mapIt(it[].len)[2] == 1
messages[].mapIt(it[].len)[1] == 0
messages[].mapIt(it[].len)[0] == 0
asyncTest "GossipSub peer exchange":
# A, B & C are subscribed to something
# B unsubscribes from it; it should send
# PX to A & C
#
# C sent its SPR, A did not
let
topic = "foobar"
nodes =
generateNodes(2, gossip = true, enablePX = true).toGossipSub() &
generateNodes(1, gossip = true, sendSignedPeerRecord = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitSubAllNodes(nodes, topic)
# Setup record handlers for all nodes
var
passed0: Future[void] = newFuture[void]()
passed2: Future[void] = newFuture[void]()
nodes[0].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == topic
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed0.complete()
)
nodes[1].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
raiseAssert "should not get here"
)
nodes[2].routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
check:
tag == topic
peers.len == 2
peers[0].record.isSome() xor peers[1].record.isSome()
passed2.complete()
)
# Unsubscribe from the topic
nodes[1].unsubscribe(topic, voidTopicHandler)
# Then verify what nodes receive the PX
let results = await waitForStates(@[passed0, passed2], HEARTBEAT_TIMEOUT)
check:
results[0].isCompleted()
results[1].isCompleted()

View File

@@ -0,0 +1,91 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils]
import chronicles
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers
suite "GossipSub Integration - Compatibility":
const topic = "foobar"
teardown:
checkTrackers()
asyncTest "Protocol negotiation selects highest common version":
let
node0 = generateNodes(
1,
gossip = true,
codecs = @[GossipSubCodec_12, GossipSubCodec_11, GossipSubCodec_10],
# Order from highest to lowest version is required because
# multistream protocol negotiation selects the first protocol
# in the dialer's list that both peers support
)
.toGossipSub()[0]
node1 = generateNodes(
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
)
.toGossipSub()[0]
node2 =
generateNodes(1, gossip = true, codecs = @[GossipSubCodec_10]).toGossipSub()[0]
nodes = @[node0, node1, node2]
node0PeerId = node0.peerInfo.peerId
node1PeerId = node1.peerInfo.peerId
node2PeerId = node2.peerInfo.peerId
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()
checkUntilTimeout:
node0.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_11
node0.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10
node1.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_11
node1.getPeerByPeerId(topic, node2PeerId).codec == GossipSubCodec_10
node2.getPeerByPeerId(topic, node0PeerId).codec == GossipSubCodec_10
node2.getPeerByPeerId(topic, node1PeerId).codec == GossipSubCodec_10
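
The ordering requirement noted in the comment above comes down to first-match selection over the dialer's preference list; a hypothetical sketch:

proc selectCodec(dialerCodecs, listenerCodecs: seq[string]): string =
  # multistream-select style: first dialer-preferred codec both sides support
  for codec in dialerCodecs:
    if codec in listenerCodecs:
      return codec
  ""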
asyncTest "IDONTWANT is sent only for GossipSubCodec_12":
# 4 nodes: nodeCenter in the center connected to the rest
var nodes = generateNodes(3, gossip = true).toGossipSub()
let
nodeCenter = nodes[0]
nodeSender = nodes[1]
nodeCodec12 = nodes[2]
nodeCodec11 = generateNodes(
1, gossip = true, codecs = @[GossipSubCodec_11, GossipSubCodec_10]
)
.toGossipSub()[0]
nodes &= nodeCodec11
startNodesAndDeferStop(nodes)
await connectNodes(nodeCenter, nodeSender)
await connectNodes(nodeCenter, nodeCodec12)
await connectNodes(nodeCenter, nodeCodec11)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()
# When A sends a message to the topic
tryPublish await nodeSender.publish(topic, newSeq[byte](10000)), 1
# Then nodeCenter sends IDONTWANT only to nodeCodec12 (because nodeCodec11.codec == GossipSubCodec_11)
checkUntilTimeout:
nodeCodec12.mesh.getOrDefault(topic).toSeq()[0].iDontWants.anyIt(it.len == 1)
nodeCodec11.mesh.getOrDefault(topic).toSeq()[0].iDontWants.allIt(it.len == 0)

View File

@@ -0,0 +1,348 @@
{.used.}
import std/[sequtils]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers
suite "GossipSub Integration - Heartbeat":
teardown:
checkTrackers()
asyncTest "Mesh is rebalanced during heartbeat - pruning peers":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
# When DValues of Node0 are updated to lower than defaults
const
newDLow = 2
newDHigh = 4
newDValues = some(
DValues(
dLow: some(newDLow),
dHigh: some(newDHigh),
d: some(3),
dLazy: some(3),
dScore: some(2),
dOut: some(2),
)
)
node0.parameters.applyDValues(newDValues)
# Then mesh of Node0 is rebalanced and peers are pruned to adapt to new values
checkUntilTimeout:
node0.mesh[topic].len >= newDLow and node0.mesh[topic].len <= newDHigh
asyncTest "Mesh is rebalanced during heartbeat - grafting new peers":
const
numberOfNodes = 10
topic = "foobar"
dLow = 3
dHigh = 4
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(
DValues(dLow: some(dLow), dHigh: some(dHigh), d: some(3), dOut: some(1))
),
pruneBackoff = 20.milliseconds,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len >= dLow and
node0.mesh.getOrDefault(topic).len <= dHigh
# When peers of Node0 mesh are disconnected
let peersToDisconnect = node0.mesh[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh[topic].len >= dLow and node0.mesh[topic].len <= dHigh
node0.mesh[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "Mesh is rebalanced during heartbeat - opportunistic grafting":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
dValues = some(
DValues(
dLow: some(3),
dHigh: some(4),
d: some(3),
dOut: some(1),
dLazy: some(3),
dScore: some(2),
)
),
pruneBackoff = 20.milliseconds,
opportunisticGraftThreshold = 600,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Keep track of initial mesh of Node0
let startingMesh = node0.mesh[topic].toSeq()
# When scores are assigned to Peers of Node0
var expectedGrafts: seq[PubSubPeer] = @[]
var score = 100.0
for peer in node0.gossipsub[topic]:
if peer in node0.mesh[topic]:
# Assign scores in starting Mesh
peer.score = score
score += 100.0
else:
# Assign scores higher than median to Peers not in starting Mesh and expect them to be grafted
peer.score = 800.0
expectedGrafts &= peer
# Then during heartbeat peers with lower-than-median scores are pruned and at most MaxOpportunisticGraftPeers (2) peers are grafted
await waitForHeartbeat(heartbeatInterval)
let actualGrafts = node0.mesh[topic].toSeq().filterIt(it notin startingMesh)
check:
actualGrafts.len == MaxOpportunisticGraftPeers
actualGrafts.allIt(it in expectedGrafts)
asyncTest "Fanout maintenance during heartbeat - expired peers are dropped":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let nodes = generateNodes(
numberOfNodes,
gossip = true,
fanoutTTL = 60.milliseconds,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
# All nodes but Node0 are subscribed to the topic
for node in nodes[1 .. ^1]:
node.subscribe(topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
let node0 = nodes[0]
checkUntilTimeout:
node0.gossipsub.hasKey(topic)
# When Node0 sends a message to the topic
tryPublish await node0.publish(topic, newSeq[byte](10000)), 3
# Then Node0 fanout peers are populated
checkUntilTimeout:
node0.fanout.hasKey(topic)
node0.fanout[topic].len > 0
# And after heartbeat Node0 fanout peers are dropped (because fanoutTTL < heartbeatInterval)
checkUntilTimeout:
not node0.fanout.hasKey(topic)
asyncTest "Fanout maintenance during heartbeat - fanout peers are replenished":
const
numberOfNodes = 10
topic = "foobar"
heartbeatInterval = 200.milliseconds
let
nodes = generateNodes(
numberOfNodes, gossip = true, heartbeatInterval = heartbeatInterval
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
# All nodes but Node0 are subscribed to the topic
for node in nodes[1 .. ^1]:
node.subscribe(topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# When Node0 sends a message to the topic
tryPublish await node0.publish(topic, newSeq[byte](10000)), 1
# Then Node0 fanout peers are populated
let maxFanoutPeers = node0.parameters.d
checkUntilTimeout:
node0.fanout[topic].len == maxFanoutPeers
# When all peers but first one of Node0 fanout are disconnected
let peersToDisconnect = node0.fanout[topic].toSeq()[1 .. ^1].mapIt(it.peerId)
findAndUnsubscribePeers(nodes, peersToDisconnect, topic, voidTopicHandler)
# Then Node0 fanout peers are replenished during heartbeat
# expecting 10[numberOfNodes] - 1[Node0] - (6[maxFanoutPeers] - 1[first peer not disconnected]) = 4
let expectedLen = numberOfNodes - 1 - (maxFanoutPeers - 1)
checkUntilTimeout:
node0.fanout[topic].len == expectedLen
node0.fanout[topic].toSeq().allIt(it.peerId notin peersToDisconnect)
asyncTest "iDontWants history - last element is pruned during heartbeat":
const
topic = "foobar"
heartbeatInterval = 200.milliseconds
historyLength = 3
let nodes = generateNodes(
2,
gossip = true,
sendIDontWantOnPublish = true,
historyLength = historyLength,
heartbeatInterval = heartbeatInterval,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Get Node0 as Peer of Node1
let peer = nodes[1].mesh[topic].toSeq()[0]
# Wait for history to populate
checkUntilTimeout:
peer.iDontWants.len == historyLength
# When Node0 sends 5 messages to the topic
const msgCount = 5
for i in 0 ..< msgCount:
tryPublish await nodes[0].publish(topic, newSeq[byte](1000)), 1
# Then Node1 receives 5 iDontWant messages from Node0
checkUntilTimeoutCustom(3.seconds, 50.milliseconds):
peer.iDontWants[0].len == msgCount
for i in 0 ..< historyLength:
# When heartbeat happens
# And history moves (new element added at start, last element pruned)
checkUntilTimeout:
peer.iDontWants[i].len == 0
# Then iDontWant messages are moved to the next element
var expectedHistory = newSeqWith(historyLength, 0)
let nextIndex = i + 1
if nextIndex < historyLength:
expectedHistory[nextIndex] = msgCount
# Until they reach last element and are pruned
checkUntilTimeout:
peer.iDontWants.mapIt(it.len) == expectedHistory
asyncTest "sentIHaves history - last element is pruned during heartbeat":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
heartbeatInterval = 200.milliseconds
historyLength = 3
gossipThreshold = -100.0
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
heartbeatInterval = heartbeatInterval,
gossipThreshold = gossipThreshold,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat(heartbeatInterval)
# Find Peer outside of mesh to which Node 0 will send IHave
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
# Wait for history to populate
checkUntilTimeout:
peerOutsideMesh.sentIHaves.len == historyLength
# When nodeOutsideMesh receives an IHave message, it would respond with an IWant to request the full message from Node0
# Set `peer.score < gossipThreshold` to prevent nodeOutsideMesh from sending that IWant,
# because when an IWant is processed, messages are removed from the sentIHaves history
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
for p in nodeOutsideMesh.gossipsub[topic].toSeq():
p.score = 2 * gossipThreshold
# When NodeInsideMesh sends a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
# When next heartbeat occurs
# Then IHave is sent and sentIHaves is populated
checkUntilTimeout:
peerOutsideMesh.sentIHaves[0].len == 1
# Need to clear the mCache, as the node would keep populating sentIHaves until the cache is shifted enough times
nodes[0].clearMCache()
for i in 0 ..< historyLength:
# When heartbeat happens
# And history moves (new element added at start, last element pruned)
checkUntilTimeout:
peerOutsideMesh.sentIHaves[i].len == 0
# Then sentIHaves messages are moved to the next element
var expectedHistory = newSeqWith(historyLength, 0)
let nextIndex = i + 1
if nextIndex < historyLength:
expectedHistory[nextIndex] = 1
# Until they reach last element and are pruned
checkUntilTimeout:
peerOutsideMesh.sentIHaves.mapIt(it.len) == expectedHistory
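
Both history tests above depend on the same shift-and-prune rotation: each heartbeat pushes a fresh window at the front and drops the oldest one. A dependency-light sketch that assumes nothing about the library's internals:

import std/sequtils

proc shiftHistory[T](history: var seq[seq[T]]) =
  history.insert(newSeq[T](), 0) # new empty window at the front
  discard history.pop() # prune the last (oldest) window

var hist = newSeqWith(3, newSeq[int]())
hist[0] = @[10, 20, 30, 40, 50] # e.g. five message IDs recorded this interval
shiftHistory(hist)
assert hist.mapIt(it.len) == @[0, 5, 0]
shiftHistory(hist)
shiftHistory(hist)
assert hist.mapIt(it.len) == @[0, 0, 0] # pruned off the end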

View File

@@ -0,0 +1,347 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronicles
import std/[sequtils]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../helpers
suite "GossipSub Integration - Mesh Management":
teardown:
checkTrackers()
asyncTest "Nodes graft peers according to DValues - numberOfNodes < dHigh":
let
numberOfNodes = 5
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
let expectedNumberOfPeers = numberOfNodes - 1
for i in 0 ..< numberOfNodes:
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len == expectedNumberOfPeers
node.fanout.len == 0
asyncTest "Nodes graft peers according to DValues - numberOfNodes > dHigh":
let
numberOfNodes = 15
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
let
expectedNumberOfPeers = numberOfNodes - 1
dHigh = 12
d = 6
dLow = 4
for i in 0 ..< numberOfNodes:
let node = nodes[i]
checkUntilTimeout:
node.gossipsub.getOrDefault(topic).len == expectedNumberOfPeers
node.mesh.getOrDefault(topic).len >= dLow and
node.mesh.getOrDefault(topic).len <= dHigh
node.fanout.len == 0
asyncTest "GossipSub should add remote peer topic subscriptions":
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
let gossip1 = GossipSub(nodes[0])
let gossip2 = GossipSub(nodes[1])
checkUntilTimeout:
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId)
asyncTest "GossipSub should add remote peer topic subscriptions if both peers are subscribed":
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[0].subscribe("foobar", handler)
nodes[1].subscribe("foobar", handler)
var subs: seq[Future[void]]
subs &= waitSub(nodes[1], nodes[0], "foobar")
subs &= waitSub(nodes[0], nodes[1], "foobar")
await allFuturesThrowing(subs)
let
gossip1 = GossipSub(nodes[0])
gossip2 = GossipSub(nodes[1])
check:
"foobar" in gossip1.topics
"foobar" in gossip2.topics
"foobar" in gossip1.gossipsub
"foobar" in gossip2.gossipsub
gossip1.gossipsub.hasPeerId("foobar", gossip2.peerInfo.peerId) or
gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
gossip2.gossipsub.hasPeerId("foobar", gossip1.peerInfo.peerId) or
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
asyncTest "GossipSub invalid topic subscription":
var handlerFut = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
handlerFut.complete(true)
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
# We must subscribe before setting the validator
nodes[0].subscribe("foobar", handler)
var gossip = GossipSub(nodes[0])
let invalidDetected = newFuture[void]()
gossip.subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
await connectNodesStar(nodes)
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
asyncTest "GossipSub test directPeers":
let nodes = generateNodes(2, gossip = true)
startNodesAndDeferStop(nodes)
await GossipSub(nodes[0]).addDirectPeer(
nodes[1].switch.peerInfo.peerId, nodes[1].switch.peerInfo.addrs
)
let invalidDetected = newFuture[void]()
GossipSub(nodes[0]).subscriptionValidator = proc(topic: string): bool =
if topic == "foobar":
try:
invalidDetected.complete()
except:
raise newException(Defect, "Exception during subscriptionValidator")
false
else:
true
# DO NOT SUBSCRIBE, CONNECTION SHOULD HAPPEN
### await connectNodesStar(nodes)
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
nodes[1].subscribe("foobar", handler)
await invalidDetected.wait(10.seconds)
asyncTest "mesh and gossipsub updated when topic subscribed and unsubscribed":
let
numberOfNodes = 5
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# When all of them are connected and subscribed to the same topic
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then mesh and gossipsub should be populated
for node in nodes:
check node.topics.contains(topic)
check node.gossipsub.hasKey(topic)
check node.gossipsub[topic].len() == numberOfNodes - 1
check node.mesh.hasKey(topic)
check node.mesh[topic].len() == numberOfNodes - 1
# When all nodes unsubscribe from the topic
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then the topic should be removed from mesh and gossipsub
for node in nodes:
check topic notin node.topics
check topic notin node.mesh
check topic notin node.gossipsub
asyncTest "handle subscribe and unsubscribe for multiple topics":
let
numberOfNodes = 3
topics = @["foobar1", "foobar2", "foobar3"]
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
# When nodes subscribe to multiple topics
await connectNodesStar(nodes)
for topic in topics:
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Then all nodes should be subscribed to the topics initially
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(it.topics.contains(topic))
nodes.allIt(it.gossipsub.getOrDefault(topic).len() == numberOfNodes - 1)
nodes.allIt(it.mesh.getOrDefault(topic).len() == numberOfNodes - 1)
# When they unsubscribe from all topics
for topic in topics:
unsubscribeAllNodes(nodes, topic, voidTopicHandler)
# Then topics should be removed from mesh and gossipsub
for i in 0 ..< topics.len:
let topic = topics[i]
checkUntilTimeout:
nodes.allIt(not it.topics.contains(topic))
nodes.allIt(topic notin it.gossipsub)
nodes.allIt(topic notin it.mesh)
asyncTest "Unsubscribe backoff":
const
numberOfNodes = 3
topic = "foobar"
unsubscribeBackoff = 1.seconds # 1s is the minimum
let nodes = generateNodes(
numberOfNodes, gossip = true, unsubscribeBackoff = unsubscribeBackoff
)
.toGossipSub()
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
# When Node0 unsubscribes from the topic
nodes[0].unsubscribe(topic, voidTopicHandler)
# And subscribes back straight away
nodes[0].subscribe(topic, voidTopicHandler)
# Then its mesh is pruned and peers have applied unsubscribeBackoff
# Waiting more than one heartbeat (60ms) and less than unsubscribeBackoff (1s)
await sleepAsync(unsubscribeBackoff.div(2))
check:
not nodes[0].mesh.hasKey(topic)
# When unsubscribeBackoff period is done
await sleepAsync(unsubscribeBackoff)
# Then on the next heartbeat mesh is rebalanced and peers are regrafted
check:
nodes[0].mesh[topic].len == numberOfNodes - 1
asyncTest "Prune backoff":
const
numberOfNodes = 9
topic = "foobar"
pruneBackoff = 1.seconds # 1s is the minimum
dValues = some(
DValues(
dLow: some(6),
dHigh: some(8),
d: some(6),
dLazy: some(6),
dScore: some(4),
dOut: some(2),
)
)
let
nodes = generateNodes(
numberOfNodes, gossip = true, dValues = dValues, pruneBackoff = pruneBackoff
)
.toGossipSub()
node0 = nodes[0]
startNodesAndDeferStop(nodes)
# Nodes are connected to Node0
for i in 1 ..< numberOfNodes:
await connectNodes(node0, nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == numberOfNodes - 1
# When DValues of Node0 are updated to lower than initial dValues
const newDValues = some(
DValues(
dLow: some(2),
dHigh: some(4),
d: some(3),
dLazy: some(3),
dScore: some(2),
dOut: some(2),
)
)
node0.parameters.applyDValues(newDValues)
# Then Node0 mesh is pruned to newDValues.dHigh length
# And pruned peers have applied pruneBackoff
checkUntilTimeout:
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# When DValues of Node0 are updated back to the initial dValues
node0.parameters.applyDValues(dValues)
# Waiting more than one heartbeat (60ms) and less than pruneBackoff (1s)
await sleepAsync(pruneBackoff.div(2))
check:
node0.mesh.getOrDefault(topic).len == newDValues.get.dHigh.get
# When pruneBackoff period is done
await sleepAsync(pruneBackoff)
# Then on the next heartbeat mesh is rebalanced and peers are regrafted to the initial d value
check:
node0.mesh.getOrDefault(topic).len == dValues.get.d.get

View File

@@ -0,0 +1,302 @@
{.used.}
import std/[sequtils]
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, floodsub]
import ../../../libp2p/protocols/pubsub/rpc/[messages, message]
import ../../helpers
suite "GossipSub Integration - Message Cache":
teardown:
checkTrackers()
asyncTest "Received messages are added to the message cache":
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
asyncTest "Message cache history shifts on heartbeat and is cleared on shift":
const
numberOfNodes = 2
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message and saves it in the cache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# When heartbeat happens, circular history shifts to the next position
# Waiting for 5(historyLength) heartbeats
await waitForHeartbeat(historyLength)
# Then history is cleared when the position with the message is reached again
# And message is removed
check:
nodes[1].mcache.window(topic).len == 0
not nodes[1].mcache.contains(messageId)
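
The distinction this test relies on: historyLength slots rotate on every heartbeat, while the window exposes only the newest historyGossip slots for gossip. A hedged sketch of that shape, not the library's type:

type SketchCache = object
  history: seq[seq[string]] # one slot of message IDs per heartbeat
  historyGossip: int # how many newest slots feed IHAVE gossip

proc put(c: var SketchCache, msgId: string) =
  c.history[0].add(msgId)

proc shift(c: var SketchCache) =
  c.history.insert(newSeq[string](), 0)
  discard c.history.pop()

proc window(c: SketchCache): seq[string] =
  for slot in c.history[0 ..< c.historyGossip]:
    result.add(slot)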
asyncTest "IHave propagation capped by history window":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to NodeOutsideMesh for received IHave messages
var (receivedIHaves, checkForIHaves) = createCheckForIHave()
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
nodeOutsideMesh.addOnRecvObserver(checkForIHaves)
# When NodeInsideMesh sends a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, newSeq[byte](1000)), 1
# On each heartbeat, Node0 retrieves messages in its mcache and sends IHave to NodeOutsideMesh
# On heartbeat, Node0 mcache advances to the next position (rotating the message cache window)
# Node0 will gossip about messages from the last few positions, depending on the mcache window size (historyGossip)
# By waiting more than 'historyGossip' (2x3 = 6) heartbeats, we ensure Node0 does not send IHave messages for messages older than the window size
await waitForHeartbeat(2 * historyGossip)
# Then nodeOutsideMesh receives 3 (historyGossip) IHave messages
check:
receivedIHaves[].len == historyGossip
asyncTest "Message is retrieved from cache when handling IWant and relayed to a peer outside the mesh":
# 3 Nodes, Node 0 <==> Node 1 and Node 0 <==> Node 2
# due to DValues: 1 peer in mesh and 1 peer only in gossip of Node 0
const
numberOfNodes = 3
topic = "foobar"
historyGossip = 3 # mcache window
historyLength = 5
let nodes = generateNodes(
numberOfNodes,
gossip = true,
historyGossip = historyGossip,
historyLength = historyLength,
dValues =
some(DValues(dLow: some(1), dHigh: some(1), d: some(1), dOut: some(0))),
)
.toGossipSub()
startNodesAndDeferStop(nodes)
for i in 1 ..< numberOfNodes:
await connectNodes(nodes[0], nodes[i])
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Add observer to Node0 for received IWant messages
var (receivedIWantsNode0, checkForIWant) = createCheckForIWant()
nodes[0].addOnRecvObserver(checkForIWant)
# Find Peer outside of mesh to which Node 0 will relay received message
let peerOutsideMesh =
nodes[0].gossipsub[topic].toSeq().filterIt(it notin nodes[0].mesh[topic])[0]
let nodeOutsideMesh = nodes.getNodeByPeerId(peerOutsideMesh.peerId)
# Add observer to NodeOutsideMesh for received messages
var (receivedMessagesNodeOutsideMesh, checkForMessage) = createCheckForMessages()
nodeOutsideMesh.addOnRecvObserver(checkForMessage)
# When NodeInsideMesh publishes a message to the topic
let peerInsideMesh = nodes[0].mesh[topic].toSeq()[0]
let nodeInsideMesh = nodes.getNodeByPeerId(peerInsideMesh.peerId)
tryPublish await nodeInsideMesh.publish(topic, "Hello!".toBytes()), 1
# Then Node0 receives the message from NodeInsideMesh and saves it in its cache
checkUntilTimeout:
nodes[0].mcache.window(topic).len == 1
let messageId = nodes[0].mcache.window(topic).toSeq()[0]
# When Node0 sends an IHave message to NodeOutsideMesh during a heartbeat
# Then NodeOutsideMesh responds with an IWant message to Node0
checkUntilTimeout:
receivedIWantsNode0[].anyIt(messageId in it.messageIDs)
# When Node0 handles the IWant message, it retrieves the message from its message cache using the MessageId
# Then Node0 relays the original message to NodeOutsideMesh
checkUntilTimeout:
messageId in
receivedMessagesNodeOutsideMesh[].mapIt(
nodeOutsideMesh.msgIdProvider(it).value()
)
asyncTest "Published and received messages are added to the seen cache":
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes a message to the topic
tryPublish await nodes[0].publish(topic, "Hello!".toBytes()), 1
# Then Node1 receives the message
# Get messageId from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 1
let messageId = nodes[1].mcache.window(topic).toSeq()[0]
# And both nodes save it in their seen cache
# Node0 when publishing, Node1 when receiving
check:
nodes[0].hasSeen(nodes[0].salt(messageId))
nodes[1].hasSeen(nodes[1].salt(messageId))
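
A hedged sketch of the salted seen-cache idea this test exercises; the return convention of addSeen (true when already present) is inferred from the `not addSeen(...)` usage below, and all names are illustrative:

import std/[sets, hashes]

let nodeSalt = "per-node-random-salt" # each node salts IDs differently
var seen = initHashSet[Hash]()

proc salt(msgId: seq[byte]): Hash =
  !$(hash(nodeSalt) !& hash(msgId))

proc addSeen(saltedId: Hash): bool =
  # true when the ID was already present, false when newly inserted
  if saltedId in seen:
    return true
  seen.incl(saltedId)
  false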
asyncTest "Received messages are dropped if they are already in seen cache":
# 3 Nodes, Node 0 <==> Node 1 and Node 2 not connected and not subscribed yet
const
numberOfNodes = 3
topic = "foobar"
let nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodes(nodes[0], nodes[1])
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# When Node0 publishes two messages to the topic
tryPublish await nodes[0].publish(topic, "Hello".toBytes()), 1
tryPublish await nodes[0].publish(topic, "World".toBytes()), 1
# Then Node1 receives the messages
# Getting messageIds from mcache
checkUntilTimeout:
nodes[1].mcache.window(topic).len == 2
let messageId1 = nodes[1].mcache.window(topic).toSeq()[0]
let messageId2 = nodes[1].mcache.window(topic).toSeq()[1]
# And Node2 hasn't received any messages yet
check:
nodes[2].mcache.window(topic).len == 0
# When Node2 connects with Node0 and subscribes to the topic
await connectNodes(nodes[0], nodes[2])
nodes[2].subscribe(topic, voidTopicHandler)
await waitForHeartbeat()
# And messageIds are added to node0PeerNode2's sentIHaves to allow IWant processing
let node0PeerNode2 = nodes[0].getPeerByPeerId(topic, nodes[2].peerInfo.peerId)
node0PeerNode2.sentIHaves[0].incl(messageId1)
node0PeerNode2.sentIHaves[0].incl(messageId2)
# And messageId1 is added to seen messages cache of Node2
check:
not nodes[2].addSeen(nodes[2].salt(messageId1))
# And Node2 sends IWant to Node0 requesting both messages
let iWantMessage =
ControlMessage(iwant: @[ControlIWant(messageIDs: @[messageId1, messageId2])])
let node2PeerNode0 = nodes[2].getPeerByPeerId(topic, nodes[0].peerInfo.peerId)
nodes[2].broadcast(
@[node2PeerNode0], RPCMsg(control: some(iWantMessage)), isHighPriority = false
)
await waitForHeartbeat()
# Then Node2 receives only messageId2 and messageId1 is dropped
check:
nodes[2].mcache.window(topic).len == 1
nodes[2].mcache.window(topic).toSeq()[0] == messageId2
asyncTest "Published messages are dropped if they are already in seen cache":
func customMsgIdProvider(m: Message): Result[MessageId, ValidationResult] =
ok("fixed_message_id_string".toBytes())
const
numberOfNodes = 2
topic = "foobar"
let nodes = generateNodes(
numberOfNodes, gossip = true, msgIdProvider = customMsgIdProvider
)
.toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
await waitForHeartbeat()
# Given Node0 has msgId already in seen cache
let data = "Hello".toBytes()
let msg = Message.init(
some(nodes[0].peerInfo), data, topic, some(nodes[0].msgSeqno), nodes[0].sign
)
let msgId = nodes[0].msgIdProvider(msg)
check:
not nodes[0].addSeen(nodes[0].salt(msgId.value()))
# When Node0 publishes the message to the topic
discard await nodes[0].publish(topic, data)
await waitForHeartbeat()
# Then Node1 doesn't receive the message
check:
nodes[1].mcache.window(topic).len == 0

View File

@@ -11,12 +11,12 @@
import std/[sequtils, enumerate]
import stew/byteutils
import utils
import sugar
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../libp2p/protocols/pubsub/rpc/[message, protobuf]
import ../helpers, ../utils/[futures]
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, timedcache]
import ../../../libp2p/protocols/pubsub/rpc/[message]
import ../../helpers, ../../utils/[futures]
const MsgIdSuccess = "msg id gen success"
@@ -72,62 +72,11 @@ proc createMessages(
return (iwantMessageIds, sentMessages)
suite "GossipSub Message Handling":
suite "GossipSub Integration - Message Handling":
teardown:
checkTrackers()
asyncTest "Drop messages of topics without subscription":
let topic = "foobar"
var (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
let peer = peers[i]
inc seqno
let msg = Message.init(conn.peerId, ("bar" & $i).toBytes(), topic, some(seqno))
await gossipSub.rpcHandler(peer, encodeRpcMsg(RPCMsg(messages: @[msg]), false))
check gossipSub.mcache.msgs.len == 0
asyncTest "subscription limits":
let gossipSub = TestGossipSub.init(newStandardSwitch())
gossipSub.topicsHigh = 10
var tooManyTopics: seq[string]
for i in 0 .. gossipSub.topicsHigh + 10:
tooManyTopics &= "topic" & $i
let lotOfSubs = RPCMsg.withSubs(tooManyTopics, true)
let conn = TestBufferStream.new(noop)
let peerId = randomPeerId()
conn.peerId = peerId
let peer = gossipSub.getPubSubPeer(peerId)
await gossipSub.rpcHandler(peer, encodeRpcMsg(lotOfSubs, false))
check:
gossipSub.gossipsub.len == gossipSub.topicsHigh
peer.behaviourPenalty > 0.0
await conn.close()
await gossipSub.switch.stop()
asyncTest "invalid message bytes":
let gossipSub = TestGossipSub.init(newStandardSwitch())
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
expect(CatchableError):
await gossipSub.rpcHandler(peer, @[byte 1, 2, 3])
await gossipSub.switch.stop()
asyncTest "e2e - Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
asyncTest "Split IWANT replies when individual messages are below maxSize but combined exceed maxSize":
# This test checks if two messages, each below the maxSize, are correctly split when their combined size exceeds maxSize.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -154,7 +103,7 @@ suite "GossipSub Message Handling":
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Discard IWANT replies when both messages individually exceed maxSize":
asyncTest "Discard IWANT replies when both messages individually exceed maxSize":
# This test checks if two messages, each exceeding the maxSize, are discarded and not sent.
# Expected: No messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -181,7 +130,7 @@ suite "GossipSub Message Handling":
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Process IWANT replies when both messages are below maxSize":
asyncTest "Process IWANT replies when both messages are below maxSize":
# This test checks if two messages, both below the maxSize, are correctly processed and sent.
# Expected: Both messages should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -208,7 +157,7 @@ suite "GossipSub Message Handling":
await teardownTest(gossip0, gossip1)
asyncTest "e2e - Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
asyncTest "Split IWANT replies when one message is below maxSize and the other exceeds maxSize":
# This test checks if, when given two messages where one is below maxSize and the other exceeds it, only the smaller message is processed and sent.
# Expected: Only the smaller message should be received.
let (gossip0, gossip1, receivedMessages) = await setupTest()
@@ -247,7 +196,7 @@ suite "GossipSub Message Handling":
let
numberOfNodes = 3
topic = "foobar"
nodes = generateNodes(numberOfNodes, gossip = true)
nodes = generateNodes(numberOfNodes, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
@@ -262,9 +211,9 @@ suite "GossipSub Message Handling":
# And subscribed to the same topic
subscribeAllNodes(nodes, topic, @[handler0, handler1, handler2])
await waitForPeersInTable(
nodes, topic, newSeqWith(numberOfNodes, 2), PeerTableType.Mesh
)
checkUntilTimeout:
nodes.allIt(it.mesh.getOrDefault(topic).len == numberOfNodes - 1)
# When node 0 sends a message
check (await nodes[0].publish(topic, "Hello!".toBytes())) == 2
@@ -424,9 +373,6 @@ suite "GossipSub Message Handling":
sendCounter = 0
validatedCounter = 0
proc handler(topic: string, data: seq[byte]) {.async.} =
discard
proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) =
inc recvCounter
@@ -446,8 +392,8 @@ suite "GossipSub Message Handling":
nodes[0].addObserver(obs0)
nodes[1].addObserver(obs1)
nodes[1].subscribe("foo", handler)
nodes[1].subscribe("bar", handler)
nodes[1].subscribe("foo", voidTopicHandler)
nodes[1].subscribe("bar", voidTopicHandler)
proc validator(
topic: string, message: Message
@@ -467,12 +413,12 @@ suite "GossipSub Message Handling":
# Send message that will be rejected by the receiver's validator
tryPublish await nodes[0].publish("bar", "Hello!".toBytes()), 1
check:
checkUntilTimeout:
recvCounter == 2
validatedCounter == 1
sendCounter == 2
asyncTest "e2e - GossipSub send over mesh A -> B":
asyncTest "GossipSub send over mesh A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
@@ -502,7 +448,7 @@ suite "GossipSub Message Handling":
gossip2.mesh.hasPeerId("foobar", gossip1.peerInfo.peerId)
not gossip2.fanout.hasPeerId("foobar", gossip1.peerInfo.peerId)
asyncTest "e2e - GossipSub should not send to source & peers who already seen":
asyncTest "GossipSub should not send to source & peers who already seen":
# 3 nodes: A, B, C
# A publishes, C relays, B is having a long validation
# so B should not send to anyone
@@ -568,7 +514,7 @@ suite "GossipSub Message Handling":
await bFinished
asyncTest "e2e - GossipSub send over floodPublish A -> B":
asyncTest "GossipSub send over floodPublish A -> B":
var passed: Future[bool] = newFuture[bool]()
proc handler(topic: string, data: seq[byte]) {.async.} =
check topic == "foobar"
@@ -598,7 +544,7 @@ suite "GossipSub Message Handling":
"foobar" notin gossip2.gossipsub
not gossip1.mesh.hasPeerId("foobar", gossip2.peerInfo.peerId)
asyncTest "e2e - GossipSub floodPublish limit":
asyncTest "GossipSub floodPublish limit":
let
nodes = setupNodes(20)
gossip1 = GossipSub(nodes[0])
@@ -610,7 +556,7 @@ suite "GossipSub Message Handling":
await connectNodes(nodes[1 ..^ 1], nodes[0])
await baseTestProcedure(nodes, gossip1, gossip1.parameters.dLow, 17)
asyncTest "e2e - GossipSub floodPublish limit with bandwidthEstimatebps = 0":
asyncTest "GossipSub floodPublish limit with bandwidthEstimatebps = 0":
let
nodes = setupNodes(20)
gossip1 = GossipSub(nodes[0])
@@ -623,7 +569,7 @@ suite "GossipSub Message Handling":
await connectNodes(nodes[1 ..^ 1], nodes[0])
await baseTestProcedure(nodes, gossip1, nodes.len - 1, nodes.len - 1)
asyncTest "e2e - GossipSub with multiple peers":
asyncTest "GossipSub with multiple peers":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -665,7 +611,7 @@ suite "GossipSub Message Handling":
check:
"foobar" in gossip.gossipsub
asyncTest "e2e - GossipSub with multiple peers (sparse)":
asyncTest "GossipSub with multiple peers (sparse)":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -714,7 +660,7 @@ suite "GossipSub Message Handling":
gossip.fanout.len == 0
gossip.mesh["foobar"].len > 0
asyncTest "e2e - GossipSub with multiple peers - control deliver (sparse)":
asyncTest "GossipSub with multiple peers - control deliver (sparse)":
var runs = 10
let nodes = generateNodes(runs, gossip = true, triggerSelf = true)
@@ -842,34 +788,3 @@ suite "GossipSub Message Handling":
publishResult == 0
results[0].isPending()
results[1].isPending()
# Check that ihave/iwant/graft/prune/idontwant control messages are parsed correctly:
# the value before encoding and after decoding must match a reference encoding produced with the protoc command-line tool
asyncTest "ControlMessage RPCMsg encoding and decoding":
let id: seq[byte] = @[123]
let message = RPCMsg(
control: some(
ControlMessage(
ihave: @[ControlIHave(topicID: "foobar", messageIDs: @[id])],
iwant: @[ControlIWant(messageIDs: @[id])],
graft: @[ControlGraft(topicID: "foobar")],
prune: @[ControlPrune(topicID: "foobar", backoff: 10.uint64)],
idontwant: @[ControlIWant(messageIDs: @[id])],
)
)
)
# data encoded using the protoc command-line tool
let expectedEncoded: seq[byte] =
@[
26, 45, 10, 11, 10, 6, 102, 111, 111, 98, 97, 114, 18, 1, 123, 18, 3, 10, 1,
123, 26, 8, 10, 6, 102, 111, 111, 98, 97, 114, 34, 10, 10, 6, 102, 111, 111, 98,
97, 114, 24, 10, 42, 3, 10, 1, 123,
]
let actualEncoded = encodeRpcMsg(message, true)
check:
actualEncoded == expectedEncoded
let actualDecoded = decodeRpcMsg(expectedEncoded).value
check:
actualDecoded == message
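For reference, the expected bytes are ordinary protobuf wire format and can be read off by hand (each tag byte is fieldNumber shl 3 or wireType, and every nested message is length-prefixed):

# 26 45                       -> RPCMsg control (field 3), 45 payload bytes
#   10 11                     ->   ihave (field 1), 11 bytes:
#     10 6 "foobar"           ->     topicID = "foobar"
#     18 1 123                ->     messageIDs = @[@[123]]
#   18 3 10 1 123             ->   iwant (field 2): messageIDs = @[@[123]]
#   26 8 10 6 "foobar"        ->   graft (field 3): topicID = "foobar"
#   34 10 10 6 "foobar" 24 10 ->   prune (field 4): topicID = "foobar", backoff = 10
#   42 3 10 1 123             ->   idontwant (field 5): messageIDs = @[@[123]]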


@@ -0,0 +1,535 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import std/[sequtils, strutils]
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable, pubsubpeer]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
import ../../utils/[futures]
suite "GossipSub Integration - Scoring":
const topic = "foobar"
teardown:
checkTrackers()
asyncTest "Flood publish to all peers with score above threshold, regardless of subscription":
let
numberOfNodes = 3
nodes = generateNodes(numberOfNodes, gossip = true, floodPublish = true)
g0 = GossipSub(nodes[0])
startNodesAndDeferStop(nodes)
# Nodes 1 and 2 are connected to node 0
await connectNodes(nodes[0], nodes[1])
await connectNodes(nodes[0], nodes[2])
let (handlerFut1, handler1) = createCompleteHandler()
let (handlerFut2, handler2) = createCompleteHandler()
# Nodes are subscribed to the same topic
nodes[1].subscribe(topic, handler1)
nodes[2].subscribe(topic, handler2)
await waitForHeartbeat()
# Given node 2's score is below the threshold
for peer in g0.gossipsub.getOrDefault(topic):
if peer.peerId == nodes[2].peerInfo.peerId:
peer.score = (g0.parameters.publishThreshold - 1)
# When node 0 publishes a message to topic "foo"
let message = "Hello!".toBytes()
tryPublish await nodes[0].publish(topic, message), 1
# Then only node 1 should receive the message
let results = await waitForStates(@[handlerFut1, handlerFut2], HEARTBEAT_TIMEOUT)
check:
results[0].isCompleted(true)
results[1].isPending()
asyncTest "Should not rate limit decodable messages below the size allowed":
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
nodes[0].broadcast(
nodes[0].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](10))]),
isHighPriority = true,
)
await waitForHeartbeat()
check:
currentRateLimitHits() == rateLimitHits
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
nodes[1].parameters.disconnectPeerAboveRateLimit = true
nodes[0].broadcast(
nodes[0].mesh["foobar"],
RPCMsg(messages: @[Message(topic: "foobar", data: newSeq[byte](12))]),
isHighPriority = true,
)
await waitForHeartbeat()
check:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
currentRateLimitHits() == rateLimitHits
asyncTest "Should rate limit undecodable messages above the size allowed":
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
# Simulate sending an undecodable message
await nodes[1].peers[nodes[0].switch.peerInfo.peerId].sendEncoded(
newSeqWith(33, 1.byte), isHighPriority = true
)
await waitForHeartbeat()
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
nodes[1].parameters.disconnectPeerAboveRateLimit = true
await nodes[0].peers[nodes[1].switch.peerInfo.peerId].sendEncoded(
newSeqWith(35, 1.byte), isHighPriority = true
)
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
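The sizes in these rate-limit tests line up with the configured budget: `overheadRateLimit = Opt.some((20, 1.millis))` allows 20 bytes of overhead per millisecond, so the 33- and 35-byte undecodable blobs exceed it in a single send, while the 10- and 12-byte decodable messages of the previous test never touch it. A back-of-the-envelope check (assuming, as these tests do, that every byte of an undecodable message counts as overhead):

const overheadBudgetBytes = 20 # from Opt.some((20, 1.millis)) above
for size in [10, 12, 33, 35]:
  echo size, " bytes -> exceeds budget: ", size > overheadBudgetBytes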
asyncTest "Should rate limit decodable messages above the size allowed":
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
let msg = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: topic,
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](33)))],
backoff: 123'u64,
)
]
)
)
)
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
await waitForHeartbeat()
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
nodes[1].parameters.disconnectPeerAboveRateLimit = true
let msg2 = RPCMsg(
control: some(
ControlMessage(
prune:
@[
ControlPrune(
topicID: topic,
peers: @[PeerInfoMsg(peerId: PeerId(data: newSeq[byte](35)))],
backoff: 123'u64,
)
]
)
)
)
nodes[0].broadcast(nodes[0].mesh[topic], msg2, isHighPriority = true)
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
asyncTest "Should rate limit invalid messages above the size allowed":
let
nodes = generateNodes(
2,
gossip = true,
overheadRateLimit = Opt.some((20, 1.millis)),
verifySignature = false,
# Avoid being disconnected by failing signature verification
)
.toGossipSub()
rateLimitHits = currentRateLimitHits()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
subscribeAllNodes(nodes, topic, voidTopicHandler)
await waitForHeartbeat()
proc execValidator(
topic: string, message: messages.Message
): Future[ValidationResult] {.async.} =
return ValidationResult.Reject
nodes[0].addValidator(topic, execValidator)
nodes[1].addValidator(topic, execValidator)
let msg = RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](40))])
nodes[0].broadcast(nodes[0].mesh[topic], msg, isHighPriority = true)
await waitForHeartbeat()
check:
currentRateLimitHits() == rateLimitHits + 1
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == true
# Disconnect peer when rate limiting is enabled
nodes[1].parameters.disconnectPeerAboveRateLimit = true
nodes[0].broadcast(
nodes[0].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: newSeq[byte](35))]),
isHighPriority = true,
)
checkUntilTimeout:
nodes[1].switch.isConnected(nodes[0].switch.peerInfo.peerId) == false
currentRateLimitHits() == rateLimitHits + 2
asyncTest "DirectPeers: don't kick direct peer with low score":
let nodes = generateNodes(2, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await nodes.addDirectPeerStar()
nodes[1].parameters.disconnectBadPeers = true
nodes[1].parameters.graylistThreshold = 100000
var (handlerFut, handler) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handler)
await waitForHeartbeat()
nodes[1].updateScores()
# peer shouldn't be in our mesh
check:
topic notin nodes[1].mesh
nodes[1].peerStats[nodes[0].switch.peerInfo.peerId].score <
nodes[1].parameters.graylistThreshold
tryPublish await nodes[0].publish(topic, toBytes("hellow")), 1
# Without directPeers, this would fail
var futResult = await waitForState(handlerFut)
check:
futResult.isCompleted(true)
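What this test pins down is that the direct-peer relationship overrides score gating: node0's score at node1 sits below even the graylist threshold, yet the message is still delivered. A hedged sketch of that selection rule (illustrative names, not nim-libp2p internals):

# gossipsub v1.1: direct peers are always published to, regardless of score.
func includeInPublishSet(isDirect: bool, score, publishThreshold: float): bool =
  isDirect or score >= publishThreshold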
asyncTest "Peers disconnections mechanics":
const numberOfNodes = 10
let nodes =
generateNodes(numberOfNodes, gossip = true, triggerSelf = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
var seen: Table[string, int]
var seenFut = newFuture[void]()
for i in 0 ..< numberOfNodes:
let dialer = nodes[i]
var handler: TopicHandler
closureScope:
var peerName = $dialer.peerInfo.peerId
handler = proc(topicName: string, data: seq[byte]) {.async.} =
seen.mgetOrPut(peerName, 0).inc()
check topicName == topic
if not seenFut.finished() and seen.len >= numberOfNodes:
seenFut.complete()
dialer.subscribe(topic, handler)
await waitSubGraph(nodes, topic)
# ensure peer stats are stored and retained properly
check:
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
await seenFut.wait(2.seconds)
check:
seen.len >= numberOfNodes
for k, v in seen.pairs:
check:
v >= 1
for node in nodes:
check:
topic in node.gossipsub
node.fanout.len == 0
node.mesh[topic].len > 0
# Removing some subscriptions
for i in 0 ..< numberOfNodes:
if i mod 3 != 0:
nodes[i].unsubscribeAll(topic)
# Waiting 2 heartbeats
await nodes[0].waitForHeartbeatByEvent(2)
# ensure peer stats are stored and retained properly
check:
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
# Adding subscriptions again
for i in 0 ..< numberOfNodes:
if i mod 3 != 0:
nodes[i].subscribe(topic, voidTopicHandler)
# Waiting 2 heartbeats
await nodes[0].waitForHeartbeatByEvent(2)
# ensure peer stats are stored and retained properly
check:
nodes[0].peerStats.len == numberOfNodes - 1 # minus self
asyncTest "DecayInterval":
const
topic = "foobar"
decayInterval = 50.milliseconds
let nodes =
generateNodes(2, gossip = true, decayInterval = decayInterval).toGossipSub()
nodes.setDefaultTopicParams(topic)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
var (handlerFut, handler) = createCompleteHandler()
nodes[0].subscribe(topic, voidTopicHandler)
nodes[1].subscribe(topic, handler)
tryPublish await nodes[0].publish(topic, toBytes("hello")), 1
var futResult = await waitForState(handlerFut)
check:
futResult.isCompleted(true)
nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries =
100
nodes[0].topicParams[topic].meshMessageDeliveriesDecay = 0.9
# Expect ~5 decay ticks during the sleep; the range below allows 4..6
await sleepAsync(decayInterval * 5)
check:
nodes[0].peerStats[nodes[1].peerInfo.peerId].topicInfos[topic].meshMessageDeliveries in
50.0 .. 66.0
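The accepted 50.0 .. 66.0 band follows directly from the decay arithmetic: starting from 100 with a 0.9 decay factor, four to six ticks may plausibly fire while sleeping for five intervals:

import math

# 100 * 0.9^4 = 65.61, 100 * 0.9^5 = 59.05, 100 * 0.9^6 = 53.14; all inside
# the 50.0 .. 66.0 window checked above.
for ticks in 4 .. 6:
  echo ticks, " decays -> ", 100.0 * pow(0.9, float(ticks))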
asyncTest "Nodes publishing invalid messages are penalised and disconnected":
# Given GossipSub nodes with Topic Params
const numberOfNodes = 3
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
verifySignature = false,
# Disable signature verification to isolate validation penalties
decayInterval = 200.milliseconds, # scoring heartbeat interval
heartbeatInterval = 5.seconds,
# heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
publishThreshold = -150.0,
graylistThreshold = -200.0,
disconnectBadPeers = false,
)
.toGossipSub()
centerNode = nodes[0]
node1peerId = nodes[1].peerInfo.peerId
node2peerId = nodes[2].peerInfo.peerId
nodes.setDefaultTopicParams(topic)
for node in nodes:
node.topicParams[topic].invalidMessageDeliveriesWeight = -10.0
node.topicParams[topic].invalidMessageDeliveriesDecay = 0.9
startNodesAndDeferStop(nodes)
# And Node 0 is center node, connected to others
await connectNodes(nodes[0], nodes[1]) # center to Node 1 (valid messages)
await connectNodes(nodes[0], nodes[2]) # center to Node 2 (invalid messages)
nodes.subscribeAllNodes(topic, voidTopicHandler)
# And center node has message validator: accept from node 1, reject from node 2
var validatedMessageCount = 0
proc validationHandler(
topic: string, message: Message
): Future[ValidationResult] {.async.} =
validatedMessageCount.inc
if string.fromBytes(message.data).contains("invalid"):
return ValidationResult.Reject # reject invalid messages
else:
return ValidationResult.Accept
nodes[0].addValidator(topic, validationHandler)
# 1st scoring heartbeat
checkUntilTimeout:
centerNode.gossipsub.getOrDefault(topic).len == numberOfNodes - 1
centerNode.getPeerScore(node1peerId) > 0
centerNode.getPeerScore(node2peerId) > 0
# When messages are broadcasted
const messagesToSend = 5
for i in 0 ..< messagesToSend:
nodes[1].broadcast(
nodes[1].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: ("valid_" & $i).toBytes())]),
isHighPriority = true,
)
nodes[2].broadcast(
nodes[2].mesh[topic],
RPCMsg(messages: @[Message(topic: topic, data: ("invalid_" & $i).toBytes())]),
isHighPriority = true,
)
# And messages are processed
# Then invalidMessageDeliveries stats are applied
checkUntilTimeout:
validatedMessageCount == messagesToSend * (numberOfNodes - 1)
centerNode.getPeerTopicInfo(node1peerId, topic).invalidMessageDeliveries == 0.0
# valid messages
centerNode.getPeerTopicInfo(node2peerId, topic).invalidMessageDeliveries == 5.0
# invalid messages
# When scoring heartbeat occurs (2nd scoring heartbeat)
# Then peer scores are calculated
checkUntilTimeout:
# node1: p1 (time in mesh) + p2 (first message deliveries)
centerNode.getPeerScore(node1peerId) > 5.0 and
centerNode.getPeerScore(node1peerId) < 6.0
# node2: p1 (time in mesh) - p4 (invalid message deliveries)
centerNode.getPeerScore(node2peerId) < -249.0 and
centerNode.getPeerScore(node2peerId) > -250.0
# all peers are still connected
centerNode.mesh[topic].toSeq().len == 2
# When disconnecting peers with bad score (score < graylistThreshold) is enabled
for node in nodes:
node.parameters.disconnectBadPeers = true
# Then peers with bad score are disconnected on scoring heartbeat (3rd scoring heartbeat)
checkUntilTimeout:
centerNode.mesh[topic].toSeq().len == 1
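The narrow -250 < score < -249 window is the P4 term of gossipsub v1.1 scoring, which squares the invalid-delivery counter before weighting it: with invalidMessageDeliveriesWeight = -10.0 and five rejected messages, P4 = -10 * 5^2 = -250, and the small positive P1/P2 contributions lift node2's total just above that floor:

let
  invalidDeliveries = 5.0 # rejected messages counted above
  invalidWeight = -10.0   # invalidMessageDeliveriesWeight set above
  p4 = invalidWeight * invalidDeliveries * invalidDeliveries
echo p4 # -250.0; P1/P2 add back less than 1.0, hence -250 < score < -249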
asyncTest "Nodes not meeting Mesh Message Deliveries Threshold are penalised":
# Given GossipSub nodes with Topic Params
const numberOfNodes = 2
let
nodes = generateNodes(
numberOfNodes,
gossip = true,
decayInterval = 200.milliseconds, # scoring heartbeat interval
heartbeatInterval = 5.seconds,
# heartbeatInterval >>> decayInterval to prevent pruning peers with bad score
disconnectBadPeers = false,
)
.toGossipSub()
node1PeerId = nodes[1].peerInfo.peerId
nodes.setDefaultTopicParams(topic)
for node in nodes:
node.topicParams[topic].meshMessageDeliveriesThreshold = 5
node.topicParams[topic].meshMessageDeliveriesActivation = 1.milliseconds
# active from the start
node.topicParams[topic].meshMessageDeliveriesDecay = 0.9
node.topicParams[topic].meshMessageDeliveriesWeight = -10.0
node.topicParams[topic].meshFailurePenaltyDecay = 0.9
node.topicParams[topic].meshFailurePenaltyWeight = -5.0
startNodesAndDeferStop(nodes)
# And Nodes are connected and subscribed to the topic
await connectNodes(nodes[0], nodes[1])
nodes.subscribeAllNodes(topic, voidTopicHandler)
# When scoring heartbeat occurs
# Then Peer has negative score due to active meshMessageDeliveries deficit
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
nodes[0].mesh.getOrDefault(topic).len == numberOfNodes - 1
# p1 (time in mesh) - p3 (mesh message deliveries)
nodes[0].getPeerScore(node1PeerId) < -249.0
# When Peer is unsubscribed
nodes[1].unsubscribe(topic, voidTopicHandler)
# Then meshFailurePenalty is applied due to active meshMessageDeliveries deficit
checkUntilTimeout:
nodes[0].getPeerTopicInfo(node1PeerId, topic).meshFailurePenalty == 25
# When next scoring heartbeat occurs
# Then Peer has negative score
checkUntilTimeout:
# p3b (mesh failure penalty) [p1 and p3 not calculated when peer was pruned]
nodes[0].getPeerScore(node1PeerId) == -125.0
# When Peer subscribes again
nodes[1].subscribe(topic, voidTopicHandler)
# Then Peer is not grafted to the mesh due to negative score (score was retained)
checkUntilTimeout:
nodes[0].gossipsub.getOrDefault(topic).len == numberOfNodes - 1
nodes[0].mesh.getOrDefault(topic).len == 0
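The constants checked in this test fall straight out of the scoring formulas. The delivery deficit is meshMessageDeliveriesThreshold - deliveries = 5 - 0 = 5; while the peer is meshed, P3 = meshMessageDeliveriesWeight * deficit^2 = -10 * 25 = -250, which dominates the small positive P1 and yields score < -249. On prune the deficit is snapshotted as meshFailurePenalty = deficit^2 = 25, after which only P3b = meshFailurePenaltyWeight * 25 = -5 * 25 = -125 is charged:

let
  deficit = 5.0 - 0.0                     # threshold minus actual deliveries
  p3 = -10.0 * deficit * deficit          # while in mesh: -250.0
  meshFailurePenalty = deficit * deficit  # snapshotted on prune: 25.0
  p3b = -5.0 * meshFailurePenalty         # after prune: -125.0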


@@ -0,0 +1,217 @@
# Nim-LibP2P
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import unittest2
import chronos
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, pubsub]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
import ../../utils/futures
suite "GossipSub Integration - Signature Flags":
const
topic = "foobar"
testData = "test message".toBytes()
teardown:
checkTrackers()
asyncTest "Default - messages are signed when sign=true and contain fromPeer and seqno when anonymize=false":
let nodes = generateNodes(
2, gossip = true, sign = true, verifySignature = true, anonymize = false
)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
var (receivedMessages, checkForMessage) = createCheckForMessages()
nodes[1].addOnRecvObserver(checkForMessage)
tryPublish await nodes[0].publish(topic, testData), 1
checkUntilTimeout:
receivedMessages[].len > 0
let receivedMessage = receivedMessages[][0]
check:
receivedMessage.data == testData
receivedMessage.fromPeer.data.len > 0
receivedMessage.seqno.len > 0
receivedMessage.signature.len > 0
receivedMessage.key.len > 0
asyncTest "Sign flag - messages are not signed when sign=false":
let nodes = generateNodes(
2, gossip = true, sign = false, verifySignature = false, anonymize = false
)
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
var (receivedMessages, checkForMessage) = createCheckForMessages()
nodes[1].addOnRecvObserver(checkForMessage)
tryPublish await nodes[0].publish(topic, testData), 1
checkUntilTimeout:
receivedMessages[].len > 0
let receivedMessage = receivedMessages[][0]
check:
receivedMessage.data == testData
receivedMessage.signature.len == 0
receivedMessage.key.len == 0
asyncTest "Anonymize flag - messages are anonymous when anonymize=true":
let nodes = generateNodes(
2, gossip = true, sign = true, verifySignature = true, anonymize = true
) # anonymize = true takes precedence
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes.subscribeAllNodes(topic, voidTopicHandler)
var (receivedMessages, checkForMessage) = createCheckForMessages()
nodes[1].addOnRecvObserver(checkForMessage)
let testData = "anonymous message".toBytes()
tryPublish await nodes[0].publish(topic, testData), 1
checkUntilTimeout:
receivedMessages[].len > 0
let receivedMessage = receivedMessages[][0]
check:
receivedMessage.data == testData
receivedMessage.fromPeer.data.len == 0
receivedMessage.seqno.len == 0
receivedMessage.signature.len == 0
receivedMessage.key.len == 0
type NodeConfig = object
sign: bool
verify: bool
anonymize: bool
type Scenario = object
senderConfig: NodeConfig
receiverConfig: NodeConfig
shouldWork: bool
let scenarios: seq[Scenario] =
@[
# valid combos
# S default, R default
Scenario(
senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
shouldWork: true,
),
# S default, R anonymous
Scenario(
senderConfig: NodeConfig(sign: true, verify: true, anonymize: false),
receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
shouldWork: true,
),
# S anonymous, R anonymous
Scenario(
senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
receiverConfig: NodeConfig(sign: false, verify: false, anonymize: true),
shouldWork: true,
),
# S only sign, R only verify
Scenario(
senderConfig: NodeConfig(sign: true, verify: false, anonymize: false),
receiverConfig: NodeConfig(sign: false, verify: true, anonymize: false),
shouldWork: true,
),
# S anonymous (anonymize overrides sign), R no verification
Scenario(
senderConfig: NodeConfig(sign: true, verify: true, anonymize: true),
receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
shouldWork: true,
),
# S anonymous and unsigned, R signs but does not verify
Scenario(
senderConfig: NodeConfig(sign: false, verify: true, anonymize: true),
receiverConfig: NodeConfig(sign: true, verify: false, anonymize: false),
shouldWork: true,
),
# S unsigned, R unsigned
Scenario(
senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
receiverConfig: NodeConfig(sign: false, verify: false, anonymize: false),
shouldWork: true,
),
# invalid combos
# S anonymous, R default
Scenario(
senderConfig: NodeConfig(sign: false, verify: false, anonymize: true),
receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
shouldWork: false,
),
# S unsigned, R anonymous but verify
Scenario(
senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
receiverConfig: NodeConfig(sign: true, verify: true, anonymize: true),
shouldWork: false,
),
# S unsigned, R default
Scenario(
senderConfig: NodeConfig(sign: false, verify: false, anonymize: false),
receiverConfig: NodeConfig(sign: true, verify: true, anonymize: false),
shouldWork: false,
),
]
for scenario in scenarios:
let title = "Compatibility matrix: " & $scenario
asyncTest title:
let
sender = generateNodes(
1,
gossip = true,
sign = scenario.senderConfig.sign,
verifySignature = scenario.senderConfig.verify,
anonymize = scenario.senderConfig.anonymize,
)[0]
receiver = generateNodes(
1,
gossip = true,
sign = scenario.receiverConfig.sign,
verifySignature = scenario.receiverConfig.verify,
anonymize = scenario.receiverConfig.anonymize,
)[0]
nodes = @[sender, receiver]
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
let (messageReceivedFut, handler) = createCompleteHandler()
nodes.subscribeAllNodes(topic, handler)
await waitForHeartbeat()
discard await sender.publish(topic, testData)
let messageReceived = await waitForState(messageReceivedFut, HEARTBEAT_TIMEOUT)
check:
if scenario.shouldWork:
messageReceived.isCompleted(true)
else:
messageReceived.isCancelled()
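The whole matrix collapses to one invariant: delivery fails exactly when the receiver verifies signatures but the incoming message is unsigned, and anonymize on the sender implies unsigned even when sign is set. A predicate over the NodeConfig type defined above reproduces every shouldWork value in the scenarios:

func expectDelivery(sender, receiver: NodeConfig): bool =
  let messageSigned = sender.sign and not sender.anonymize
  messageSigned or not receiver.verify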


@@ -0,0 +1,61 @@
# Nim-LibP2P
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0 ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.used.}
import chronos
import stew/byteutils
import ../utils
import ../../../libp2p/protocols/pubsub/[gossipsub, peertable]
import ../../../libp2p/protocols/pubsub/rpc/[messages]
import ../../helpers
suite "GossipSub Integration - Skip MCache Support":
teardown:
checkTrackers()
asyncTest "publish with skipMCache prevents message from being added to mcache":
let
topic = "test"
nodes = generateNodes(2, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)
let publishData = "hello".toBytes()
tryPublish await nodes[0].publish(
topic, publishData, publishParams = some(PublishParams(skipMCache: true))
), 1
check:
nodes[0].mcache.msgs.len == 0
asyncTest "publish without skipMCache adds message to mcache":
let
topic = "test"
nodes = generateNodes(2, gossip = true).toGossipSub()
startNodesAndDeferStop(nodes)
await connectNodesStar(nodes)
nodes[1].subscribe(topic, voidTopicHandler)
await waitSub(nodes[0], nodes[1], topic)
let publishData = "hello".toBytes()
tryPublish await nodes[0].publish(
topic, publishData, publishParams = none(PublishParams)
), 1
check:
nodes[0].mcache.msgs.len == 1


@@ -0,0 +1,8 @@
{.used.}
import
testfloodsub, testgossipsubcontrolmessages, testgossipsubcustomconn,
testgossipsubfanout, testgossipsubgossip, testgossipsubgossipcompatibility,
testgossipsubheartbeat, testgossipsubmeshmanagement, testgossipsubmessagecache,
testgossipsubmessagehandling, testgossipsubscoring, testgossipsubsignatureflags,
testgossipsubskipmcache


@@ -0,0 +1,609 @@
{.used.}
import std/[sequtils, tables]
import stew/byteutils
import utils
import chronicles
import ../../libp2p/protocols/pubsub/[gossipsub, mcache, peertable]
import ../../libp2p/protocols/pubsub/rpc/[message]
import ../helpers
import ../utils/[futures]
suite "GossipSub Behavior":
const
topic = "foobar"
MsgIdSuccess = "msg id gen success"
teardown:
checkTrackers()
asyncTest "handleIHave - peers with no budget should not request messages":
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the peer has no budget to request messages
peer.iHaveBudget = 0
# When a peer sends an IHAVE announcing a message that `gossipSub` has
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should not generate an IWant message for it
check:
iwants.messageIDs.len == 0
gossipSub.mcache.msgs.len == 1
asyncTest "handleIHave - peers with budget should request messages":
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IHAVE message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIHave(topicID: topic, messageIDs: @[id, id, id])
# Given the budget is not 0 (because it's not been overridden)
check:
peer.iHaveBudget > 0
# When a peer sends an IHAVE announcing a message that `gossipSub` has not seen yet
let iwants = gossipSub.handleIHave(peer, @[msg])
# Then `gossipSub` should generate an IWant message for the message
check:
iwants.messageIDs.len == 1
gossipSub.mcache.msgs.len == 1
asyncTest "handleIHave - do not handle IHave if peer score is below GossipThreshold threshold":
const gossipThreshold = -100.0
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# Given peer with score below GossipThreshold
gossipSub.parameters.gossipThreshold = gossipThreshold
peer.score = gossipThreshold - 100.0
# and IHave message
let id = @[0'u8, 1, 2, 3]
let msg = ControlIHave(topicID: topic, messageIDs: @[id])
# When IHave is handled
let iWant = gossipSub.handleIHave(peer, @[msg])
# Then IHave is ignored
check:
iWant.messageIDs.len == 0
asyncTest "handleIWant - peers with budget should request messages":
var (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.subscribe(topic, voidTopicHandler)
let peerId = randomPeerId()
let peer = gossipSub.getPubSubPeer(peerId)
# Add message to `gossipSub`'s message cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[^1].incl(id)
# Build an IWANT message that contains the same message ID three times
# If ids are repeated, only one request should be generated
let msg = ControlIWant(messageIDs: @[id, id, id])
# When a peer makes an IWANT request for a message that `gossipSub` has
let messages = gossipSub.handleIWant(peer, @[msg])
# Then `gossipSub` should return the message
check:
messages.len == 1
gossipSub.mcache.msgs.len == 1
asyncTest "handleIWant - do not handle IWant if peer score is below GossipThreshold threshold":
const gossipThreshold = -100.0
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# Given peer with score below GossipThreshold
gossipSub.parameters.gossipThreshold = gossipThreshold
peer.score = gossipThreshold - 100.0
# and IWant message with MsgId in mcache and sentIHaves
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message())
peer.sentIHaves[0].incl(id)
let msg = ControlIWant(messageIDs: @[id])
# When IWant is handled
let messages = gossipSub.handleIWant(peer, @[msg])
# Then IWant is ignored
check:
messages.len == 0
asyncTest "handleIDontWant - Max IDONTWANT messages per heartbeat per peer":
# Given GossipSub node with 1 peer
let (gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
defer:
await teardownGossipSub(gossipSub, conns)
let peer = peers[0]
# And a sequence of iDontWants carrying more message IDs (1200) than the allowed maximum
proc generateMessageIds(count: int): seq[MessageId] =
return (0 ..< count).mapIt(("msg_id_" & $it & $Moment.now()).toBytes())
let iDontWants =
@[
ControlIWant(messageIDs: generateMessageIds(600)),
ControlIWant(messageIDs: generateMessageIds(600)),
]
# When node handles iDontWants
gossipSub.handleIDontWant(peer, iDontWants)
# Then it saves at most IDontWantMaxCount message IDs in the history and drops the rest
check:
peer.iDontWants[0].len == IDontWantMaxCount
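An illustrative, self-contained cap-on-insert pattern consistent with the check above; the 1000 used here is an assumed stand-in for IDontWantMaxCount, and whether nim-libp2p drops new IDs or evicts old ones is not shown by this test:

import std/sets
import stew/byteutils

const maxCount = 1000 # assumption: stand-in for IDontWantMaxCount
var window: HashSet[seq[byte]]
for i in 0 ..< 1200: # same total as the two 600-ID batches above
  if window.len >= maxCount:
    break # IDs past the cap are dropped in this sketch
  window.incl(("msg_id_" & $i).toBytes())
echo window.len # 1000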
asyncTest "handlePrune - do not trigger PeerExchange on Prune if peer score is below GossipThreshold threshold":
const gossipThreshold = -100.0
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# Given peer with score below GossipThreshold
gossipSub.parameters.gossipThreshold = gossipThreshold
peer.score = gossipThreshold - 100.0
# and RoutingRecordsHandler added
var routingRecordsFut = newFuture[void]()
gossipSub.routingRecordsHandler.add(
proc(peer: PeerId, tag: string, peers: seq[RoutingRecordsPair]) =
routingRecordsFut.complete()
)
# and Prune message
let msg = ControlPrune(
topicID: topic, peers: @[PeerInfoMsg(peerId: peer.peerId)], backoff: 123'u64
)
# When Prune is handled
gossipSub.handlePrune(peer, @[msg])
# Then handler is not triggered
let result = await waitForState(routingRecordsFut, HEARTBEAT_TIMEOUT)
check:
result.isCancelled()
asyncTest "handleGraft - do not graft when peer score below PublishThreshold threshold":
const publishThreshold = -100.0
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# Given peer with score below publishThreshold
gossipSub.parameters.publishThreshold = publishThreshold
peer.score = publishThreshold - 100.0
# and Graft message
let msg = ControlGraft(topicID: topic)
# When Graft is handled
let prunes = gossipSub.handleGraft(peer, @[msg])
# Then peer is ignored and not added to prunes
check:
gossipSub.mesh[topic].len == 0
prunes.len == 0
asyncTest "handleGraft - penalizes direct peer attempting to graft":
# Given a GossipSub instance with one direct peer
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# And the peer is configured as a direct peer
gossipSub.parameters.directPeers[peer.peerId] = @[]
# And initial behavior penalty is zero
check:
peer.behaviourPenalty == 0.0
# When a GRAFT message is handled
let graftMsg = ControlGraft(topicID: topic)
let prunes = gossipSub.handleGraft(peer, @[graftMsg])
# Then the peer is penalized with behavior penalty
# And receives PRUNE in response
check:
peer.behaviourPenalty == 0.1
prunes.len == 1
asyncTest "handleGraft - penalizes peer for grafting during backoff period":
# Given a GossipSub instance with one peer
let
(gossipSub, conns, peers) = setupGossipSubWithPeers(1, topic)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# And the peer is in backoff period for the topic
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]())[peer.peerId] =
Moment.now() + 1.hours
# And initial behavior penalty is zero
check:
peer.behaviourPenalty == 0.0
# When a GRAFT message is handled
let graftMsg = ControlGraft(topicID: topic)
let prunes = gossipSub.handleGraft(peer, @[graftMsg])
# Then the peer is penalized with behavior penalty
# And receives PRUNE in response
check:
peer.behaviourPenalty == 0.1
prunes.len == 1
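Both violations charge the same flat 0.1 increment to behaviourPenalty, but its score impact is not flat: in gossipsub v1.1 the P7 term squares the accumulated counter before applying a negative weight, so repeat offenders are punished quadratically. A small sketch with an assumed, purely illustrative weight:

var behaviourPenalty = 0.0
let behaviourPenaltyWeight = -1.0 # assumption: illustrative weight only
for offense in 1 .. 3:
  behaviourPenalty += 0.1 # flat increment per violation, as tested above
  echo offense, " offenses -> P7 = ",
    behaviourPenaltyWeight * behaviourPenalty * behaviourPenalty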
asyncTest "replenishFanout - Degree Lo":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.gossipsub[topic].len == 15
gossipSub.replenishFanout(topic)
check gossipSub.fanout[topic].len == gossipSub.parameters.d
asyncTest "dropFanoutPeers - drop expired fanout topics":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(6, topic, populateGossipsub = true, populateFanout = true)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.lastFanoutPubSub[topic] = Moment.fromNow(1.millis)
await sleepAsync(5.millis) # allow the topic to expire
check gossipSub.fanout[topic].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic notin gossipSub.fanout
asyncTest "dropFanoutPeers - leave unexpired fanout topics":
const
topic1 = "foobar1"
topic2 = "foobar2"
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
6, @[topic1, topic2], populateGossipsub = true, populateFanout = true
)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.lastFanoutPubSub[topic1] = Moment.fromNow(1.millis)
gossipSub.lastFanoutPubSub[topic2] = Moment.fromNow(1.minutes)
await sleepAsync(5.millis) # allow first topic to expire
check gossipSub.fanout[topic1].len == gossipSub.parameters.d
check gossipSub.fanout[topic2].len == gossipSub.parameters.d
gossipSub.dropFanoutPeers()
check topic1 notin gossipSub.fanout
check topic2 in gossipSub.fanout
asyncTest "getGossipPeers - should gather up to degree D non intersecting peers":
let (gossipSub, conns, peers) = setupGossipSubWithPeers(45, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate mesh and fanout peers
for i in 0 ..< 30:
let peer = peers[i]
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.grafted(peer, topic)
gossipSub.mesh[topic].incl(peer)
# generate gossipsub (free standing) peers
for i in 30 ..< 45:
let peer = peers[i]
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
inc seqno
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
check gossipSub.fanout[topic].len == 15
check gossipSub.mesh[topic].len == 15
check gossipSub.gossipsub[topic].len == 15
let gossipPeers = gossipSub.getGossipPeers()
check gossipPeers.len == gossipSub.parameters.d
for p in gossipPeers.keys:
check not gossipSub.fanout.hasPeerId(topic, p.peerId)
check not gossipSub.mesh.hasPeerId(topic, p.peerId)
asyncTest "getGossipPeers - should not crash on missing topics in mesh":
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate mesh and fanout peers
for i, peer in peers:
if i mod 2 == 0:
gossipSub.fanout[topic].incl(peer)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
inc seqno
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let gossipPeers = gossipSub.getGossipPeers()
check gossipPeers.len == gossipSub.parameters.d
asyncTest "getGossipPeers - should not crash on missing topics in fanout":
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate mesh and fanout peers
for i, peer in peers:
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.gossipsub[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
inc seqno
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let gossipPeers = gossipSub.getGossipPeers()
check gossipPeers.len == gossipSub.parameters.d
asyncTest "getGossipPeers - should not crash on missing topics in gossip":
let (gossipSub, conns, peers) = setupGossipSubWithPeers(30, topic)
defer:
await teardownGossipSub(gossipSub, conns)
# generate mesh and fanout peers
for i, peer in peers:
if i mod 2 == 0:
gossipSub.mesh[topic].incl(peer)
gossipSub.grafted(peer, topic)
else:
gossipSub.fanout[topic].incl(peer)
# generate messages
var seqno = 0'u64
for i in 0 .. 5:
let conn = conns[i]
inc seqno
let msg = Message.init(conn.peerId, ("HELLO" & $i).toBytes(), topic, some(seqno))
gossipSub.mcache.put(gossipSub.msgIdProvider(msg).expect(MsgIdSuccess), msg)
let gossipPeers = gossipSub.getGossipPeers()
check gossipPeers.len == 0
asyncTest "getGossipPeers - do not select peer for IHave broadcast if peer score is below GossipThreshold threshold":
const gossipThreshold = -100.0
let
(gossipSub, conns, peers) =
setupGossipSubWithPeers(1, topic, populateGossipsub = true)
peer = peers[0]
defer:
await teardownGossipSub(gossipSub, conns)
# Given peer with score below GossipThreshold
gossipSub.parameters.gossipThreshold = gossipThreshold
peer.score = gossipThreshold - 100.0
# and message in cache
let id = @[0'u8, 1, 2, 3]
gossipSub.mcache.put(id, Message(topic: topic))
# When Node selects peers for IHave broadcast
let gossipPeers = gossipSub.getGossipPeers()
# Then peer is not selected
check:
gossipPeers.len == 0
asyncTest "rebalanceMesh - Degree Lo":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len == gossipSub.parameters.d
asyncTest "rebalanceMesh - bad peers":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
var scoreLow = -11'f64
for peer in peers:
peer.score = scoreLow
scoreLow += 1.0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# scores run from -11 to +3, so only the 4 peers with score >= 0 may remain in the mesh
check gossipSub.mesh[topic].len == 4
for peer in gossipSub.mesh[topic]:
check peer.score >= 0.0
asyncTest "rebalanceMesh - Degree Hi":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.mesh[topic].len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len ==
gossipSub.parameters.d + gossipSub.parameters.dScore
asyncTest "rebalanceMesh - fail due to backoff":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true)
defer:
await teardownGossipSub(gossipSub, conns)
for peer in peers:
gossipSub.backingOff.mgetOrPut(topic, initTable[PeerId, Moment]()).add(
peer.peerId, Moment.now() + 1.hours
)
let prunes = gossipSub.handleGraft(peer, @[ControlGraft(topicID: topic)])
# there must be a control prune due to violation of backoff
check prunes.len != 0
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
# expect 0 since they are all backing off
check gossipSub.mesh[topic].len == 0
asyncTest "rebalanceMesh - fail due to backoff - remote":
let (gossipSub, conns, peers) =
setupGossipSubWithPeers(15, topic, populateGossipsub = true, populateMesh = true)
defer:
await teardownGossipSub(gossipSub, conns)
check gossipSub.peers.len == 15
gossipSub.rebalanceMesh(topic)
check gossipSub.mesh[topic].len != 0
for peer in peers:
gossipSub.handlePrune(
peer,
@[
ControlPrune(
topicID: topic,
peers: @[],
backoff: gossipSub.parameters.pruneBackoff.seconds.uint64,
)
],
)
# expect topic cleaned up since they are all pruned
check topic notin gossipSub.mesh
asyncTest "rebalanceMesh - Degree Hi - audit scenario":
const
numInPeers = 6
numOutPeers = 7
totalPeers = numInPeers + numOutPeers
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
totalPeers, topic, populateGossipsub = true, populateMesh = true
)
defer:
await teardownGossipSub(gossipSub, conns)
gossipSub.parameters.dScore = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dOut = 3
gossipSub.parameters.dHigh = 12
gossipSub.parameters.dLow = 4
for i in 0 ..< numInPeers:
let conn = conns[i]
let peer = peers[i]
conn.transportDir = Direction.In
peer.score = 40.0
for i in numInPeers ..< totalPeers:
let conn = conns[i]
let peer = peers[i]
conn.transportDir = Direction.Out
peer.score = 10.0
check gossipSub.mesh[topic].len == 13
gossipSub.rebalanceMesh(topic)
# ensure we are above dLow
check gossipSub.mesh[topic].len > gossipSub.parameters.dLow
var outbound = 0
for peer in gossipSub.mesh[topic]:
if peer.sendConn.transportDir == Direction.Out:
inc outbound
# ensure we give priority and keep at least dOut outbound peers
check outbound >= gossipSub.parameters.dOut
asyncTest "rebalanceMesh - Degree Hi - dScore controls number of peers to retain by score when pruning":
# Given GossipSub node starting with 13 peers in mesh
const totalPeers = 13
let (gossipSub, conns, peers) = setupGossipSubWithPeers(
totalPeers, topic, populateGossipsub = true, populateMesh = true
)
defer:
await teardownGossipSub(gossipSub, conns)
# And mesh is larger than dHigh
gossipSub.parameters.dLow = 4
gossipSub.parameters.d = 6
gossipSub.parameters.dHigh = 8
gossipSub.parameters.dOut = 3
gossipSub.parameters.dScore = 13
check gossipSub.mesh[topic].len == totalPeers
# When mesh is rebalanced
gossipSub.rebalanceMesh(topic)
# Then pruning is not triggered because the mesh is not larger than dScore
check gossipSub.mesh[topic].len == totalPeers
